HBASE-9721 RegionServer should not accept regionOpen RPC intended for another(previous) server

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1577951 13f79535-47bb-0310-9956-ffa450edef68

parent 543908ecc2
commit 7be181e455
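The patch below stamps the master's view of the intended region server (its start code) into the OpenRegion and CloseRegion RPCs, and the region server rejects a request whose start code does not match its own. A minimal sketch of that guard, using only names introduced by this patch (illustrative restatement, not an additional change):

    // Sketch of the server-side check added in the HRegionServer hunks below.
    // `request` is an OpenRegionRequest or CloseRegionRequest that may carry
    // the new optional serverStartCode field.
    if (request.hasServerStartCode() && serverNameFromMasterPOV != null) {
      long intendedStartCode = request.getServerStartCode();
      if (serverNameFromMasterPOV.getStartcode() != intendedStartCode) {
        // A region server restarted on the same host:port gets a new start code,
        // so an RPC addressed to the previous incarnation must not be honored.
        throw new ServiceException(new DoNotRetryIOException(
            "RPC intended for server with startCode " + intendedStartCode));
      }
    }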
@@ -39,7 +39,6 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -47,12 +46,13 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.RegionException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
@@ -63,12 +63,7 @@ import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
-import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
 import org.apache.hadoop.hbase.exceptions.MergeRegionException;
-import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.ipc.MasterCoprocessorRpcChannel;
 import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
@@ -135,7 +130,12 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequ
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
+import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
+import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
+import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
+import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -1394,7 +1394,7 @@ public class HBaseAdmin implements Abortable, Closeable {
     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
     // Close the region without updating zk state.
     CloseRegionRequest request =
-      RequestConverter.buildCloseRegionRequest(encodedRegionName, false);
+      RequestConverter.buildCloseRegionRequest(sn, encodedRegionName, false);
     try {
       CloseRegionResponse response = admin.closeRegion(null, request);
       boolean isRegionClosed = response.getClosed();
@@ -1418,7 +1418,7 @@ public class HBaseAdmin implements Abortable, Closeable {
   throws IOException {
     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
     // Close the region without updating zk state.
-    ProtobufUtil.closeRegion(admin, hri.getRegionName(), false);
+    ProtobufUtil.closeRegion(admin, sn, hri.getRegionName(), false);
   }

   /**
@@ -1758,9 +1758,9 @@ public class HBaseAdmin implements Abortable, Closeable {
    * region as in unassign. This API can be used when a region not served by any region server and
    * still online as per Master's in memory state. If this API is incorrectly used on active region
    * then master will loose track of that region.
    *
    * This is a special method that should be used by experts or hbck.
    *
    * @param regionName
    *          Region to offline.
    * @throws IOException
@@ -642,7 +642,7 @@ public final class ProtobufUtil {
    * @param cellScanner
    * @param proto the protocol buffer Mutate to convert
    * @return the converted client Append
    * @throws IOException
    */
   public static Append toAppend(final MutationProto proto, final CellScanner cellScanner)
       throws IOException {
@@ -1548,9 +1548,9 @@ public final class ProtobufUtil {
    * @throws IOException
    */
   public static void closeRegion(final AdminService.BlockingInterface admin,
-      final byte[] regionName, final boolean transitionInZK) throws IOException {
+      final ServerName server, final byte[] regionName, final boolean transitionInZK) throws IOException {
     CloseRegionRequest closeRegionRequest =
-      RequestConverter.buildCloseRegionRequest(regionName, transitionInZK);
+      RequestConverter.buildCloseRegionRequest(server, regionName, transitionInZK);
     try {
       admin.closeRegion(null, closeRegionRequest);
     } catch (ServiceException se) {
@@ -1569,11 +1569,12 @@ public final class ProtobufUtil {
    * @throws IOException
    */
   public static boolean closeRegion(final AdminService.BlockingInterface admin,
+      final ServerName server,
       final byte[] regionName,
       final int versionOfClosingNode, final ServerName destinationServer,
       final boolean transitionInZK) throws IOException {
     CloseRegionRequest closeRegionRequest =
-      RequestConverter.buildCloseRegionRequest(
+      RequestConverter.buildCloseRegionRequest(server,
       regionName, versionOfClosingNode, destinationServer, transitionInZK);
     try {
       CloseRegionResponse response = admin.closeRegion(null, closeRegionRequest);
@@ -1591,9 +1592,9 @@ public final class ProtobufUtil {
    * @throws IOException
    */
   public static void openRegion(final AdminService.BlockingInterface admin,
-      final HRegionInfo region) throws IOException {
+      ServerName server, final HRegionInfo region) throws IOException {
     OpenRegionRequest request =
-      RequestConverter.buildOpenRegionRequest(region, -1, null);
+      RequestConverter.buildOpenRegionRequest(server, region, -1, null);
     try {
       admin.openRegion(null, request);
     } catch (ServiceException se) {
@@ -2489,7 +2490,7 @@ public final class ProtobufUtil {

   /**
    * Convert a protocol buffer CellVisibility to a client CellVisibility
    *
    * @param proto
    * @return the converted client CellVisibility
    */
@@ -2500,7 +2501,7 @@ public final class ProtobufUtil {

   /**
    * Convert a protocol buffer CellVisibility bytes to a client CellVisibility
    *
    * @param protoBytes
    * @return the converted client CellVisibility
    * @throws DeserializationException
@@ -2519,7 +2520,7 @@ public final class ProtobufUtil {

   /**
    * Create a protocol buffer CellVisibility based on a client CellVisibility.
    *
    * @param cellVisibility
    * @return a protocol buffer CellVisibility
    */
@@ -2531,7 +2532,7 @@ public final class ProtobufUtil {

   /**
    * Convert a protocol buffer Authorizations to a client Authorizations
    *
    * @param proto
    * @return the converted client Authorizations
    */
@@ -2542,7 +2543,7 @@ public final class ProtobufUtil {

   /**
    * Convert a protocol buffer Authorizations bytes to a client Authorizations
    *
    * @param protoBytes
    * @return the converted client Authorizations
    * @throws DeserializationException
@@ -2561,7 +2562,7 @@ public final class ProtobufUtil {

   /**
    * Create a protocol buffer Authorizations based on a client Authorizations.
    *
    * @param authorizations
    * @return a protocol buffer Authorizations
    */
@@ -23,13 +23,13 @@ import java.util.List;
 import com.google.protobuf.HBaseZeroCopyByteString;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.CellScannable;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Action;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
@@ -719,15 +719,19 @@ public final class RequestConverter {
   /**
    * Create a protocol buffer OpenRegionRequest for a given region
    *
+   * @param server the serverName for the RPC
    * @param region the region to open
    * @param versionOfOfflineNode that needs to be present in the offline node
    * @param favoredNodes
    * @return a protocol buffer OpenRegionRequest
    */
-  public static OpenRegionRequest buildOpenRegionRequest(
+  public static OpenRegionRequest buildOpenRegionRequest(ServerName server,
       final HRegionInfo region, final int versionOfOfflineNode, List<ServerName> favoredNodes) {
     OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder();
     builder.addOpenInfo(buildRegionOpenInfo(region, versionOfOfflineNode, favoredNodes));
+    if (server != null) {
+      builder.setServerStartCode(server.getStartcode());
+    }
     return builder.build();
   }

@@ -757,17 +761,20 @@ public final class RequestConverter {
    * @param transitionInZK indicator if to transition in ZK
    * @return a CloseRegionRequest
    */
-  public static CloseRegionRequest buildCloseRegionRequest(
+  public static CloseRegionRequest buildCloseRegionRequest(ServerName server,
     final byte[] regionName, final boolean transitionInZK) {
     CloseRegionRequest.Builder builder = CloseRegionRequest.newBuilder();
     RegionSpecifier region = buildRegionSpecifier(
       RegionSpecifierType.REGION_NAME, regionName);
     builder.setRegion(region);
     builder.setTransitionInZK(transitionInZK);
+    if (server != null) {
+      builder.setServerStartCode(server.getStartcode());
+    }
     return builder.build();
   }

-  public static CloseRegionRequest buildCloseRegionRequest(
+  public static CloseRegionRequest buildCloseRegionRequest(ServerName server,
     final byte[] regionName, final int versionOfClosingNode,
     ServerName destinationServer, final boolean transitionInZK) {
     CloseRegionRequest.Builder builder = CloseRegionRequest.newBuilder();
@@ -779,6 +786,9 @@ public final class RequestConverter {
     if (destinationServer != null){
       builder.setDestinationServer(ProtobufUtil.toServerName( destinationServer) );
     }
+    if (server != null) {
+      builder.setServerStartCode(server.getStartcode());
+    }
     return builder.build();
   }

@@ -790,7 +800,7 @@ public final class RequestConverter {
    * @return a CloseRegionRequest
    */
   public static CloseRegionRequest
-    buildCloseRegionRequest(final String encodedRegionName,
+    buildCloseRegionRequest(ServerName server, final String encodedRegionName,
       final boolean transitionInZK) {
     CloseRegionRequest.Builder builder = CloseRegionRequest.newBuilder();
     RegionSpecifier region = buildRegionSpecifier(
@@ -798,6 +808,9 @@ public final class RequestConverter {
       Bytes.toBytes(encodedRegionName));
     builder.setRegion(region);
     builder.setTransitionInZK(transitionInZK);
+    if (server != null) {
+      builder.setServerStartCode(server.getStartcode());
+    }
     return builder.build();
   }
@@ -3856,6 +3856,24 @@ public final class AdminProtos {
      */
     org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfoOrBuilder getOpenInfoOrBuilder(
         int index);
+
+    // optional uint64 serverStartCode = 2;
+    /**
+     * <code>optional uint64 serverStartCode = 2;</code>
+     *
+     * <pre>
+     * the intended server for this RPC.
+     * </pre>
+     */
+    boolean hasServerStartCode();
+    /**
+     * <code>optional uint64 serverStartCode = 2;</code>
+     *
+     * <pre>
+     * the intended server for this RPC.
+     * </pre>
+     */
+    long getServerStartCode();
   }
   /**
    * Protobuf type {@code OpenRegionRequest}
@@ -3916,6 +3934,11 @@ public final class AdminProtos {
               openInfo_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo.PARSER, extensionRegistry));
               break;
             }
+            case 16: {
+              bitField0_ |= 0x00000001;
+              serverStartCode_ = input.readUInt64();
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -4989,6 +5012,7 @@ public final class AdminProtos {
     // @@protoc_insertion_point(class_scope:OpenRegionRequest.RegionOpenInfo)
     }

+    private int bitField0_;
     // repeated .OpenRegionRequest.RegionOpenInfo open_info = 1;
     public static final int OPEN_INFO_FIELD_NUMBER = 1;
     private java.util.List<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo> openInfo_;
@@ -5025,8 +5049,33 @@ public final class AdminProtos {
       return openInfo_.get(index);
     }

+    // optional uint64 serverStartCode = 2;
+    public static final int SERVERSTARTCODE_FIELD_NUMBER = 2;
+    private long serverStartCode_;
+    /**
+     * <code>optional uint64 serverStartCode = 2;</code>
+     *
+     * <pre>
+     * the intended server for this RPC.
+     * </pre>
+     */
+    public boolean hasServerStartCode() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>optional uint64 serverStartCode = 2;</code>
+     *
+     * <pre>
+     * the intended server for this RPC.
+     * </pre>
+     */
+    public long getServerStartCode() {
+      return serverStartCode_;
+    }
+
     private void initFields() {
       openInfo_ = java.util.Collections.emptyList();
+      serverStartCode_ = 0L;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -5049,6 +5098,9 @@ public final class AdminProtos {
       for (int i = 0; i < openInfo_.size(); i++) {
         output.writeMessage(1, openInfo_.get(i));
       }
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeUInt64(2, serverStartCode_);
+      }
       getUnknownFields().writeTo(output);
     }

@@ -5062,6 +5114,10 @@ public final class AdminProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeMessageSize(1, openInfo_.get(i));
       }
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(2, serverStartCode_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -5087,6 +5143,11 @@ public final class AdminProtos {
       boolean result = true;
       result = result && getOpenInfoList()
           .equals(other.getOpenInfoList());
+      result = result && (hasServerStartCode() == other.hasServerStartCode());
+      if (hasServerStartCode()) {
+        result = result && (getServerStartCode()
+            == other.getServerStartCode());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -5104,6 +5165,10 @@ public final class AdminProtos {
         hash = (37 * hash) + OPEN_INFO_FIELD_NUMBER;
         hash = (53 * hash) + getOpenInfoList().hashCode();
       }
+      if (hasServerStartCode()) {
+        hash = (37 * hash) + SERVERSTARTCODE_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getServerStartCode());
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -5220,6 +5285,8 @@ public final class AdminProtos {
         } else {
           openInfoBuilder_.clear();
         }
+        serverStartCode_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000002);
         return this;
       }

@@ -5247,6 +5314,7 @@ public final class AdminProtos {
       public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest buildPartial() {
        org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest(this);
         int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
         if (openInfoBuilder_ == null) {
           if (((bitField0_ & 0x00000001) == 0x00000001)) {
             openInfo_ = java.util.Collections.unmodifiableList(openInfo_);
@@ -5256,6 +5324,11 @@ public final class AdminProtos {
         } else {
           result.openInfo_ = openInfoBuilder_.build();
         }
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.serverStartCode_ = serverStartCode_;
+        result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
       }
@@ -5297,6 +5370,9 @@ public final class AdminProtos {
             }
           }
         }
+        if (other.hasServerStartCode()) {
+          setServerStartCode(other.getServerStartCode());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -5570,6 +5646,55 @@ public final class AdminProtos {
         return openInfoBuilder_;
       }

+      // optional uint64 serverStartCode = 2;
+      private long serverStartCode_ ;
+      /**
+       * <code>optional uint64 serverStartCode = 2;</code>
+       *
+       * <pre>
+       * the intended server for this RPC.
+       * </pre>
+       */
+      public boolean hasServerStartCode() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>optional uint64 serverStartCode = 2;</code>
+       *
+       * <pre>
+       * the intended server for this RPC.
+       * </pre>
+       */
+      public long getServerStartCode() {
+        return serverStartCode_;
+      }
+      /**
+       * <code>optional uint64 serverStartCode = 2;</code>
+       *
+       * <pre>
+       * the intended server for this RPC.
+       * </pre>
+       */
+      public Builder setServerStartCode(long value) {
+        bitField0_ |= 0x00000002;
+        serverStartCode_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional uint64 serverStartCode = 2;</code>
+       *
+       * <pre>
+       * the intended server for this RPC.
+       * </pre>
+       */
+      public Builder clearServerStartCode() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        serverStartCode_ = 0L;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:OpenRegionRequest)
     }
@@ -6243,6 +6368,24 @@ public final class AdminProtos {
      * <code>optional .ServerName destination_server = 4;</code>
      */
     org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getDestinationServerOrBuilder();
+
+    // optional uint64 serverStartCode = 5;
+    /**
+     * <code>optional uint64 serverStartCode = 5;</code>
+     *
+     * <pre>
+     * the intended server for this RPC.
+     * </pre>
+     */
+    boolean hasServerStartCode();
+    /**
+     * <code>optional uint64 serverStartCode = 5;</code>
+     *
+     * <pre>
+     * the intended server for this RPC.
+     * </pre>
+     */
+    long getServerStartCode();
   }
   /**
    * Protobuf type {@code CloseRegionRequest}
@@ -6337,6 +6480,11 @@ public final class AdminProtos {
               bitField0_ |= 0x00000008;
               break;
             }
+            case 40: {
+              bitField0_ |= 0x00000010;
+              serverStartCode_ = input.readUInt64();
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -6453,11 +6601,36 @@ public final class AdminProtos {
       return destinationServer_;
     }

+    // optional uint64 serverStartCode = 5;
+    public static final int SERVERSTARTCODE_FIELD_NUMBER = 5;
+    private long serverStartCode_;
+    /**
+     * <code>optional uint64 serverStartCode = 5;</code>
+     *
+     * <pre>
+     * the intended server for this RPC.
+     * </pre>
+     */
+    public boolean hasServerStartCode() {
+      return ((bitField0_ & 0x00000010) == 0x00000010);
+    }
+    /**
+     * <code>optional uint64 serverStartCode = 5;</code>
+     *
+     * <pre>
+     * the intended server for this RPC.
+     * </pre>
+     */
+    public long getServerStartCode() {
+      return serverStartCode_;
+    }
+
     private void initFields() {
       region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
       versionOfClosingNode_ = 0;
       transitionInZK_ = true;
       destinationServer_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+      serverStartCode_ = 0L;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -6497,6 +6670,9 @@ public final class AdminProtos {
       if (((bitField0_ & 0x00000008) == 0x00000008)) {
         output.writeMessage(4, destinationServer_);
       }
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+        output.writeUInt64(5, serverStartCode_);
+      }
       getUnknownFields().writeTo(output);
     }

@@ -6522,6 +6698,10 @@ public final class AdminProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeMessageSize(4, destinationServer_);
       }
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(5, serverStartCode_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -6565,6 +6745,11 @@ public final class AdminProtos {
         result = result && getDestinationServer()
             .equals(other.getDestinationServer());
       }
+      result = result && (hasServerStartCode() == other.hasServerStartCode());
+      if (hasServerStartCode()) {
+        result = result && (getServerStartCode()
+            == other.getServerStartCode());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -6594,6 +6779,10 @@ public final class AdminProtos {
         hash = (37 * hash) + DESTINATION_SERVER_FIELD_NUMBER;
         hash = (53 * hash) + getDestinationServer().hashCode();
       }
+      if (hasServerStartCode()) {
+        hash = (37 * hash) + SERVERSTARTCODE_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getServerStartCode());
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -6727,6 +6916,8 @@ public final class AdminProtos {
           destinationServerBuilder_.clear();
         }
         bitField0_ = (bitField0_ & ~0x00000008);
+        serverStartCode_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000010);
         return this;
       }

@@ -6779,6 +6970,10 @@ public final class AdminProtos {
         } else {
           result.destinationServer_ = destinationServerBuilder_.build();
         }
+        if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+          to_bitField0_ |= 0x00000010;
+        }
+        result.serverStartCode_ = serverStartCode_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -6807,6 +7002,9 @@ public final class AdminProtos {
         if (other.hasDestinationServer()) {
           mergeDestinationServer(other.getDestinationServer());
         }
+        if (other.hasServerStartCode()) {
+          setServerStartCode(other.getServerStartCode());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -7148,6 +7346,55 @@ public final class AdminProtos {
         return destinationServerBuilder_;
       }

+      // optional uint64 serverStartCode = 5;
+      private long serverStartCode_ ;
+      /**
+       * <code>optional uint64 serverStartCode = 5;</code>
+       *
+       * <pre>
+       * the intended server for this RPC.
+       * </pre>
+       */
+      public boolean hasServerStartCode() {
+        return ((bitField0_ & 0x00000010) == 0x00000010);
+      }
+      /**
+       * <code>optional uint64 serverStartCode = 5;</code>
+       *
+       * <pre>
+       * the intended server for this RPC.
+       * </pre>
+       */
+      public long getServerStartCode() {
+        return serverStartCode_;
+      }
+      /**
+       * <code>optional uint64 serverStartCode = 5;</code>
+       *
+       * <pre>
+       * the intended server for this RPC.
+       * </pre>
+       */
+      public Builder setServerStartCode(long value) {
+        bitField0_ |= 0x00000010;
+        serverStartCode_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional uint64 serverStartCode = 5;</code>
+       *
+       * <pre>
+       * the intended server for this RPC.
+       * </pre>
+       */
+      public Builder clearServerStartCode() {
+        bitField0_ = (bitField0_ & ~0x00000010);
+        serverStartCode_ = 0L;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:CloseRegionRequest)
     }
@@ -20919,76 +21166,77 @@ public final class AdminProtos {
       "FileResponse\022\022\n\nstore_file\030\001 \003(\t\"\030\n\026GetO" +
       "nlineRegionRequest\";\n\027GetOnlineRegionRes" +
       "ponse\022 \n\013region_info\030\001 \003(\0132\013.RegionInfo\"" +
-      "\275\001\n\021OpenRegionRequest\0224\n\topen_info\030\001 \003(\013" +
-      "2!.OpenRegionRequest.RegionOpenInfo\032r\n\016R" +
-      "egionOpenInfo\022\033\n\006region\030\001 \002(\0132\013.RegionIn" +
-      "fo\022\037\n\027version_of_offline_node\030\002 \001(\r\022\"\n\rf" +
-      "avored_nodes\030\003 \003(\0132\013.ServerName\"\235\001\n\022Open" +
-      "RegionResponse\022=\n\ropening_state\030\001 \003(\0162&.",
-      "OpenRegionResponse.RegionOpeningState\"H\n" +
-      "\022RegionOpeningState\022\n\n\006OPENED\020\000\022\022\n\016ALREA" +
-      "DY_OPENED\020\001\022\022\n\016FAILED_OPENING\020\002\"\240\001\n\022Clos" +
-      "eRegionRequest\022 \n\006region\030\001 \002(\0132\020.RegionS" +
-      "pecifier\022\037\n\027version_of_closing_node\030\002 \001(" +
-      "\r\022\036\n\020transition_in_ZK\030\003 \001(\010:\004true\022\'\n\022des" +
-      "tination_server\030\004 \001(\0132\013.ServerName\"%\n\023Cl" +
-      "oseRegionResponse\022\016\n\006closed\030\001 \002(\010\"P\n\022Flu" +
-      "shRegionRequest\022 \n\006region\030\001 \002(\0132\020.Region" +
-      "Specifier\022\030\n\020if_older_than_ts\030\002 \001(\004\"?\n\023F",
-      "lushRegionResponse\022\027\n\017last_flush_time\030\001 " +
-      "\002(\004\022\017\n\007flushed\030\002 \001(\010\"K\n\022SplitRegionReque" +
-      "st\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\023\n\013" +
-      "split_point\030\002 \001(\014\"\025\n\023SplitRegionResponse" +
-      "\"W\n\024CompactRegionRequest\022 \n\006region\030\001 \002(\013" +
-      "2\020.RegionSpecifier\022\r\n\005major\030\002 \001(\010\022\016\n\006fam" +
-      "ily\030\003 \001(\014\"\027\n\025CompactRegionResponse\"\262\001\n\031U" +
-      "pdateFavoredNodesRequest\022@\n\013update_info\030" +
-      "\001 \003(\0132+.UpdateFavoredNodesRequest.Region" +
-      "UpdateInfo\032S\n\020RegionUpdateInfo\022\033\n\006region",
-      "\030\001 \002(\0132\013.RegionInfo\022\"\n\rfavored_nodes\030\002 \003" +
-      "(\0132\013.ServerName\".\n\032UpdateFavoredNodesRes" +
-      "ponse\022\020\n\010response\030\001 \001(\r\"v\n\023MergeRegionsR" +
-      "equest\022\"\n\010region_a\030\001 \002(\0132\020.RegionSpecifi" +
-      "er\022\"\n\010region_b\030\002 \002(\0132\020.RegionSpecifier\022\027" +
-      "\n\010forcible\030\003 \001(\010:\005false\"\026\n\024MergeRegionsR" +
-      "esponse\"X\n\010WALEntry\022\024\n\003key\030\001 \002(\0132\007.WALKe" +
-      "y\022\027\n\017key_value_bytes\030\002 \003(\014\022\035\n\025associated" +
-      "_cell_count\030\003 \001(\005\"4\n\030ReplicateWALEntryRe" +
-      "quest\022\030\n\005entry\030\001 \003(\0132\t.WALEntry\"\033\n\031Repli",
-      "cateWALEntryResponse\"\026\n\024RollWALWriterReq" +
-      "uest\"0\n\025RollWALWriterResponse\022\027\n\017region_" +
-      "to_flush\030\001 \003(\014\"#\n\021StopServerRequest\022\016\n\006r" +
-      "eason\030\001 \002(\t\"\024\n\022StopServerResponse\"\026\n\024Get" +
-      "ServerInfoRequest\"B\n\nServerInfo\022 \n\013serve" +
-      "r_name\030\001 \002(\0132\013.ServerName\022\022\n\nwebui_port\030" +
-      "\002 \001(\r\"9\n\025GetServerInfoResponse\022 \n\013server" +
-      "_info\030\001 \002(\0132\013.ServerInfo2\306\007\n\014AdminServic" +
-      "e\022>\n\rGetRegionInfo\022\025.GetRegionInfoReques" +
-      "t\032\026.GetRegionInfoResponse\022;\n\014GetStoreFil",
-      "e\022\024.GetStoreFileRequest\032\025.GetStoreFileRe" +
-      "sponse\022D\n\017GetOnlineRegion\022\027.GetOnlineReg" +
-      "ionRequest\032\030.GetOnlineRegionResponse\0225\n\n" +
-      "OpenRegion\022\022.OpenRegionRequest\032\023.OpenReg" +
-      "ionResponse\0228\n\013CloseRegion\022\023.CloseRegion" +
-      "Request\032\024.CloseRegionResponse\0228\n\013FlushRe" +
-      "gion\022\023.FlushRegionRequest\032\024.FlushRegionR" +
-      "esponse\0228\n\013SplitRegion\022\023.SplitRegionRequ" +
-      "est\032\024.SplitRegionResponse\022>\n\rCompactRegi" +
-      "on\022\025.CompactRegionRequest\032\026.CompactRegio",
-      "nResponse\022;\n\014MergeRegions\022\024.MergeRegions" +
-      "Request\032\025.MergeRegionsResponse\022J\n\021Replic" +
-      "ateWALEntry\022\031.ReplicateWALEntryRequest\032\032" +
-      ".ReplicateWALEntryResponse\022?\n\006Replay\022\031.R" +
-      "eplicateWALEntryRequest\032\032.ReplicateWALEn" +
-      "tryResponse\022>\n\rRollWALWriter\022\025.RollWALWr" +
-      "iterRequest\032\026.RollWALWriterResponse\022>\n\rG" +
-      "etServerInfo\022\025.GetServerInfoRequest\032\026.Ge" +
-      "tServerInfoResponse\0225\n\nStopServer\022\022.Stop" +
-      "ServerRequest\032\023.StopServerResponse\022M\n\022Up",
-      "dateFavoredNodes\022\032.UpdateFavoredNodesReq" +
-      "uest\032\033.UpdateFavoredNodesResponseBA\n*org" +
-      ".apache.hadoop.hbase.protobuf.generatedB" +
-      "\013AdminProtosH\001\210\001\001\240\001\001"
+      "\326\001\n\021OpenRegionRequest\0224\n\topen_info\030\001 \003(\013" +
+      "2!.OpenRegionRequest.RegionOpenInfo\022\027\n\017s" +
+      "erverStartCode\030\002 \001(\004\032r\n\016RegionOpenInfo\022\033" +
+      "\n\006region\030\001 \002(\0132\013.RegionInfo\022\037\n\027version_o" +
+      "f_offline_node\030\002 \001(\r\022\"\n\rfavored_nodes\030\003 " +
+      "\003(\0132\013.ServerName\"\235\001\n\022OpenRegionResponse\022",
+      "=\n\ropening_state\030\001 \003(\0162&.OpenRegionRespo" +
+      "nse.RegionOpeningState\"H\n\022RegionOpeningS" +
+      "tate\022\n\n\006OPENED\020\000\022\022\n\016ALREADY_OPENED\020\001\022\022\n\016" +
+      "FAILED_OPENING\020\002\"\271\001\n\022CloseRegionRequest\022" +
+      " \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\037\n\027ver" +
+      "sion_of_closing_node\030\002 \001(\r\022\036\n\020transition" +
+      "_in_ZK\030\003 \001(\010:\004true\022\'\n\022destination_server" +
+      "\030\004 \001(\0132\013.ServerName\022\027\n\017serverStartCode\030\005" +
+      " \001(\004\"%\n\023CloseRegionResponse\022\016\n\006closed\030\001 " +
+      "\002(\010\"P\n\022FlushRegionRequest\022 \n\006region\030\001 \002(",
+      "\0132\020.RegionSpecifier\022\030\n\020if_older_than_ts\030" +
+      "\002 \001(\004\"?\n\023FlushRegionResponse\022\027\n\017last_flu" +
+      "sh_time\030\001 \002(\004\022\017\n\007flushed\030\002 \001(\010\"K\n\022SplitR" +
+      "egionRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpe" +
+      "cifier\022\023\n\013split_point\030\002 \001(\014\"\025\n\023SplitRegi" +
+      "onResponse\"W\n\024CompactRegionRequest\022 \n\006re" +
+      "gion\030\001 \002(\0132\020.RegionSpecifier\022\r\n\005major\030\002 " +
+      "\001(\010\022\016\n\006family\030\003 \001(\014\"\027\n\025CompactRegionResp" +
+      "onse\"\262\001\n\031UpdateFavoredNodesRequest\022@\n\013up" +
+      "date_info\030\001 \003(\0132+.UpdateFavoredNodesRequ",
+      "est.RegionUpdateInfo\032S\n\020RegionUpdateInfo" +
+      "\022\033\n\006region\030\001 \002(\0132\013.RegionInfo\022\"\n\rfavored" +
+      "_nodes\030\002 \003(\0132\013.ServerName\".\n\032UpdateFavor" +
+      "edNodesResponse\022\020\n\010response\030\001 \001(\r\"v\n\023Mer" +
+      "geRegionsRequest\022\"\n\010region_a\030\001 \002(\0132\020.Reg" +
+      "ionSpecifier\022\"\n\010region_b\030\002 \002(\0132\020.RegionS" +
+      "pecifier\022\027\n\010forcible\030\003 \001(\010:\005false\"\026\n\024Mer" +
+      "geRegionsResponse\"X\n\010WALEntry\022\024\n\003key\030\001 \002" +
+      "(\0132\007.WALKey\022\027\n\017key_value_bytes\030\002 \003(\014\022\035\n\025" +
+      "associated_cell_count\030\003 \001(\005\"4\n\030Replicate",
+      "WALEntryRequest\022\030\n\005entry\030\001 \003(\0132\t.WALEntr" +
+      "y\"\033\n\031ReplicateWALEntryResponse\"\026\n\024RollWA" +
+      "LWriterRequest\"0\n\025RollWALWriterResponse\022" +
+      "\027\n\017region_to_flush\030\001 \003(\014\"#\n\021StopServerRe" +
+      "quest\022\016\n\006reason\030\001 \002(\t\"\024\n\022StopServerRespo" +
+      "nse\"\026\n\024GetServerInfoRequest\"B\n\nServerInf" +
+      "o\022 \n\013server_name\030\001 \002(\0132\013.ServerName\022\022\n\nw" +
+      "ebui_port\030\002 \001(\r\"9\n\025GetServerInfoResponse" +
+      "\022 \n\013server_info\030\001 \002(\0132\013.ServerInfo2\306\007\n\014A" +
+      "dminService\022>\n\rGetRegionInfo\022\025.GetRegion",
+      "InfoRequest\032\026.GetRegionInfoResponse\022;\n\014G" +
+      "etStoreFile\022\024.GetStoreFileRequest\032\025.GetS" +
+      "toreFileResponse\022D\n\017GetOnlineRegion\022\027.Ge" +
+      "tOnlineRegionRequest\032\030.GetOnlineRegionRe" +
+      "sponse\0225\n\nOpenRegion\022\022.OpenRegionRequest" +
+      "\032\023.OpenRegionResponse\0228\n\013CloseRegion\022\023.C" +
+      "loseRegionRequest\032\024.CloseRegionResponse\022" +
+      "8\n\013FlushRegion\022\023.FlushRegionRequest\032\024.Fl" +
+      "ushRegionResponse\0228\n\013SplitRegion\022\023.Split" +
+      "RegionRequest\032\024.SplitRegionResponse\022>\n\rC",
+      "ompactRegion\022\025.CompactRegionRequest\032\026.Co" +
+      "mpactRegionResponse\022;\n\014MergeRegions\022\024.Me" +
+      "rgeRegionsRequest\032\025.MergeRegionsResponse" +
+      "\022J\n\021ReplicateWALEntry\022\031.ReplicateWALEntr" +
+      "yRequest\032\032.ReplicateWALEntryResponse\022?\n\006" +
+      "Replay\022\031.ReplicateWALEntryRequest\032\032.Repl" +
+      "icateWALEntryResponse\022>\n\rRollWALWriter\022\025" +
+      ".RollWALWriterRequest\032\026.RollWALWriterRes" +
+      "ponse\022>\n\rGetServerInfo\022\025.GetServerInfoRe" +
+      "quest\032\026.GetServerInfoResponse\0225\n\nStopSer",
+      "ver\022\022.StopServerRequest\032\023.StopServerResp" +
+      "onse\022M\n\022UpdateFavoredNodes\022\032.UpdateFavor" +
+      "edNodesRequest\032\033.UpdateFavoredNodesRespo" +
+      "nseBA\n*org.apache.hadoop.hbase.protobuf." +
+      "generatedB\013AdminProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -21036,7 +21284,7 @@ public final class AdminProtos {
     internal_static_OpenRegionRequest_fieldAccessorTable = new
       com.google.protobuf.GeneratedMessage.FieldAccessorTable(
         internal_static_OpenRegionRequest_descriptor,
-        new java.lang.String[] { "OpenInfo", });
+        new java.lang.String[] { "OpenInfo", "ServerStartCode", });
     internal_static_OpenRegionRequest_RegionOpenInfo_descriptor =
       internal_static_OpenRegionRequest_descriptor.getNestedTypes().get(0);
     internal_static_OpenRegionRequest_RegionOpenInfo_fieldAccessorTable = new
@@ -21054,7 +21302,7 @@ public final class AdminProtos {
     internal_static_CloseRegionRequest_fieldAccessorTable = new
       com.google.protobuf.GeneratedMessage.FieldAccessorTable(
         internal_static_CloseRegionRequest_descriptor,
-        new java.lang.String[] { "Region", "VersionOfClosingNode", "TransitionInZK", "DestinationServer", });
+        new java.lang.String[] { "Region", "VersionOfClosingNode", "TransitionInZK", "DestinationServer", "ServerStartCode", });
     internal_static_CloseRegionResponse_descriptor =
       getDescriptor().getMessageTypes().get(9);
     internal_static_CloseRegionResponse_fieldAccessorTable = new
@@ -68,6 +68,8 @@ message GetOnlineRegionResponse {

 message OpenRegionRequest {
   repeated RegionOpenInfo open_info = 1;
+  // the intended server for this RPC.
+  optional uint64 serverStartCode = 2;

   message RegionOpenInfo {
     required RegionInfo region = 1;
@@ -95,6 +97,8 @@ message CloseRegionRequest {
   optional uint32 version_of_closing_node = 2;
   optional bool transition_in_ZK = 3 [default = true];
   optional ServerName destination_server = 4;
+  // the intended server for this RPC.
+  optional uint64 serverStartCode = 5;
 }

 message CloseRegionResponse {
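With the two optional serverStartCode fields above, a caller that knows the destination ServerName can let the converter stamp the start code automatically. A hedged usage sketch based on the new RequestConverter overloads in this patch (the variable names destination, regionInfo, and regionName are illustrative):

    // `destination` is the ServerName the master is sending the RPC to.
    OpenRegionRequest openReq =
        RequestConverter.buildOpenRegionRequest(destination, regionInfo, -1, null);
    // openReq.getServerStartCode() now equals destination.getStartcode(); the
    // receiving region server compares it against its own start code.

    CloseRegionRequest closeReq =
        RequestConverter.buildCloseRegionRequest(destination, regionName, false);

Because the fields are optional and the converters skip them when the ServerName is null, callers and servers that do not know about serverStartCode keep working unchanged.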
|
|
@ -628,7 +628,7 @@ public class ServerManager {
|
||||||
return RegionOpeningState.FAILED_OPENING;
|
return RegionOpeningState.FAILED_OPENING;
|
||||||
}
|
}
|
||||||
OpenRegionRequest request =
|
OpenRegionRequest request =
|
||||||
RequestConverter.buildOpenRegionRequest(region, versionOfOfflineNode, favoredNodes);
|
RequestConverter.buildOpenRegionRequest(server, region, versionOfOfflineNode, favoredNodes);
|
||||||
try {
|
try {
|
||||||
OpenRegionResponse response = admin.openRegion(null, request);
|
OpenRegionResponse response = admin.openRegion(null, request);
|
||||||
return ResponseConverter.getRegionOpeningState(response);
|
return ResponseConverter.getRegionOpeningState(response);
|
||||||
|
@ -690,7 +690,7 @@ public class ServerManager {
|
||||||
region.getRegionNameAsString() +
|
region.getRegionNameAsString() +
|
||||||
" failed because no RPC connection found to this server");
|
" failed because no RPC connection found to this server");
|
||||||
}
|
}
|
||||||
return ProtobufUtil.closeRegion(admin, region.getRegionName(),
|
return ProtobufUtil.closeRegion(admin, server, region.getRegionName(),
|
||||||
versionOfClosingNode, dest, transitionInZK);
|
versionOfClosingNode, dest, transitionInZK);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -881,7 +881,7 @@ public class ServerManager {
|
||||||
Map<ServerName, Boolean> getRequeuedDeadServers() {
|
Map<ServerName, Boolean> getRequeuedDeadServers() {
|
||||||
return Collections.unmodifiableMap(this.requeuedDeadServers);
|
return Collections.unmodifiableMap(this.requeuedDeadServers);
|
||||||
}
|
}
|
||||||
|
|
||||||
public boolean isServerOnline(ServerName serverName) {
|
public boolean isServerOnline(ServerName serverName) {
|
||||||
return serverName != null && onlineServers.containsKey(serverName);
|
return serverName != null && onlineServers.containsKey(serverName);
|
||||||
}
|
}
|
||||||
|
@ -969,7 +969,7 @@ public class ServerManager {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* To clear any dead server with same host name and port of any online server
|
* To clear any dead server with same host name and port of any online server
|
||||||
*/
|
*/
|
||||||
|
|
|
@@ -3606,6 +3606,15 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
       throw new ServiceException(ie);
     }
     requestCount.increment();
+    if (request.hasServerStartCode() && this.serverNameFromMasterPOV != null) {
+      // check that we are the same server that this RPC is intended for.
+      long serverStartCode = request.getServerStartCode();
+      if (this.serverNameFromMasterPOV.getStartcode() != serverStartCode) {
+        throw new ServiceException(new DoNotRetryIOException("This RPC was intended for a " +
+            "different server with startCode: " + serverStartCode + ", this server is: "
+            + this.serverNameFromMasterPOV));
+      }
+    }
     OpenRegionResponse.Builder builder = OpenRegionResponse.newBuilder();
     final int regionCount = request.getOpenInfoCount();
     final Map<TableName, HTableDescriptor> htds =
@@ -3763,6 +3772,15 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
 
     try {
       checkOpen();
+      if (request.hasServerStartCode() && this.serverNameFromMasterPOV != null) {
+        // check that we are the same server that this RPC is intended for.
+        long serverStartCode = request.getServerStartCode();
+        if (this.serverNameFromMasterPOV.getStartcode() != serverStartCode) {
+          throw new ServiceException(new DoNotRetryIOException("This RPC was intended for a " +
+              "different server with startCode: " + serverStartCode + ", this server is: "
+              + this.serverNameFromMasterPOV));
+        }
+      }
       final String encodedRegionName = ProtobufUtil.getRegionEncodedName(request.getRegion());
 
       // Can be null if we're calling close on a region that's not online
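
Both openRegion() and closeRegion() now apply the same comparison between the start code stamped on the request and the start code this server registered with. The following is a standalone sketch of that guard with a hypothetical class and method name; the actual change inlines the check exactly as shown above.

    import com.google.protobuf.ServiceException;

    import org.apache.hadoop.hbase.DoNotRetryIOException;
    import org.apache.hadoop.hbase.ServerName;

    public final class ServerStartCodeGuard {
      // Mirrors the check added to HRegionServer.openRegion()/closeRegion(): an RPC stamped
      // with a different start code is refused with a non-retryable error, so the caller
      // re-plans the region instead of retrying against the wrong server incarnation.
      public static void rejectIfWrongServer(ServerName thisServer, long requestedStartCode)
          throws ServiceException {
        if (thisServer != null && thisServer.getStartcode() != requestedStartCode) {
          throw new ServiceException(new DoNotRetryIOException("This RPC was intended for a "
              + "different server with startCode: " + requestedStartCode + ", this server is: "
              + thisServer));
        }
      }
    }
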
@@ -149,7 +149,7 @@ public class HBaseFsckRepair {
     HConnection connection = admin.getConnection();
     AdminService.BlockingInterface rs = connection.getAdmin(server);
     try {
-      ProtobufUtil.closeRegion(rs, region.getRegionName(), false);
+      ProtobufUtil.closeRegion(rs, server, region.getRegionName(), false);
     } catch (IOException e) {
       LOG.warn("Exception when closing region: " + region.getRegionNameAsString(), e);
     }
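
Callers such as HBaseFsckRepair above (and the tests that follow) now pass the ServerName they believe they are addressing into the ProtobufUtil helpers. A hedged usage sketch of the widened closeRegion signature, wrapped in a hypothetical helper class so it is self-contained:

    import java.io.IOException;

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.HConnection;
    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;

    public final class CloseOnIntendedServerSketch {
      // The caller names the server it believes it is talking to, so a restarted server
      // (same host:port, new start code) fails fast instead of acting on a stale request.
      public static void closeRegion(HConnection connection, ServerName server,
          HRegionInfo region) throws IOException {
        AdminService.BlockingInterface admin = connection.getAdmin(server);
        // transitionInZK = false, matching the HBaseFsckRepair call site above.
        ProtobufUtil.closeRegion(admin, server, region.getRegionName(), false);
      }
    }
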
@@ -472,7 +472,7 @@ public class TestScannersFromClientSide {
     byte[] regionName = hri.getRegionName();
     int i = cluster.getServerWith(regionName);
     HRegionServer rs = cluster.getRegionServer(i);
-    ProtobufUtil.closeRegion(rs, regionName, false);
+    ProtobufUtil.closeRegion(rs, rs.getServerName(), regionName, false);
     long startTime = EnvironmentEdgeManager.currentTimeMillis();
     long timeOut = 300000;
     while (true) {
@@ -492,7 +492,7 @@ public class TestScannersFromClientSide {
     states.regionOffline(hri);
     states.updateRegionState(hri, State.OPENING);
     ZKAssign.createNodeOffline(zkw, hri, loc.getServerName());
-    ProtobufUtil.openRegion(rs, hri);
+    ProtobufUtil.openRegion(rs, rs.getServerName(), hri);
     startTime = EnvironmentEdgeManager.currentTimeMillis();
     while (true) {
       if (rs.getOnlineRegion(regionName) != null) {
@@ -32,6 +32,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.hadoop.hbase.CellScannable;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
@@ -56,8 +57,8 @@ import org.apache.hadoop.hbase.executor.ExecutorType;
 import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
 import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.master.TableLockManager.NullTableLockManager;
-import org.apache.hadoop.hbase.master.balancer.SimpleLoadBalancer;
 import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
+import org.apache.hadoop.hbase.master.balancer.SimpleLoadBalancer;
 import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
 import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -611,6 +612,7 @@ public class TestAssignmentManager {
     Mockito.when(implementation.scan(
       (RpcController)Mockito.any(), (ScanRequest)Mockito.any())).
         thenAnswer(new Answer<ScanResponse>() {
+          @Override
           public ScanResponse answer(InvocationOnMock invocation) throws Throwable {
             PayloadCarryingRpcController controller = (PayloadCarryingRpcController) invocation
                 .getArguments()[0];
@@ -716,6 +718,7 @@
       this.server, this.serverManager);
     Watcher zkw = new ZooKeeperWatcher(HBaseConfiguration.create(), "unittest",
         null) {
+      @Override
       public RecoverableZooKeeper getRecoverableZooKeeper() {
         return recoverableZk;
       }
@@ -1111,6 +1114,7 @@
     final List<CellScannable> rows = new ArrayList<CellScannable>(1);
     rows.add(r);
     Answer<ScanResponse> ans = new Answer<ClientProtos.ScanResponse>() {
+      @Override
       public ScanResponse answer(InvocationOnMock invocation) throws Throwable {
         PayloadCarryingRpcController controller = (PayloadCarryingRpcController) invocation
             .getArguments()[0];
@@ -1240,6 +1244,7 @@
     // Thats ok because we make a new zk watcher for each test.
     watcher.registerListenerFirst(am);
     Thread t = new Thread("RunAmJoinCluster") {
+      @Override
       public void run() {
         // Call the joinCluster function as though we were doing a master
         // failover at this point. It will stall just before we go to add
@@ -1362,4 +1367,35 @@
     assertFalse("The region should not in transition",
       regionStates.isRegionInTransition(hri));
   }
+
+  /**
+   * Tests an on-the-fly RPC that was scheduled for the earlier RS on the same port
+   * for openRegion. AM should assign this somewhere else. (HBASE-9721)
+   */
+  @SuppressWarnings("unchecked")
+  @Test
+  public void testOpenCloseRegionRPCIntendedForPreviousServer() throws Exception {
+    Mockito.when(this.serverManager.sendRegionOpen(Mockito.eq(SERVERNAME_B), Mockito.eq(REGIONINFO),
+      Mockito.anyInt(), (List<ServerName>)Mockito.any()))
+      .thenThrow(new DoNotRetryIOException());
+    this.server.getConfiguration().setInt("hbase.assignment.maximum.attempts", 100);
+
+    HRegionInfo hri = REGIONINFO;
+    CatalogTracker ct = Mockito.mock(CatalogTracker.class);
+    LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(
+      server.getConfiguration());
+    // Create an AM.
+    AssignmentManager am = new AssignmentManager(this.server,
+      this.serverManager, ct, balancer, null, null, master.getTableLockManager());
+    RegionStates regionStates = am.getRegionStates();
+    try {
+      am.regionPlans.put(REGIONINFO.getEncodedName(),
+        new RegionPlan(REGIONINFO, null, SERVERNAME_B));
+
+      // Should fail once, but succeed on the second attempt for the SERVERNAME_A
+      am.assign(hri, true, false);
+    } finally {
+      assertEquals(SERVERNAME_A, regionStates.getRegionState(REGIONINFO).getServerName());
+    }
+  }
 }
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.UnknownRegionException;
+import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
@@ -137,9 +138,13 @@ public class TestAssignmentManagerOnCluster {
   /**
    * This tests region assignment on a simulated restarted server
    */
-  @Test (timeout=60000)
+  @Test (timeout=120000)
   public void testAssignRegionOnRestartedServer() throws Exception {
     String table = "testAssignRegionOnRestartedServer";
+    TEST_UTIL.getMiniHBaseCluster().getConf().setInt("hbase.assignment.maximum.attempts", 40);
+    TEST_UTIL.getMiniHBaseCluster().stopMaster(0);
+    TEST_UTIL.getMiniHBaseCluster().startMaster(); //restart the master so that conf take into affect
+
     ServerName deadServer = null;
     HMaster master = null;
     try {
@@ -148,7 +153,7 @@
       admin.createTable(desc);
 
       HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
-      HRegionInfo hri = new HRegionInfo(
+      final HRegionInfo hri = new HRegionInfo(
         desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
       MetaEditor.addRegionToMeta(meta, hri);
 
@@ -164,7 +169,7 @@
         destServer.getPort(), destServer.getStartcode() - 100L);
       master.serverManager.recordNewServerWithLock(deadServer, ServerLoad.EMPTY_SERVERLOAD);
 
-      AssignmentManager am = master.getAssignmentManager();
+      final AssignmentManager am = master.getAssignmentManager();
       RegionPlan plan = new RegionPlan(hri, null, deadServer);
       am.addPlan(hri.getEncodedName(), plan);
       master.assignRegion(hri);
@@ -174,15 +179,14 @@
         EventType.RS_ZK_REGION_OPENING, 0);
       assertEquals("TansitionNode should fail", -1, version);
 
-      // Give region 2 seconds to assign, which may not be enough.
-      // However, if HBASE-8545 is broken, this test will be flaky.
-      // Otherwise, this test should never be flaky.
-      Thread.sleep(2000);
+      TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
+        @Override
+        public boolean evaluate() throws Exception {
+          return ! am.getRegionStates().isRegionInTransition(hri);
+        }
+      });
 
-      assertTrue("Region should still be in transition",
-        am.getRegionStates().isRegionInTransition(hri));
-      assertEquals("Assign node should still be in version 0", 0,
-        ZKAssign.getVersion(master.getZooKeeper(), hri));
+      assertFalse("Region should be assigned", am.getRegionStates().isRegionInTransition(hri));
     } finally {
       if (deadServer != null) {
         master.serverManager.expireServer(deadServer);
@@ -337,7 +337,7 @@ public class TestMasterFailover {
       region = enabledRegions.remove(0);
       regionsThatShouldBeOnline.add(region);
       ZKAssign.createNodeOffline(zkw, region, serverName);
-      ProtobufUtil.openRegion(hrs, region);
+      ProtobufUtil.openRegion(hrs, hrs.getServerName(), region);
       while (true) {
         byte [] bytes = ZKAssign.getData(zkw, region.getEncodedName());
         RegionTransition rt = RegionTransition.parseFrom(bytes);
@@ -352,7 +352,7 @@ public class TestMasterFailover {
       region = disabledRegions.remove(0);
       regionsThatShouldBeOffline.add(region);
       ZKAssign.createNodeOffline(zkw, region, serverName);
-      ProtobufUtil.openRegion(hrs, region);
+      ProtobufUtil.openRegion(hrs, hrs.getServerName(), region);
       while (true) {
         byte [] bytes = ZKAssign.getData(zkw, region.getEncodedName());
         RegionTransition rt = RegionTransition.parseFrom(bytes);
@@ -736,7 +736,7 @@ public class TestMasterFailover {
       region = enabledRegions.remove(0);
       regionsThatShouldBeOnline.add(region);
       ZKAssign.createNodeOffline(zkw, region, deadServerName);
-      ProtobufUtil.openRegion(hrsDead, region);
+      ProtobufUtil.openRegion(hrsDead, hrsDead.getServerName(), region);
       while (true) {
         byte [] bytes = ZKAssign.getData(zkw, region.getEncodedName());
         RegionTransition rt = RegionTransition.parseFrom(bytes);
@@ -752,7 +752,7 @@ public class TestMasterFailover {
       region = disabledRegions.remove(0);
       regionsThatShouldBeOffline.add(region);
       ZKAssign.createNodeOffline(zkw, region, deadServerName);
-      ProtobufUtil.openRegion(hrsDead, region);
+      ProtobufUtil.openRegion(hrsDead, hrsDead.getServerName(), region);
       while (true) {
         byte [] bytes = ZKAssign.getData(zkw, region.getEncodedName());
         RegionTransition rt = RegionTransition.parseFrom(bytes);
@@ -772,7 +772,7 @@ public class TestMasterFailover {
       region = enabledRegions.remove(0);
       regionsThatShouldBeOnline.add(region);
       ZKAssign.createNodeOffline(zkw, region, deadServerName);
-      ProtobufUtil.openRegion(hrsDead, region);
+      ProtobufUtil.openRegion(hrsDead, hrsDead.getServerName(), region);
       while (true) {
         byte [] bytes = ZKAssign.getData(zkw, region.getEncodedName());
         RegionTransition rt = RegionTransition.parseFrom(bytes);
@@ -790,7 +790,7 @@ public class TestMasterFailover {
       region = disabledRegions.remove(0);
       regionsThatShouldBeOffline.add(region);
       ZKAssign.createNodeOffline(zkw, region, deadServerName);
-      ProtobufUtil.openRegion(hrsDead, region);
+      ProtobufUtil.openRegion(hrsDead, hrsDead.getServerName(), region);
       while (true) {
         byte [] bytes = ZKAssign.getData(zkw, region.getEncodedName());
         RegionTransition rt = RegionTransition.parseFrom(bytes);
@@ -824,12 +824,12 @@
     log("Waiting for master to be ready");
     assertTrue(cluster.waitForActiveAndReadyMaster());
     log("Master is ready");
 
     // Wait until SSH processing completed for dead server.
     while (master.getServerManager().areDeadServersInProgress()) {
       Thread.sleep(10);
     }
 
     // Failover should be completed, now wait for no RIT
     log("Waiting for no more RIT");
     ZKAssign.blockUntilNoRIT(zkw);
@@ -30,19 +30,19 @@ import java.util.Collection;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableDescriptors;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -230,7 +230,7 @@ public class TestZKBasedOpenCloseRegion {
     Whitebox.setInternalState(regionServer, "tableDescriptors", htd);
     Mockito.doThrow(new IOException()).when(htd).get((TableName) Mockito.any());
     try {
-      ProtobufUtil.openRegion(regionServer, REGIONINFO);
+      ProtobufUtil.openRegion(regionServer, regionServer.getServerName(), REGIONINFO);
       fail("It should throw IOException ");
     } catch (IOException e) {
     }
@@ -19,17 +19,21 @@
 
 package org.apache.hadoop.hbase.regionserver;
 
+import java.io.IOException;
+
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
 import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
@@ -104,7 +108,7 @@ public class TestRegionServerNoMaster {
     // We reopen. We need a ZK node here, as a open is always triggered by a master.
     ZKAssign.createNodeOffline(HTU.getZooKeeperWatcher(), hri, getRS().getServerName());
     // first version is '0'
-    AdminProtos.OpenRegionRequest orr = RequestConverter.buildOpenRegionRequest(hri, 0, null);
+    AdminProtos.OpenRegionRequest orr = RequestConverter.buildOpenRegionRequest(getRS().getServerName(), hri, 0, null);
     AdminProtos.OpenRegionResponse responseOpen = getRS().openRegion(null, orr);
     Assert.assertTrue(responseOpen.getOpeningStateCount() == 1);
     Assert.assertTrue(responseOpen.getOpeningState(0).
@@ -150,7 +154,7 @@
   private void closeNoZK() throws Exception {
     // no transition in ZK
     AdminProtos.CloseRegionRequest crr =
-      RequestConverter.buildCloseRegionRequest(regionName, false);
+      RequestConverter.buildCloseRegionRequest(getRS().getServerName(), regionName, false);
     AdminProtos.CloseRegionResponse responseClose = getRS().closeRegion(null, crr);
     Assert.assertTrue(responseClose.getClosed());
 
@@ -170,7 +174,7 @@
 
     // Transition in ZK on. This should fail, as there is no znode
     AdminProtos.CloseRegionRequest crr = RequestConverter.buildCloseRegionRequest(
-      regionName, true);
+      getRS().getServerName(), regionName, true);
     AdminProtos.CloseRegionResponse responseClose = getRS().closeRegion(null, crr);
     Assert.assertTrue(responseClose.getClosed());
 
@@ -189,7 +193,7 @@
     ZKAssign.createNodeClosing(HTU.getZooKeeperWatcher(), hri, getRS().getServerName());
 
     AdminProtos.CloseRegionRequest crr = RequestConverter.buildCloseRegionRequest(
-      regionName, true);
+      getRS().getServerName(), regionName, true);
     AdminProtos.CloseRegionResponse responseClose = getRS().closeRegion(null, crr);
     Assert.assertTrue(responseClose.getClosed());
 
@@ -223,7 +227,7 @@
 
     // We're sending multiple requests in a row. The region server must handle this nicely.
     for (int i = 0; i < 10; i++) {
-      AdminProtos.OpenRegionRequest orr = RequestConverter.buildOpenRegionRequest(hri, 0, null);
+      AdminProtos.OpenRegionRequest orr = RequestConverter.buildOpenRegionRequest(getRS().getServerName(), hri, 0, null);
       AdminProtos.OpenRegionResponse responseOpen = getRS().openRegion(null, orr);
       Assert.assertTrue(responseOpen.getOpeningStateCount() == 1);
 
@@ -244,7 +248,7 @@
     try {
       // fake region to be closing now, need to clear state afterwards
       getRS().regionsInTransitionInRS.put(hri.getEncodedNameAsBytes(), Boolean.FALSE);
-      AdminProtos.OpenRegionRequest orr = RequestConverter.buildOpenRegionRequest(hri, 0, null);
+      AdminProtos.OpenRegionRequest orr = RequestConverter.buildOpenRegionRequest(getRS().getServerName(), hri, 0, null);
       getRS().openRegion(null, orr);
       Assert.fail("The closing region should not be opened");
     } catch (ServiceException se) {
@@ -262,7 +266,7 @@
     ZKAssign.createNodeClosing(HTU.getZooKeeperWatcher(), hri, getRS().getServerName());
     for (int i = 0; i < 10; i++) {
       AdminProtos.CloseRegionRequest crr =
-        RequestConverter.buildCloseRegionRequest(regionName, 0, null, true);
+        RequestConverter.buildCloseRegionRequest(getRS().getServerName(), regionName, 0, null, true);
       try {
         AdminProtos.CloseRegionResponse responseClose = getRS().closeRegion(null, crr);
         Assert.assertEquals("The first request should succeeds", 0, i);
@@ -298,7 +302,7 @@
 
     // That's a close without ZK.
     AdminProtos.CloseRegionRequest crr =
-      RequestConverter.buildCloseRegionRequest(regionName, false);
+      RequestConverter.buildCloseRegionRequest(getRS().getServerName(), regionName, false);
     try {
       getRS().closeRegion(null, crr);
       Assert.assertTrue(false);
@@ -341,7 +345,7 @@
     // That's a close without ZK.
     ZKAssign.createNodeClosing(HTU.getZooKeeperWatcher(), hri, getRS().getServerName());
     AdminProtos.CloseRegionRequest crr =
-      RequestConverter.buildCloseRegionRequest(regionName, false);
+      RequestConverter.buildCloseRegionRequest(getRS().getServerName(), regionName, false);
    try {
       getRS().closeRegion(null, crr);
       Assert.assertTrue(false);
@@ -375,4 +379,38 @@
 
     reopenRegion();
   }
+
+  /**
+   * Tests an on-the-fly RPC that was scheduled for the earlier RS on the same port
+   * for openRegion. The region server should reject this RPC. (HBASE-9721)
+   */
+  @Test
+  public void testOpenCloseRegionRPCIntendedForPreviousServer() throws Exception {
+    Assert.assertTrue(getRS().getRegion(regionName).isAvailable());
+
+    ServerName sn = getRS().getServerName();
+    ServerName earlierServerName = ServerName.valueOf(sn.getHostname(), sn.getPort(), 1);
+
+    try {
+      CloseRegionRequest request = RequestConverter.buildCloseRegionRequest(earlierServerName, regionName, true);
+      getRS().closeRegion(null, request);
+      Assert.fail("The closeRegion should have been rejected");
+    } catch (ServiceException se) {
+      Assert.assertTrue(se.getCause() instanceof IOException);
+      Assert.assertTrue(se.getCause().getMessage().contains("This RPC was intended for a different server"));
+    }
+
+    //actual close
+    closeNoZK();
+    try {
+      AdminProtos.OpenRegionRequest orr = RequestConverter.buildOpenRegionRequest(earlierServerName, hri, 0, null);
+      getRS().openRegion(null, orr);
+      Assert.fail("The openRegion should have been rejected");
+    } catch (ServiceException se) {
+      Assert.assertTrue(se.getCause() instanceof IOException);
+      Assert.assertTrue(se.getCause().getMessage().contains("This RPC was intended for a different server"));
+    } finally {
+      reopenRegion();
+    }
+  }
 }
@@ -2137,7 +2137,7 @@ public class TestHBaseFsck {
     HConnection connection = HConnectionManager.getConnection(conf);
     HRegionLocation metaLocation = connection.locateRegion(TableName.META_TABLE_NAME,
       HConstants.EMPTY_START_ROW);
-    ServerName hsa = ServerName.valueOf(metaLocation.getHostnamePort(), 0L);
+    ServerName hsa = metaLocation.getServerName();
     HRegionInfo hri = metaLocation.getRegionInfo();
     if (unassign) {
       LOG.info("Undeploying meta region " + hri + " from server " + hsa);