HBASE-6659 Port HBASE-6508 Filter out edits at log split time

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1381684 13f79535-47bb-0310-9956-ffa450edef68
Author: Zhihong Yu
Date: 2012-09-06 17:22:01 +00:00
Parent: 82d9956a7a
Commit: c310653a19
16 changed files with 1369 additions and 70 deletions

RegionLoad.java

@@ -153,4 +153,12 @@ public class RegionLoad {
public long getCurrentCompactedKVs() {
return regionLoadPB.getCurrentCompactedKVs();
}
/**
* This does not really belong inside RegionLoad but it's being done in the name of expediency.
* @return the completed sequence Id for the region
*/
public long getCompleteSequenceId() {
return regionLoadPB.getCompleteSequenceId();
}
}
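For context, the new accessor is reachable through the existing cluster-status path; a minimal read-side sketch, assuming the trunk-era HBaseAdmin/ClusterStatus/ServerLoad accessors and an existing Configuration conf:

// Hypothetical client sketch: print each region's completed sequence id.
HBaseAdmin admin = new HBaseAdmin(conf);
ClusterStatus status = admin.getClusterStatus();
for (ServerName server : status.getServers()) {
  ServerLoad load = status.getLoad(server);
  for (Map.Entry<byte[], RegionLoad> e : load.getRegionsLoad().entrySet()) {
    // getCompleteSequenceId() returns 0 if the server never set the field
    System.out.println(Bytes.toString(e.getKey()) + " completeSequenceId="
        + e.getValue().getCompleteSequenceId());
  }
}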

HMaster.java

@@ -30,8 +30,11 @@ import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.SortedMap;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
@@ -58,6 +61,7 @@ import org.apache.hadoop.hbase.MasterMonitorProtocol;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
import org.apache.hadoop.hbase.PleaseHoldException;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.RegionServerStatusProtocol;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerLoad;
@@ -150,6 +154,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDe
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
@@ -1149,9 +1155,17 @@ Server {
return resp;
}
@Override
public GetLastFlushedSequenceIdResponse getLastFlushedSequenceId(RpcController controller,
GetLastFlushedSequenceIdRequest request) throws ServiceException {
byte[] regionName = request.getRegionName().toByteArray();
long seqId = serverManager.getLastFlushedSequenceId(regionName);
return ResponseConverter.buildGetLastFlushedSequenceIdResponse(seqId);
}
@Override
public RegionServerReportResponse regionServerReport(
RpcController controller,RegionServerReportRequest request) throws ServiceException {
RpcController controller, RegionServerReportRequest request) throws ServiceException {
try {
HBaseProtos.ServerLoad sl = request.getLoad();
this.serverManager.regionServerReport(ProtobufUtil.toServerName(request.getServer()), new ServerLoad(sl));
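The getLastFlushedSequenceId endpoint above would be reached from the region server side through the generated blocking stub plus the converter added below in RequestConverter; a hedged sketch, where masterStub (a RegionServerStatusService.BlockingInterface) and regionName are assumed to be in scope:

GetLastFlushedSequenceIdRequest req =
    RequestConverter.buildGetLastFlushedSequenceIdRequest(regionName);
try {
  long lastFlushedSeqId =
      masterStub.getLastFlushedSequenceId(null, req).getLastFlushedSequenceId();
  // -1 means the master has no flush record for this region yet
} catch (ServiceException e) {
  // assumed handling: treat as "unknown" and replay every edit
}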
@@ -1746,12 +1760,14 @@ Server {
}
@Override
public GetClusterStatusResponse getClusterStatus(RpcController controller, GetClusterStatusRequest req)
public GetClusterStatusResponse getClusterStatus(RpcController controller,
GetClusterStatusRequest req)
throws ServiceException {
GetClusterStatusResponse.Builder response = GetClusterStatusResponse.newBuilder();
response.setClusterStatus(getClusterStatus().convert());
return response.build();
}
/**
* @return cluster status
*/
@@ -1770,7 +1786,8 @@ Server {
for (String s: backupMasterStrings) {
try {
byte [] bytes =
ZKUtil.getData(this.zooKeeper, ZKUtil.joinZNode(this.zooKeeper.backupMasterAddressesZNode, s));
ZKUtil.getData(this.zooKeeper, ZKUtil.joinZNode(
this.zooKeeper.backupMasterAddressesZNode, s));
if (bytes != null) {
ServerName sn;
try {

ServerManager.java

@@ -28,7 +28,10 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -37,14 +40,10 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClockOutOfSyncException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.PleaseHoldException;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.YouAreDeadException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
@@ -55,8 +54,16 @@ import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.master.handler.MetaServerShutdownHandler;
import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.hbase.util.Bytes;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
/**
@@ -79,6 +86,9 @@ public class ServerManager {
// Set if we are to shutdown the cluster.
private volatile boolean clusterShutdown = false;
private final SortedMap<byte[], Long> flushedSequenceIdByRegion =
new ConcurrentSkipListMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
/** Map of registered servers to their current load */
private final Map<ServerName, ServerLoad> onlineServers =
new ConcurrentHashMap<ServerName, ServerLoad>();
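Design note on the flushedSequenceIdByRegion map above: byte[] does not implement value-based equals/hashCode, so a ConcurrentHashMap keyed on region names would never find its entries; a ConcurrentSkipListMap built with Bytes.BYTES_COMPARATOR gives thread-safe lookups with byte-wise key equality.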
@@ -163,6 +173,33 @@ public class ServerManager {
return sn;
}
/**
* Updates the last flushed sequence ids for the regions on server <code>sn</code>
* @param sn the region server that sent the report
* @param hsl the server load carrying per-region loads
*/
private void updateLastFlushedSequenceIds(ServerName sn, ServerLoad hsl) {
Map<byte[], RegionLoad> regionsLoad = hsl.getRegionsLoad();
for (Entry<byte[], RegionLoad> entry : regionsLoad.entrySet()) {
Long existingValue = flushedSequenceIdByRegion.get(entry.getKey());
long l = entry.getValue().getCompleteSequenceId();
if (existingValue != null) {
if (l != -1 && l < existingValue) {
if (LOG.isDebugEnabled()) {
LOG.debug("RegionServer " + sn +
" indicates a last flushed sequence id (" + entry.getValue() +
") that is less than the previous last flushed sequence id (" +
existingValue + ") for region " +
Bytes.toString(entry.getKey()) + " Ignoring.");
}
continue; // Don't let smaller sequence ids override greater
// sequence ids.
}
}
flushedSequenceIdByRegion.put(entry.getKey(), l);
}
}
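// Worked example (hypothetical values) of the monotonic rule above:
//   stored=100, reported l=120 -> 120 stored (advances)
//   stored=120, reported l=100 -> ignored, 120 kept (stale report)
//   stored=120, reported l=-1  -> -1 stored: the l != -1 guard only filters
//   smaller valid ids, so a "no flush yet" report overwrites the entry as
//   the code is written.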
void regionServerReport(ServerName sn, ServerLoad sl)
throws YouAreDeadException, PleaseHoldException {
checkIsDead(sn, "REPORT");
@@ -178,6 +215,7 @@
} else {
this.onlineServers.put(sn, sl);
}
updateLastFlushedSequenceIds(sn, sl);
}
/**
@@ -271,6 +309,14 @@
this.serverConnections.remove(serverName);
}
public long getLastFlushedSequenceId(byte[] regionName) {
// A single get avoids the containsKey/get race on the concurrent map,
// which could otherwise unbox null if the entry were removed in between.
Long seqId = flushedSequenceIdByRegion.get(regionName);
return seqId == null ? -1 : seqId.longValue();
}
/**
* @param serverName the server to look up
* @return the ServerLoad if serverName is known, else null

RequestConverter.java

@@ -101,6 +101,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceReque
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
@@ -1138,4 +1139,15 @@ public final class RequestConverter {
public static IsCatalogJanitorEnabledRequest buildIsCatalogJanitorEnabledRequest() {
return IsCatalogJanitorEnabledRequest.newBuilder().build();
}
/**
* Creates a request for querying the master for the last flushed sequence id of a region
* @param regionName the name of the region to query
* @return A {@link GetLastFlushedSequenceIdRequest}
*/
public static GetLastFlushedSequenceIdRequest buildGetLastFlushedSequenceIdRequest(
byte[] regionName) {
return GetLastFlushedSequenceIdRequest.newBuilder().setRegionName(
ByteString.copyFrom(regionName)).build();
}
}

ResponseConverter.java

@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.util.StringUtils;
@@ -238,4 +239,14 @@ public final class ResponseConverter {
}
// End utilities for Admin
/**
* Creates a response for the last flushed sequence id request
* @param seqId the last flushed sequence id to return
* @return A GetLastFlushedSequenceIdResponse
*/
public static GetLastFlushedSequenceIdResponse buildGetLastFlushedSequenceIdResponse(
long seqId) {
return GetLastFlushedSequenceIdResponse.newBuilder().setLastFlushedSequenceId(seqId).build();
}
}

HBaseProtos.java (generated)

@@ -4049,6 +4049,10 @@ public final class HBaseProtos {
getCoprocessorsOrBuilderList();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder(
int index);
// optional uint64 completeSequenceId = 16;
boolean hasCompleteSequenceId();
long getCompleteSequenceId();
}
public static final class RegionLoad extends
com.google.protobuf.GeneratedMessage
@@ -4243,6 +4247,16 @@ public final class HBaseProtos {
return coprocessors_.get(index);
}
// optional uint64 completeSequenceId = 16;
public static final int COMPLETESEQUENCEID_FIELD_NUMBER = 16;
private long completeSequenceId_;
public boolean hasCompleteSequenceId() {
return ((bitField0_ & 0x00004000) == 0x00004000);
}
public long getCompleteSequenceId() {
return completeSequenceId_;
}
private void initFields() {
regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
stores_ = 0;
@@ -4259,6 +4273,7 @@ public final class HBaseProtos {
totalStaticIndexSizeKB_ = 0;
totalStaticBloomSizeKB_ = 0;
coprocessors_ = java.util.Collections.emptyList();
completeSequenceId_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -4331,6 +4346,9 @@ public final class HBaseProtos {
for (int i = 0; i < coprocessors_.size(); i++) {
output.writeMessage(15, coprocessors_.get(i));
}
if (((bitField0_ & 0x00004000) == 0x00004000)) {
output.writeUInt64(16, completeSequenceId_);
}
getUnknownFields().writeTo(output);
}
@@ -4400,6 +4418,10 @@ public final class HBaseProtos {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(15, coprocessors_.get(i));
}
if (((bitField0_ & 0x00004000) == 0x00004000)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(16, completeSequenceId_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -4495,6 +4517,11 @@ public final class HBaseProtos {
}
result = result && getCoprocessorsList()
.equals(other.getCoprocessorsList());
result = result && (hasCompleteSequenceId() == other.hasCompleteSequenceId());
if (hasCompleteSequenceId()) {
result = result && (getCompleteSequenceId()
== other.getCompleteSequenceId());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -4564,6 +4591,10 @@ public final class HBaseProtos {
hash = (37 * hash) + COPROCESSORS_FIELD_NUMBER;
hash = (53 * hash) + getCoprocessorsList().hashCode();
}
if (hasCompleteSequenceId()) {
hash = (37 * hash) + COMPLETESEQUENCEID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getCompleteSequenceId());
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
@@ -4720,6 +4751,8 @@ public final class HBaseProtos {
} else {
coprocessorsBuilder_.clear();
}
completeSequenceId_ = 0L;
bitField0_ = (bitField0_ & ~0x00008000);
return this;
}
@@ -4827,6 +4860,10 @@ public final class HBaseProtos {
} else {
result.coprocessors_ = coprocessorsBuilder_.build();
}
if (((from_bitField0_ & 0x00008000) == 0x00008000)) {
to_bitField0_ |= 0x00004000;
}
result.completeSequenceId_ = completeSequenceId_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -4911,6 +4948,9 @@ public final class HBaseProtos {
}
}
}
if (other.hasCompleteSequenceId()) {
setCompleteSequenceId(other.getCompleteSequenceId());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -5036,6 +5076,11 @@ public final class HBaseProtos {
addCoprocessors(subBuilder.buildPartial());
break;
}
case 128: {
bitField0_ |= 0x00008000;
completeSequenceId_ = input.readUInt64();
break;
}
}
}
}
@@ -5591,6 +5636,27 @@ public final class HBaseProtos {
return coprocessorsBuilder_;
}
// optional uint64 completeSequenceId = 16;
private long completeSequenceId_ ;
public boolean hasCompleteSequenceId() {
return ((bitField0_ & 0x00008000) == 0x00008000);
}
public long getCompleteSequenceId() {
return completeSequenceId_;
}
public Builder setCompleteSequenceId(long value) {
bitField0_ |= 0x00008000;
completeSequenceId_ = value;
onChanged();
return this;
}
public Builder clearCompleteSequenceId() {
bitField0_ = (bitField0_ & ~0x00008000);
completeSequenceId_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:RegionLoad)
}
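The generated code above boils down to a one-line schema change; reconstructed from the "// optional uint64 completeSequenceId = 16" markers it carries, the RegionLoad message in hbase.proto gains (surrounding fields abbreviated):

message RegionLoad {
  // ... existing fields 1 through 15 (regionSpecifier, stores, storefiles,
  //     ..., coprocessors) ...
  optional uint64 completeSequenceId = 16;
}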
@@ -11091,7 +11157,7 @@ public final class HBaseProtos {
"\0222\n\004type\030\001 \002(\0162$.RegionSpecifier.RegionS" +
"pecifierType\022\r\n\005value\030\002 \002(\014\"?\n\023RegionSpe" +
"cifierType\022\017\n\013REGION_NAME\020\001\022\027\n\023ENCODED_R" +
"EGION_NAME\020\002\"\270\003\n\nRegionLoad\022)\n\017regionSpe" +
"EGION_NAME\020\002\"\324\003\n\nRegionLoad\022)\n\017regionSpe" +
"cifier\030\001 \002(\0132\020.RegionSpecifier\022\016\n\006stores" +
"\030\002 \001(\r\022\022\n\nstorefiles\030\003 \001(\r\022\037\n\027storeUncom" +
"pressedSizeMB\030\004 \001(\r\022\027\n\017storefileSizeMB\030\005" +
@@ -11102,31 +11168,31 @@ public final class HBaseProtos {
"dKVs\030\013 \001(\004\022\027\n\017rootIndexSizeKB\030\014 \001(\r\022\036\n\026t" +
"otalStaticIndexSizeKB\030\r \001(\r\022\036\n\026totalStat" +
"icBloomSizeKB\030\016 \001(\r\022\"\n\014coprocessors\030\017 \003(" +
"\0132\014.Coprocessor\"\342\001\n\nServerLoad\022\030\n\020number" +
"OfRequests\030\001 \001(\r\022\035\n\025totalNumberOfRequest" +
"s\030\002 \001(\r\022\022\n\nusedHeapMB\030\003 \001(\r\022\021\n\tmaxHeapMB" +
"\030\004 \001(\r\022 \n\013regionLoads\030\005 \003(\0132\013.RegionLoad" +
"\022\"\n\014coprocessors\030\006 \003(\0132\014.Coprocessor\022\027\n\017",
"reportStartTime\030\007 \001(\004\022\025\n\rreportEndTime\030\010" +
" \001(\004\"%\n\tTimeRange\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 " +
"\001(\004\"0\n\006Filter\022\014\n\004name\030\001 \002(\t\022\030\n\020serialize" +
"dFilter\030\002 \001(\014\"w\n\010KeyValue\022\013\n\003row\030\001 \002(\014\022\016" +
"\n\006family\030\002 \002(\014\022\021\n\tqualifier\030\003 \002(\014\022\021\n\ttim" +
"estamp\030\004 \001(\004\022\031\n\007keyType\030\005 \001(\0162\010.KeyType\022" +
"\r\n\005value\030\006 \001(\014\"?\n\nServerName\022\020\n\010hostName" +
"\030\001 \002(\t\022\014\n\004port\030\002 \001(\r\022\021\n\tstartCode\030\003 \001(\004\"" +
"\033\n\013Coprocessor\022\014\n\004name\030\001 \002(\t\"-\n\016NameStri" +
"ngPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rN",
"ameBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(" +
"\014\"/\n\016BytesBytesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006se" +
"cond\030\002 \002(\014*r\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\rL" +
"ESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003" +
"\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005N" +
"O_OP\020\006*_\n\007KeyType\022\013\n\007MINIMUM\020\000\022\007\n\003PUT\020\004\022" +
"\n\n\006DELETE\020\010\022\021\n\rDELETE_COLUMN\020\014\022\021\n\rDELETE" +
"_FAMILY\020\016\022\014\n\007MAXIMUM\020\377\001B>\n*org.apache.ha" +
"doop.hbase.protobuf.generatedB\013HBaseProt" +
"osH\001\240\001\001"
"\0132\014.Coprocessor\022\032\n\022completeSequenceId\030\020 " +
"\001(\004\"\342\001\n\nServerLoad\022\030\n\020numberOfRequests\030\001" +
" \001(\r\022\035\n\025totalNumberOfRequests\030\002 \001(\r\022\022\n\nu" +
"sedHeapMB\030\003 \001(\r\022\021\n\tmaxHeapMB\030\004 \001(\r\022 \n\013re" +
"gionLoads\030\005 \003(\0132\013.RegionLoad\022\"\n\014coproces",
"sors\030\006 \003(\0132\014.Coprocessor\022\027\n\017reportStartT" +
"ime\030\007 \001(\004\022\025\n\rreportEndTime\030\010 \001(\004\"%\n\tTime" +
"Range\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001(\004\"0\n\006Filte" +
"r\022\014\n\004name\030\001 \002(\t\022\030\n\020serializedFilter\030\002 \001(" +
"\014\"w\n\010KeyValue\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002 \002" +
"(\014\022\021\n\tqualifier\030\003 \002(\014\022\021\n\ttimestamp\030\004 \001(\004" +
"\022\031\n\007keyType\030\005 \001(\0162\010.KeyType\022\r\n\005value\030\006 \001" +
"(\014\"?\n\nServerName\022\020\n\010hostName\030\001 \002(\t\022\014\n\004po" +
"rt\030\002 \001(\r\022\021\n\tstartCode\030\003 \001(\004\"\033\n\013Coprocess" +
"or\022\014\n\004name\030\001 \002(\t\"-\n\016NameStringPair\022\014\n\004na",
"me\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNameBytesPair" +
"\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014\"/\n\016BytesBy" +
"tesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006second\030\002 \002(\014*r" +
"\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL" +
"\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_" +
"OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*_\n\007Ke" +
"yType\022\013\n\007MINIMUM\020\000\022\007\n\003PUT\020\004\022\n\n\006DELETE\020\010\022" +
"\021\n\rDELETE_COLUMN\020\014\022\021\n\rDELETE_FAMILY\020\016\022\014\n" +
"\007MAXIMUM\020\377\001B>\n*org.apache.hadoop.hbase.p" +
"rotobuf.generatedB\013HBaseProtosH\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -11186,7 +11252,7 @@ public final class HBaseProtos {
internal_static_RegionLoad_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegionLoad_descriptor,
new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "Coprocessors", },
new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "Coprocessors", "CompleteSequenceId", },
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.Builder.class);
internal_static_ServerLoad_descriptor =

RegionServerStatusProtos.java (generated)

@@ -2949,6 +2949,767 @@ public final class RegionServerStatusProtos {
// @@protoc_insertion_point(class_scope:ReportRSFatalErrorResponse)
}
public interface GetLastFlushedSequenceIdRequestOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required bytes regionName = 1;
boolean hasRegionName();
com.google.protobuf.ByteString getRegionName();
}
public static final class GetLastFlushedSequenceIdRequest extends
com.google.protobuf.GeneratedMessage
implements GetLastFlushedSequenceIdRequestOrBuilder {
// Use GetLastFlushedSequenceIdRequest.newBuilder() to construct.
private GetLastFlushedSequenceIdRequest(Builder builder) {
super(builder);
}
private GetLastFlushedSequenceIdRequest(boolean noInit) {}
private static final GetLastFlushedSequenceIdRequest defaultInstance;
public static GetLastFlushedSequenceIdRequest getDefaultInstance() {
return defaultInstance;
}
public GetLastFlushedSequenceIdRequest getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdRequest_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdRequest_fieldAccessorTable;
}
private int bitField0_;
// required bytes regionName = 1;
public static final int REGIONNAME_FIELD_NUMBER = 1;
private com.google.protobuf.ByteString regionName_;
public boolean hasRegionName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public com.google.protobuf.ByteString getRegionName() {
return regionName_;
}
private void initFields() {
regionName_ = com.google.protobuf.ByteString.EMPTY;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasRegionName()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, regionName_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, regionName_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest other = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest) obj;
boolean result = true;
result = result && (hasRegionName() == other.hasRegionName());
if (hasRegionName()) {
result = result && getRegionName()
.equals(other.getRegionName());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRegionName()) {
hash = (37 * hash) + REGIONNAME_FIELD_NUMBER;
hash = (53 * hash) + getRegionName().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequestOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdRequest_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdRequest_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
regionName_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.getDescriptor();
}
public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest build() {
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest result = new org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.regionName_ = regionName_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.getDefaultInstance()) return this;
if (other.hasRegionName()) {
setRegionName(other.getRegionName());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasRegionName()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
regionName_ = input.readBytes();
break;
}
}
}
}
private int bitField0_;
// required bytes regionName = 1;
private com.google.protobuf.ByteString regionName_ = com.google.protobuf.ByteString.EMPTY;
public boolean hasRegionName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public com.google.protobuf.ByteString getRegionName() {
return regionName_;
}
public Builder setRegionName(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
regionName_ = value;
onChanged();
return this;
}
public Builder clearRegionName() {
bitField0_ = (bitField0_ & ~0x00000001);
regionName_ = getDefaultInstance().getRegionName();
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:GetLastFlushedSequenceIdRequest)
}
static {
defaultInstance = new GetLastFlushedSequenceIdRequest(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:GetLastFlushedSequenceIdRequest)
}
public interface GetLastFlushedSequenceIdResponseOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required uint64 lastFlushedSequenceId = 1;
boolean hasLastFlushedSequenceId();
long getLastFlushedSequenceId();
}
public static final class GetLastFlushedSequenceIdResponse extends
com.google.protobuf.GeneratedMessage
implements GetLastFlushedSequenceIdResponseOrBuilder {
// Use GetLastFlushedSequenceIdResponse.newBuilder() to construct.
private GetLastFlushedSequenceIdResponse(Builder builder) {
super(builder);
}
private GetLastFlushedSequenceIdResponse(boolean noInit) {}
private static final GetLastFlushedSequenceIdResponse defaultInstance;
public static GetLastFlushedSequenceIdResponse getDefaultInstance() {
return defaultInstance;
}
public GetLastFlushedSequenceIdResponse getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdResponse_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdResponse_fieldAccessorTable;
}
private int bitField0_;
// required uint64 lastFlushedSequenceId = 1;
public static final int LASTFLUSHEDSEQUENCEID_FIELD_NUMBER = 1;
private long lastFlushedSequenceId_;
public boolean hasLastFlushedSequenceId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public long getLastFlushedSequenceId() {
return lastFlushedSequenceId_;
}
private void initFields() {
lastFlushedSequenceId_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasLastFlushedSequenceId()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt64(1, lastFlushedSequenceId_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(1, lastFlushedSequenceId_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse other = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse) obj;
boolean result = true;
result = result && (hasLastFlushedSequenceId() == other.hasLastFlushedSequenceId());
if (hasLastFlushedSequenceId()) {
result = result && (getLastFlushedSequenceId()
== other.getLastFlushedSequenceId());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
@java.lang.Override
public int hashCode() {
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasLastFlushedSequenceId()) {
hash = (37 * hash) + LASTFLUSHEDSEQUENCEID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getLastFlushedSequenceId());
}
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
return builder.buildParsed();
} else {
return null;
}
}
public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponseOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdResponse_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdResponse_fieldAccessorTable;
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
lastFlushedSequenceId_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDescriptor();
}
public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse build() {
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
private org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse result = new org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.lastFlushedSequenceId_ = lastFlushedSequenceId_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDefaultInstance()) return this;
if (other.hasLastFlushedSequenceId()) {
setLastFlushedSequenceId(other.getLastFlushedSequenceId());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasLastFlushedSequenceId()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder(
this.getUnknownFields());
while (true) {
int tag = input.readTag();
switch (tag) {
case 0:
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
this.setUnknownFields(unknownFields.build());
onChanged();
return this;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
lastFlushedSequenceId_ = input.readUInt64();
break;
}
}
}
}
private int bitField0_;
// required uint64 lastFlushedSequenceId = 1;
private long lastFlushedSequenceId_ ;
public boolean hasLastFlushedSequenceId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
public long getLastFlushedSequenceId() {
return lastFlushedSequenceId_;
}
public Builder setLastFlushedSequenceId(long value) {
bitField0_ |= 0x00000001;
lastFlushedSequenceId_ = value;
onChanged();
return this;
}
public Builder clearLastFlushedSequenceId() {
bitField0_ = (bitField0_ & ~0x00000001);
lastFlushedSequenceId_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:GetLastFlushedSequenceIdResponse)
}
static {
defaultInstance = new GetLastFlushedSequenceIdResponse(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:GetLastFlushedSequenceIdResponse)
}
public static abstract class RegionServerStatusService
implements com.google.protobuf.Service {
protected RegionServerStatusService() {}
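Reconstructed from the field markers above ("// required bytes regionName = 1", "// required uint64 lastFlushedSequenceId = 1") and the service descriptor that follows, the RegionServerStatus.proto additions behind this generated code are, in sketch form:

message GetLastFlushedSequenceIdRequest {
  required bytes regionName = 1;
}

message GetLastFlushedSequenceIdResponse {
  required uint64 lastFlushedSequenceId = 1;
}

service RegionServerStatusService {
  // ... existing rpcs: regionServerStartup, regionServerReport,
  //     reportRSFatalError ...
  rpc getLastFlushedSequenceId(GetLastFlushedSequenceIdRequest)
      returns (GetLastFlushedSequenceIdResponse);
}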
@@ -2969,6 +3730,11 @@ public final class RegionServerStatusProtos {
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse> done);
public abstract void getLastFlushedSequenceId(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse> done);
}
public static com.google.protobuf.Service newReflectiveService(
@@ -2998,6 +3764,14 @@ public final class RegionServerStatusProtos {
impl.reportRSFatalError(controller, request, done);
}
@java.lang.Override
public void getLastFlushedSequenceId(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse> done) {
impl.getLastFlushedSequenceId(controller, request, done);
}
};
}
@@ -3026,6 +3800,8 @@ public final class RegionServerStatusProtos {
return impl.regionServerReport(controller, (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest)request);
case 2:
return impl.reportRSFatalError(controller, (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest)request);
case 3:
return impl.getLastFlushedSequenceId(controller, (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -3046,6 +3822,8 @@ public final class RegionServerStatusProtos {
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.getDefaultInstance();
case 2:
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.getDefaultInstance();
case 3:
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -3066,6 +3844,8 @@ public final class RegionServerStatusProtos {
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.getDefaultInstance();
case 2:
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.getDefaultInstance();
case 3:
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -3089,6 +3869,11 @@ public final class RegionServerStatusProtos {
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse> done);
public abstract void getLastFlushedSequenceId(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse> done);
public static final
com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptor() {
@@ -3126,6 +3911,11 @@ public final class RegionServerStatusProtos {
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse>specializeCallback(
done));
return;
case 3:
this.getLastFlushedSequenceId(controller, (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse>specializeCallback(
done));
return;
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -3146,6 +3936,8 @@ public final class RegionServerStatusProtos {
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.getDefaultInstance();
case 2:
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.getDefaultInstance();
case 3:
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -3166,6 +3958,8 @@ public final class RegionServerStatusProtos {
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.getDefaultInstance();
case 2:
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.getDefaultInstance();
case 3:
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -3231,6 +4025,21 @@ public final class RegionServerStatusProtos {
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.class,
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.getDefaultInstance()));
}
public void getLastFlushedSequenceId(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse> done) {
channel.callMethod(
getDescriptor().getMethods().get(3),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.class,
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDefaultInstance()));
}
}
public static BlockingInterface newBlockingStub(
@@ -3253,6 +4062,11 @@ public final class RegionServerStatusProtos {
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse getLastFlushedSequenceId(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest request)
throws com.google.protobuf.ServiceException;
}
private static final class BlockingStub implements BlockingInterface {
@@ -3297,6 +4111,18 @@ public final class RegionServerStatusProtos {
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.getDefaultInstance());
}
public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse getLastFlushedSequenceId(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse) channel.callBlockingMethod(
getDescriptor().getMethods().get(3),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDefaultInstance());
}
}
}
@@ -3330,6 +4156,16 @@ public final class RegionServerStatusProtos {
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_ReportRSFatalErrorResponse_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_GetLastFlushedSequenceIdRequest_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_GetLastFlushedSequenceIdRequest_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_GetLastFlushedSequenceIdResponse_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_GetLastFlushedSequenceIdResponse_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
@@ -3349,16 +4185,21 @@ public final class RegionServerStatusProtos {
"rverLoad\"\034\n\032RegionServerReportResponse\"N" +
"\n\031ReportRSFatalErrorRequest\022\033\n\006server\030\001 " +
"\002(\0132\013.ServerName\022\024\n\014errorMessage\030\002 \002(\t\"\034",
"\n\032ReportRSFatalErrorResponse2\213\002\n\031RegionS" +
"erverStatusService\022P\n\023regionServerStartu" +
"p\022\033.RegionServerStartupRequest\032\034.RegionS" +
"erverStartupResponse\022M\n\022regionServerRepo" +
"rt\022\032.RegionServerReportRequest\032\033.RegionS" +
"erverReportResponse\022M\n\022reportRSFatalErro" +
"r\022\032.ReportRSFatalErrorRequest\032\033.ReportRS" +
"FatalErrorResponseBN\n*org.apache.hadoop." +
"hbase.protobuf.generatedB\030RegionServerSt" +
"atusProtosH\001\210\001\001\240\001\001"
"\n\032ReportRSFatalErrorResponse\"5\n\037GetLastF" +
"lushedSequenceIdRequest\022\022\n\nregionName\030\001 " +
"\002(\014\"A\n GetLastFlushedSequenceIdResponse\022" +
"\035\n\025lastFlushedSequenceId\030\001 \002(\0042\354\002\n\031Regio" +
"nServerStatusService\022P\n\023regionServerStar" +
"tup\022\033.RegionServerStartupRequest\032\034.Regio" +
"nServerStartupResponse\022M\n\022regionServerRe" +
"port\022\032.RegionServerReportRequest\032\033.Regio" +
"nServerReportResponse\022M\n\022reportRSFatalEr" +
"ror\022\032.ReportRSFatalErrorRequest\032\033.Report",
"RSFatalErrorResponse\022_\n\030getLastFlushedSe" +
"quenceId\022 .GetLastFlushedSequenceIdReque" +
"st\032!.GetLastFlushedSequenceIdResponseBN\n" +
"*org.apache.hadoop.hbase.protobuf.genera" +
"tedB\030RegionServerStatusProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -3413,6 +4254,22 @@ public final class RegionServerStatusProtos {
new java.lang.String[] { },
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.class,
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.Builder.class);
internal_static_GetLastFlushedSequenceIdRequest_descriptor =
getDescriptor().getMessageTypes().get(6);
internal_static_GetLastFlushedSequenceIdRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_GetLastFlushedSequenceIdRequest_descriptor,
new java.lang.String[] { "RegionName", },
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.class,
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.Builder.class);
internal_static_GetLastFlushedSequenceIdResponse_descriptor =
getDescriptor().getMessageTypes().get(7);
internal_static_GetLastFlushedSequenceIdResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_GetLastFlushedSequenceIdResponse_descriptor,
new java.lang.String[] { "LastFlushedSequenceId", },
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.class,
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.Builder.class);
return null;
}
};

View File

@@ -79,6 +79,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.NotServingRegionException;
@@ -190,6 +191,8 @@ public class HRegion implements HeapSize { // , Writable{
*/
final AtomicBoolean closing = new AtomicBoolean(false);
/** Sequence id of the last completed cache flush; reported to the master in RegionLoad. */
protected long completeSequenceId = -1L;
//////////////////////////////////////////////////////////////////////////////
// Members
//////////////////////////////////////////////////////////////////////////////
@@ -1478,7 +1481,6 @@ public class HRegion implements HeapSize { // , Writable{
// again so its value will represent the size of the updates received
// during the flush
long sequenceId = -1L;
long completeSequenceId = -1L;
MultiVersionConsistencyControl.WriteEntry w = null;
// We have to take a write lock during snapshot, or else a write could
@@ -1489,6 +1491,7 @@ public class HRegion implements HeapSize { // , Writable{
long flushsize = this.memstoreSize.get();
status.setStatus("Preparing to flush by snapshotting stores");
List<StoreFlusher> storeFlushers = new ArrayList<StoreFlusher>(stores.size());
long completeSeqId = -1L;
try {
// Record the mvcc for all transactions in progress.
w = mvcc.beginMemstoreInsert();
@@ -1496,10 +1499,9 @@ public class HRegion implements HeapSize { // , Writable{
sequenceId = (wal == null)? myseqid:
wal.startCacheFlush(this.regionInfo.getEncodedNameAsBytes());
completeSequenceId = this.getCompleteCacheFlushSequenceId(sequenceId);
completeSeqId = this.getCompleteCacheFlushSequenceId(sequenceId);
for (Store s : stores.values()) {
storeFlushers.add(s.getStoreFlusher(completeSequenceId));
storeFlushers.add(s.getStoreFlusher(completeSeqId));
}
// prepare flush (take a snapshot)
@@ -1577,10 +1579,15 @@ public class HRegion implements HeapSize { // , Writable{
// log-sequence-ids can be safely ignored.
if (wal != null) {
wal.completeCacheFlush(this.regionInfo.getEncodedNameAsBytes(),
regionInfo.getTableName(), completeSequenceId,
regionInfo.getTableName(), completeSeqId,
this.getRegionInfo().isMetaRegion());
}
// Update the last flushed sequence id for the region so it can be reported to the master in RegionLoad
if (this.rsServices != null) {
completeSequenceId = completeSeqId;
}
// C. Finally notify anyone waiting on memstore to clear:
// e.g. checkResources().
synchronized (this) {
@@ -5010,7 +5017,7 @@ public class HRegion implements HeapSize { // , Writable{
ClassSize.OBJECT +
ClassSize.ARRAY +
36 * ClassSize.REFERENCE + Bytes.SIZEOF_INT +
(6 * Bytes.SIZEOF_LONG) +
(7 * Bytes.SIZEOF_LONG) +
Bytes.SIZEOF_BOOLEAN);
public static final long DEEP_OVERHEAD = FIXED_OVERHEAD +
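Note: the 6 -> 7 SIZEOF_LONG bump above keeps HRegion's hand-maintained FIXED_OVERHEAD estimate in step with the new completeSequenceId long field. A minimal sketch of the kind of consistency check this constant feeds, hypothetical and simplified (HBase's real TestHeapSize compares against ClassSize.estimateBase via reflection):

import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;

/** Hypothetical check that FIXED_OVERHEAD still matches the field layout. */
public class FixedOverheadCheck {
  public static void main(String[] args) {
    // Recompute the constant by hand: every primitive field added to
    // HRegion (like completeSequenceId) must show up in this sum.
    long expected = ClassSize.align(
        ClassSize.OBJECT + ClassSize.ARRAY
        + 36 * ClassSize.REFERENCE + Bytes.SIZEOF_INT
        + (7 * Bytes.SIZEOF_LONG) + Bytes.SIZEOF_BOOLEAN);
    if (expected != HRegion.FIXED_OVERHEAD) {
      throw new AssertionError("FIXED_OVERHEAD is out of date: " + expected);
    }
  }
}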

View File

@@ -49,6 +49,7 @@ import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import javax.management.ObjectName;
@@ -114,6 +115,7 @@ import org.apache.hadoop.hbase.ipc.ProtocolSignature;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
@@ -214,6 +216,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
@@ -232,7 +235,7 @@ import com.google.protobuf.RpcController;
@InterfaceAudience.Private
@SuppressWarnings("deprecation")
public class HRegionServer implements ClientProtocol,
AdminProtocol, Runnable, RegionServerServices, HBaseRPCErrorHandler {
AdminProtocol, Runnable, RegionServerServices, HBaseRPCErrorHandler, LastSequenceId {
public static final Log LOG = LogFactory.getLog(HRegionServer.class);
@@ -1248,7 +1251,8 @@ public class HRegionServer implements ClientProtocol,
.setReadRequestsCount((int) r.readRequestsCount.get())
.setWriteRequestsCount((int) r.writeRequestsCount.get())
.setTotalCompactingKVs(totalCompactingKVs)
.setCurrentCompactedKVs(currentCompactedKVs);
.setCurrentCompactedKVs(currentCompactedKVs)
.setCompleteSequenceId(r.completeSequenceId);
Set<String> coprocessors = r.getCoprocessorHost().getCoprocessors();
for (String coprocessor : coprocessors) {
regionLoad.addCoprocessors(
@@ -1622,7 +1626,7 @@ public class HRegionServer implements ClientProtocol,
// Create the log splitting worker and start it
this.splitLogWorker = new SplitLogWorker(this.zooKeeper,
this.getConfiguration(), this.getServerName());
this.getConfiguration(), this.getServerName(), this);
splitLogWorker.start();
}
@@ -1969,6 +1973,22 @@ public class HRegionServer implements ClientProtocol,
return result;
}
@Override
public long getLastSequenceId(byte[] region) {
Long lastFlushedSequenceId = -1L;
try {
GetLastFlushedSequenceIdRequest req =
RequestConverter.buildGetLastFlushedSequenceIdRequest(region);
lastFlushedSequenceId = hbaseMaster.getLastFlushedSequenceId(null, req)
.getLastFlushedSequenceId();
} catch (ServiceException e) {
lastFlushedSequenceId = -1L;
LOG.warn("Unable to connect to the master to check " +
"the last flushed sequence id", e);
}
return lastFlushedSequenceId;
}
/**
* Closes all regions. Called on our way out.
* Assumes that it's not possible for new regions to be added to onlineRegions

View File

@@ -0,0 +1,33 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Last flushed sequence ids for the regions on a region server
*/
@InterfaceAudience.Private
public interface LastSequenceId {
/**
* @param regionname encoded region name
* @return Last flushed sequence id for regionname, or -1 if none is known
*/
public long getLastSequenceId(byte[] regionname);
}
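For illustration only, a trivial map-backed implementation of this interface (hypothetical; the real implementation in HRegionServer above asks the master over RPC) could look like:

import java.util.concurrent.ConcurrentSkipListMap;

import org.apache.hadoop.hbase.regionserver.LastSequenceId;
import org.apache.hadoop.hbase.util.Bytes;

/** Hypothetical map-backed checker, useful mainly in tests. */
public class MapBackedLastSequenceId implements LastSequenceId {
  private final ConcurrentSkipListMap<byte[], Long> flushedIds =
      new ConcurrentSkipListMap<byte[], Long>(Bytes.BYTES_COMPARATOR);

  /** Record the sequence id of a completed cache flush for a region. */
  public void recordFlush(byte[] regionname, long sequenceId) {
    flushedIds.put(regionname, sequenceId);
  }

  @Override
  public long getLastSequenceId(byte[] regionname) {
    Long sequenceId = flushedIds.get(regionname);
    // -1 means "nothing known"; the splitter then keeps every edit.
    return sequenceId == null ? -1L : sequenceId.longValue();
  }
}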

View File

@@ -30,9 +30,11 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.DeserializationException;
import org.apache.hadoop.hbase.RegionServerStatusProtocol;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.SplitLogCounters;
import org.apache.hadoop.hbase.SplitLogTask;
import org.apache.hadoop.hbase.master.SplitLogManager;
import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -90,7 +92,7 @@ public class SplitLogWorker extends ZooKeeperListener implements Runnable {
}
public SplitLogWorker(ZooKeeperWatcher watcher, final Configuration conf,
final ServerName serverName) {
final ServerName serverName, final LastSequenceId sequenceIdChecker) {
this(watcher, conf, serverName, new TaskExecutor () {
@Override
public Status exec(String filename, CancelableProgressable p) {
@@ -108,7 +110,7 @@ public class SplitLogWorker extends ZooKeeperListener implements Runnable {
// encountered a bad non-retry-able persistent error.
try {
if (HLogSplitter.splitLogFile(rootdir,
fs.getFileStatus(new Path(filename)), fs, conf, p) == false) {
fs.getFileStatus(new Path(filename)), fs, conf, p, sequenceIdChecker) == false) {
return Status.PREEMPTED;
}
} catch (InterruptedIOException iioe) {

View File

@@ -56,7 +56,9 @@ import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.master.SplitLogManager;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.LastSequenceId;
import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry;
import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
import org.apache.hadoop.hbase.regionserver.wal.HLog.Writer;
@@ -120,6 +122,8 @@ public class HLogSplitter {
// Used in distributed log splitting
private DistributedLogSplittingHelper distributedLogSplittingHelper = null;
// For checking the latest flushed sequence id
protected final LastSequenceId sequenceIdChecker;
/**
* Create a new HLogSplitter using the given {@link Configuration} and the
@@ -147,8 +151,9 @@ public class HLogSplitter {
Path.class, // rootDir
Path.class, // srcDir
Path.class, // oldLogDir
FileSystem.class); // fs
return constructor.newInstance(conf, rootDir, srcDir, oldLogDir, fs);
FileSystem.class, // fs
LastSequenceId.class);
return constructor.newInstance(conf, rootDir, srcDir, oldLogDir, fs, null);
} catch (IllegalArgumentException e) {
throw new RuntimeException(e);
} catch (InstantiationException e) {
@@ -165,12 +170,13 @@ public class HLogSplitter {
}
public HLogSplitter(Configuration conf, Path rootDir, Path srcDir,
Path oldLogDir, FileSystem fs) {
Path oldLogDir, FileSystem fs, LastSequenceId idChecker) {
this.conf = conf;
this.rootDir = rootDir;
this.srcDir = srcDir;
this.oldLogDir = oldLogDir;
this.fs = fs;
this.sequenceIdChecker = idChecker;
entryBuffers = new EntryBuffers(
conf.getInt("hbase.regionserver.hlog.splitlog.buffersize",
@@ -344,6 +350,29 @@ public class HLogSplitter {
return ret;
}
/**
* Splits an HLog file into each region's recovered-edits directory.
* <p>
* If the log file contains edits for N regions, then N recovered.edits files
* will be produced.
* <p>
* @param rootDir the HBase root directory
* @param logfile the log file to split
* @param fs filesystem holding the log
* @param conf configuration to use
* @param reporter reports progress and allows cancellation
* @param idChecker source of last flushed sequence ids, used to skip
* already-persisted edits; may be null
* @return false if splitting is interrupted by the reporter.
* @throws IOException
*/
static public boolean splitLogFile(Path rootDir, FileStatus logfile,
FileSystem fs, Configuration conf, CancelableProgressable reporter,
LastSequenceId idChecker)
throws IOException {
HLogSplitter s = new HLogSplitter(conf, rootDir, null, null /* oldLogDir */, fs, idChecker);
return s.splitLogFile(logfile, reporter);
}
/**
* Splits an HLog file into each region's recovered-edits directory
* <p>
@@ -361,8 +390,7 @@ public class HLogSplitter {
static public boolean splitLogFile(Path rootDir, FileStatus logfile,
FileSystem fs, Configuration conf, CancelableProgressable reporter)
throws IOException {
HLogSplitter s = new HLogSplitter(conf, rootDir, null, null /* oldLogDir */, fs);
return s.splitLogFile(logfile, reporter);
return HLogSplitter.splitLogFile(rootDir, logfile, fs, conf, reporter, null);
}
public boolean splitLogFile(FileStatus logfile,
@@ -402,17 +430,34 @@ public class HLogSplitter {
outputSink.startWriterThreads();
// Report progress every so many edits and/or files opened (opening a file
// takes a bit of time).
int editsCount = 0;
Map<byte[], Long> lastFlushedSequenceIds =
new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
Entry entry;
int editsCount = 0;
int editsSkipped = 0;
try {
while ((entry = getNextLogLine(in, logPath, skipErrors)) != null) {
byte[] region = entry.getKey().getEncodedRegionName();
Long lastFlushedSequenceId = -1L;
if (sequenceIdChecker != null) {
lastFlushedSequenceId = lastFlushedSequenceIds.get(region);
if (lastFlushedSequenceId == null) {
lastFlushedSequenceId = sequenceIdChecker.getLastSequenceId(region);
lastFlushedSequenceIds.put(region, lastFlushedSequenceId);
}
}
if (lastFlushedSequenceId >= entry.getKey().getLogSeqNum()) {
editsSkipped++;
continue;
}
entryBuffers.appendEntry(entry);
editsCount++;
// If sufficient edits have passed, check if we should report progress.
if (editsCount % interval == 0
|| (outputSink.logWriters.size() - numOpenedFilesLastCheck) > numOpenedFilesBeforeReporting) {
numOpenedFilesLastCheck = outputSink.logWriters.size();
String countsStr = "edits=" + editsCount;
String countsStr = (editsCount - editsSkipped) +
" edits, skipped " + editsSkipped + " edits.";
status.setStatus("Split " + countsStr);
if (!reportProgressIfIsDistributedLogSplitting()) {
return false;
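The loop above is the heart of this change: an edit whose log sequence number is not greater than the region's last flushed sequence id is already durable in an HFile, so the splitter drops it instead of writing it to recovered.edits. A standalone sketch of that decision, under hypothetical names (EditFilter, canSkip), with the same one-master-lookup-per-region caching as the loop:

import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.hbase.regionserver.LastSequenceId;
import org.apache.hadoop.hbase.util.Bytes;

/** Hypothetical standalone version of the skip test used in the split loop. */
public class EditFilter {
  private final LastSequenceId sequenceIdChecker; // null when no checker is available
  private final Map<byte[], Long> lastFlushedSequenceIds =
      new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);

  public EditFilter(LastSequenceId sequenceIdChecker) {
    this.sequenceIdChecker = sequenceIdChecker;
  }

  /** @return true if the edit is already flushed to an HFile and can be skipped. */
  public boolean canSkip(byte[] encodedRegionName, long logSeqNum) {
    if (sequenceIdChecker == null) {
      return false; // no checker: keep every edit, the pre-existing behavior
    }
    Long lastFlushed = lastFlushedSequenceIds.get(encodedRegionName);
    if (lastFlushed == null) {
      // Cache the answer so each region costs at most one master round trip.
      lastFlushed = sequenceIdChecker.getLastSequenceId(encodedRegionName);
      lastFlushedSequenceIds.put(encodedRegionName, lastFlushed);
    }
    return lastFlushed >= logSeqNum;
  }
}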

View File

@@ -67,6 +67,16 @@ message ReportRSFatalErrorRequest {
message ReportRSFatalErrorResponse {
}
message GetLastFlushedSequenceIdRequest {
/** region name */
required bytes regionName = 1;
}
message GetLastFlushedSequenceIdResponse {
/** the last HLog sequence id flushed from MemStore to HFile for the region */
required uint64 lastFlushedSequenceId = 1;
}
service RegionServerStatusService {
/** Called when a region server first starts. */
rpc regionServerStartup(RegionServerStartupRequest)
@@ -82,4 +92,10 @@ service RegionServerStatusService {
*/
rpc reportRSFatalError(ReportRSFatalErrorRequest)
returns(ReportRSFatalErrorResponse);
/** Called to get the sequence id of the last MemStore entry flushed to an
* HFile for a specified region. Used by the region server to speed up
* log splitting. */
rpc getLastFlushedSequenceId(GetLastFlushedSequenceIdRequest)
returns(GetLastFlushedSequenceIdResponse);
}
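A round trip through the two new messages, sketched with the generated builder API (RequestConverter.buildGetLastFlushedSequenceIdRequest elsewhere in this commit wraps the same builder call; the helper class and method names here are hypothetical):

import com.google.protobuf.ByteString;
import com.google.protobuf.ServiceException;

import org.apache.hadoop.hbase.RegionServerStatusProtocol;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;

/** Hypothetical helper showing the request/response round trip. */
public final class LastFlushedSequenceIdFetcher {
  /** Ask the master for one region's last flushed sequence id. */
  public static long fetch(RegionServerStatusProtocol master, byte[] regionName)
      throws ServiceException {
    GetLastFlushedSequenceIdRequest req = GetLastFlushedSequenceIdRequest.newBuilder()
        .setRegionName(ByteString.copyFrom(regionName)) // the required bytes field above
        .build();
    // The response carries the required uint64 lastFlushedSequenceId field.
    return master.getLastFlushedSequenceId(null, req).getLastFlushedSequenceId();
  }
}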

View File

@@ -132,6 +132,9 @@ message RegionLoad {
/** Region-level coprocessors. */
repeated Coprocessor coprocessors = 15;
/** the most recent sequence id from a cache flush */
optional uint64 completeSequenceId = 16;
}
/* Server-level protobufs */

View File

@@ -0,0 +1,157 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver.wal;
import static junit.framework.Assert.assertEquals;
import java.io.IOException;
import java.util.List;
import java.util.Random;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import com.google.common.collect.Lists;
import com.google.protobuf.ServiceException;
@Category(MediumTests.class)
public class TestHLogFiltering {
private static final Log LOG = LogFactory.getLog(TestHLogFiltering.class);
private static final int NUM_MASTERS = 1;
private static final int NUM_RS = 4;
private static final byte[] TABLE_NAME = Bytes.toBytes("TestHLogFiltering");
private static final byte[] CF1 = Bytes.toBytes("MyCF1");
private static final byte[] CF2 = Bytes.toBytes("MyCF2");
private static final byte[][] FAMILIES = { CF1, CF2 };
private HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
@Before
public void setUp() throws Exception {
TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
fillTable();
}
@After
public void tearDown() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
private void fillTable() throws IOException, InterruptedException {
HTable table = TEST_UTIL.createTable(TABLE_NAME, FAMILIES, 3,
Bytes.toBytes("row0"), Bytes.toBytes("row99"), NUM_RS);
Random rand = new Random(19387129L);
for (int iStoreFile = 0; iStoreFile < 4; ++iStoreFile) {
for (int iRow = 0; iRow < 100; ++iRow) {
final byte[] row = Bytes.toBytes("row" + iRow);
Put put = new Put(row);
Delete del = new Delete(row);
for (int iCol = 0; iCol < 10; ++iCol) {
final byte[] cf = rand.nextBoolean() ? CF1 : CF2;
final long ts = rand.nextInt();
final byte[] qual = Bytes.toBytes("col" + iCol);
if (rand.nextBoolean()) {
final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
"_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
ts + "_random_" + rand.nextLong());
put.add(cf, qual, ts, value);
} else if (rand.nextDouble() < 0.8) {
del.deleteColumn(cf, qual, ts);
} else {
del.deleteColumns(cf, qual, ts);
}
}
table.put(put);
table.delete(del);
table.flushCommits();
}
}
TEST_UTIL.waitUntilAllRegionsAssigned(NUM_RS);
}
@Test
public void testFlushedSequenceIdsSentToHMaster()
throws IOException, InterruptedException, ServiceException {
SortedMap<byte[], Long> allFlushedSequenceIds =
new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
for (int i = 0; i < NUM_RS; ++i) {
flushAllRegions(i);
}
Thread.sleep(10000); // give the region servers time to report the flushed ids to the master
HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
for (int i = 0; i < NUM_RS; ++i) {
for (byte[] regionName : getRegionsByServer(i)) {
if (allFlushedSequenceIds.containsKey(regionName)) {
GetLastFlushedSequenceIdRequest req =
RequestConverter.buildGetLastFlushedSequenceIdRequest(regionName);
assertEquals((long)allFlushedSequenceIds.get(regionName),
master.getLastFlushedSequenceId(null, req).getLastFlushedSequenceId());
}
}
}
}
private List<byte[]> getRegionsByServer(int rsId) throws IOException {
List<byte[]> regionNames = Lists.newArrayList();
HRegionServer hrs = getRegionServer(rsId);
for (HRegion r : hrs.getOnlineRegions(TABLE_NAME)) {
regionNames.add(r.getRegionName());
}
return regionNames;
}
private HRegionServer getRegionServer(int rsId) {
return TEST_UTIL.getMiniHBaseCluster().getRegionServer(rsId);
}
private void flushAllRegions(int rsId)
throws ServiceException, IOException {
HRegionServer hrs = getRegionServer(rsId);
for (byte[] regionName : getRegionsByServer(rsId)) {
FlushRegionRequest request =
RequestConverter.buildFlushRegionRequest(regionName);
hrs.flushRegion(null, request);
}
}
@org.junit.Rule
public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}

View File

@@ -665,13 +665,12 @@ public class TestHLogSplit {
fs.initialize(fs.getUri(), conf);
// Set up a splitter that will throw an IOE on the output side
HLogSplitter logSplitter = new HLogSplitter(
conf, hbaseDir, hlogDir, oldLogDir, fs) {
conf, hbaseDir, hlogDir, oldLogDir, fs, null) {
protected HLog.Writer createWriter(FileSystem fs, Path logfile, Configuration conf)
throws IOException {
HLog.Writer mockWriter = Mockito.mock(HLog.Writer.class);
Mockito.doThrow(new IOException("Injected")).when(mockWriter).append(Mockito.<HLog.Entry>any());
return mockWriter;
}
};
try {
@@ -698,7 +697,7 @@ public class TestHLogSplit {
when(spiedFs).append(Mockito.<Path>any());
HLogSplitter logSplitter = new HLogSplitter(
conf, hbaseDir, hlogDir, oldLogDir, spiedFs);
conf, hbaseDir, hlogDir, oldLogDir, spiedFs, null);
try {
logSplitter.splitLog();
@@ -756,7 +755,7 @@ public class TestHLogSplit {
// Create a splitter that reads and writes the data without touching disk
HLogSplitter logSplitter = new HLogSplitter(
localConf, hbaseDir, hlogDir, oldLogDir, fs) {
localConf, hbaseDir, hlogDir, oldLogDir, fs, null) {
/* Produce a mock writer that doesn't write anywhere */
protected HLog.Writer createWriter(FileSystem fs, Path logfile, Configuration conf)
@@ -1015,7 +1014,7 @@ public class TestHLogSplit {
generateHLogs(1, 10, -1);
FileStatus logfile = fs.listStatus(hlogDir)[0];
fs.initialize(fs.getUri(), conf);
HLogSplitter.splitLogFile(hbaseDir, logfile, fs, conf, reporter);
HLogSplitter.splitLogFile(hbaseDir, logfile, fs, conf, reporter, null);
HLogSplitter.finishSplitLogFile(hbaseDir, oldLogDir, logfile.getPath()
.toString(), conf);
@@ -1122,7 +1121,7 @@ public class TestHLogSplit {
generateHLogs(-1);
HLogSplitter logSplitter = new HLogSplitter(
conf, hbaseDir, hlogDir, oldLogDir, fs) {
conf, hbaseDir, hlogDir, oldLogDir, fs, null) {
protected HLog.Writer createWriter(FileSystem fs, Path logfile, Configuration conf)
throws IOException {
HLog.Writer writer = HLog.createWriter(fs, logfile, conf);