From f8c7f1b0fb064fb082f4cb386672ad2cf4a42094 Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Mon, 23 Apr 2012 18:12:16 +0000 Subject: [PATCH] HBASE-5443 Convert admin protocol of HRegionInterface to PB git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1329358 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop/hbase/ipc/SecureRpcEngine.java | 24 +- .../apache/hadoop/hbase/ipc/SecureServer.java | 2 +- .../hadoop/hbase/catalog/CatalogTracker.java | 38 +- .../{protobuf => client}/AdminProtocol.java | 2 +- .../{protobuf => client}/ClientProtocol.java | 2 +- .../hadoop/hbase/client/HBaseAdmin.java | 101 +- .../hadoop/hbase/client/HConnection.java | 32 +- .../hbase/client/HConnectionManager.java | 121 +- .../apache/hadoop/hbase/client/HTable.java | 21 +- .../hadoop/hbase/client/ServerCallable.java | 2 +- .../hadoop/hbase/ipc/ExecRPCInvoker.java | 10 +- .../apache/hadoop/hbase/ipc/Invocation.java | 8 +- .../apache/hadoop/hbase/ipc/RpcEngine.java | 2 + .../hadoop/hbase/ipc/WritableRpcEngine.java | 13 + .../mapreduce/LoadIncrementalHFiles.java | 10 +- .../hbase/master/AssignmentManager.java | 48 +- .../hadoop/hbase/master/ServerManager.java | 39 +- .../hadoop/hbase/protobuf/ProtobufUtil.java | 404 +++++- .../hbase/protobuf/RequestConverter.java | 332 ++++- .../hbase/protobuf/ResponseConverter.java | 17 +- .../hbase/protobuf/generated/AdminProtos.java | 1123 ++++++++--------- .../hbase/regionserver/HRegionServer.java | 113 +- .../regionserver/HRegionThriftServer.java | 19 +- .../hbase/regionserver/RegionServer.java | 492 +++++++- .../regionserver/ReplicationSource.java | 16 +- .../apache/hadoop/hbase/util/HBaseFsck.java | 9 +- .../hadoop/hbase/util/HBaseFsckRepair.java | 14 +- .../hbase/util/SortedCopyOnWriteSet.java | 2 +- src/main/protobuf/Admin.proto | 18 +- .../hbase/catalog/TestCatalogTracker.java | 77 +- .../TestMetaReaderEditorNoCluster.java | 2 +- .../client/HConnectionTestingUtility.java | 23 +- .../apache/hadoop/hbase/client/TestAdmin.java | 12 +- .../hbase/client/TestFromClientSide3.java | 48 +- .../hadoop/hbase/client/TestHTableUtil.java | 1 - ...estLoadIncrementalHFilesSplitRecovery.java | 16 +- .../hadoop/hbase/master/MockRegionServer.java | 427 ++----- .../hbase/master/TestAssignmentManager.java | 2 +- .../hbase/master/TestCatalogJanitor.java | 10 +- .../hadoop/hbase/master/TestMaster.java | 2 - .../hbase/regionserver/TestHRegion.java | 1 - .../TestHRegionServerBulkLoad.java | 10 +- .../hadoop/hbase/util/TestHBaseFsck.java | 13 +- 43 files changed, 2278 insertions(+), 1400 deletions(-) rename src/main/java/org/apache/hadoop/hbase/{protobuf => client}/AdminProtocol.java (97%) rename src/main/java/org/apache/hadoop/hbase/{protobuf => client}/ClientProtocol.java (97%) diff --git a/security/src/main/java/org/apache/hadoop/hbase/ipc/SecureRpcEngine.java b/security/src/main/java/org/apache/hadoop/hbase/ipc/SecureRpcEngine.java index 39d20e6f941..8383d6c8e88 100644 --- a/security/src/main/java/org/apache/hadoop/hbase/ipc/SecureRpcEngine.java +++ b/security/src/main/java/org/apache/hadoop/hbase/ipc/SecureRpcEngine.java @@ -212,11 +212,25 @@ public class SecureRpcEngine implements RpcEngine { (VersionedProtocol) Proxy.newProxyInstance( protocol.getClassLoader(), new Class[] { protocol }, new Invoker(protocol, addr, ticket, conf, factory, rpcTimeout)); - long serverVersion = proxy.getProtocolVersion(protocol.getName(), - clientVersion); - if (serverVersion != clientVersion) { - throw new HBaseRPC.VersionMismatch(protocol.getName(), clientVersion, - serverVersion); + try { + long 
serverVersion = proxy.getProtocolVersion(protocol.getName(), + clientVersion); + if (serverVersion != clientVersion) { + throw new HBaseRPC.VersionMismatch(protocol.getName(), clientVersion, + serverVersion); + } + } catch (Throwable t) { + if (t instanceof UndeclaredThrowableException) { + t = t.getCause(); + } + if (t instanceof ServiceException) { + throw ProtobufUtil.getRemoteException((ServiceException)t); + } + if (!(t instanceof IOException)) { + LOG.error("Unexpected throwable object ", t); + throw new IOException(t); + } + throw (IOException)t; } return proxy; } diff --git a/security/src/main/java/org/apache/hadoop/hbase/ipc/SecureServer.java b/security/src/main/java/org/apache/hadoop/hbase/ipc/SecureServer.java index 4a298bed06b..8f285560243 100644 --- a/security/src/main/java/org/apache/hadoop/hbase/ipc/SecureServer.java +++ b/security/src/main/java/org/apache/hadoop/hbase/ipc/SecureServer.java @@ -85,7 +85,7 @@ public abstract class SecureServer extends HBaseServer { // 3 : Introduce the protocol into the RPC connection header // 4 : Introduced SASL security layer public static final byte CURRENT_VERSION = 4; - public static final Set INSECURE_VERSIONS = ImmutableSet.of((byte) 3); + public static final Set INSECURE_VERSIONS = ImmutableSet.of((byte) 5); public static final Log LOG = LogFactory.getLog("org.apache.hadoop.ipc.SecureServer"); private static final Log AUDITLOG = diff --git a/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java b/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java index 408db79d5b0..bafec30d3d2 100644 --- a/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java +++ b/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java @@ -34,11 +34,13 @@ import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.AdminProtocol; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; +import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.RetriesExhaustedException; -import org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.MetaNodeTracker; import org.apache.hadoop.hbase.zookeeper.RootRegionTracker; @@ -340,7 +342,7 @@ public class CatalogTracker { * @throws IOException * @deprecated Use #getRootServerConnection(long) */ - public HRegionInterface waitForRootServerConnection(long timeout) + public AdminProtocol waitForRootServerConnection(long timeout) throws InterruptedException, NotAllMetaRegionsOnlineException, IOException { return getRootServerConnection(timeout); } @@ -356,7 +358,7 @@ public class CatalogTracker { * @throws NotAllMetaRegionsOnlineException if timed out waiting * @throws IOException */ - HRegionInterface getRootServerConnection(long timeout) + AdminProtocol getRootServerConnection(long timeout) throws InterruptedException, NotAllMetaRegionsOnlineException, IOException { return getCachedConnection(waitForRoot(timeout)); } @@ -370,7 +372,7 @@ public class CatalogTracker { * @throws IOException * @deprecated Use #getRootServerConnection(long) */ - public HRegionInterface waitForRootServerConnectionDefault() + public AdminProtocol 
waitForRootServerConnectionDefault() throws NotAllMetaRegionsOnlineException, IOException { try { return getRootServerConnection(this.defaultTimeout); @@ -395,11 +397,11 @@ public class CatalogTracker { * @throws IOException * @throws InterruptedException */ - private HRegionInterface getMetaServerConnection() + private AdminProtocol getMetaServerConnection() throws IOException, InterruptedException { synchronized (metaAvailable) { if (metaAvailable.get()) { - HRegionInterface current = getCachedConnection(this.metaLocation); + AdminProtocol current = getCachedConnection(this.metaLocation); // If we are to refresh, verify we have a good connection by making // an invocation on it. if (verifyRegionLocation(current, this.metaLocation, META_REGION_NAME)) { @@ -416,7 +418,7 @@ public class CatalogTracker { ServerName newLocation = MetaReader.getMetaRegionLocation(this); if (newLocation == null) return null; - HRegionInterface newConnection = getCachedConnection(newLocation); + AdminProtocol newConnection = getCachedConnection(newLocation); if (verifyRegionLocation(newConnection, newLocation, META_REGION_NAME)) { setMetaLocation(newLocation); return newConnection; @@ -495,7 +497,7 @@ public class CatalogTracker { * @throws IOException * @deprecated Does not retry; use an HTable instance instead. */ - public HRegionInterface waitForMetaServerConnection(long timeout) + public AdminProtocol waitForMetaServerConnection(long timeout) throws InterruptedException, NotAllMetaRegionsOnlineException, IOException { return getCachedConnection(waitForMeta(timeout)); } @@ -510,7 +512,7 @@ public class CatalogTracker { * @throws IOException * @deprecated Does not retry; use an HTable instance instead. */ - public HRegionInterface waitForMetaServerConnectionDefault() + public AdminProtocol waitForMetaServerConnectionDefault() throws NotAllMetaRegionsOnlineException, IOException { try { return getCachedConnection(waitForMeta(defaultTimeout)); @@ -546,19 +548,19 @@ public class CatalogTracker { /** * @param sn ServerName to get a connection against. - * @return The HRegionInterface we got when we connected to sn + * @return The AdminProtocol we got when we connected to sn * May have come from cache, may not be good, may have been setup by this * invocation, or may be null. * @throws IOException */ - private HRegionInterface getCachedConnection(ServerName sn) + private AdminProtocol getCachedConnection(ServerName sn) throws IOException { if (sn == null) { return null; } - HRegionInterface protocol = null; + AdminProtocol protocol = null; try { - protocol = connection.getHRegionConnection(sn.getHostname(), sn.getPort()); + protocol = connection.getAdmin(sn.getHostname(), sn.getPort()); } catch (RetriesExhaustedException e) { if (e.getCause() != null && e.getCause() instanceof ConnectException) { // Catch this; presume it means the cached connection has gone bad. @@ -599,11 +601,11 @@ public class CatalogTracker { * the Interface. * @throws IOException */ - // TODO: We should be able to get the ServerName from the HRegionInterface + // TODO: We should be able to get the ServerName from the AdminProtocol // rather than have to pass it in. Its made awkward by the fact that the // HRI is likely a proxy against remote server so the getServerName needs // to be fixed to go to a local method or to a cache before we can do this. 
- private boolean verifyRegionLocation(HRegionInterface hostingServer, + private boolean verifyRegionLocation(AdminProtocol hostingServer, final ServerName address, final byte [] regionName) throws IOException { if (hostingServer == null) { @@ -613,7 +615,7 @@ public class CatalogTracker { Throwable t = null; try { // Try and get regioninfo from the hosting server. - return hostingServer.getRegionInfo(regionName) != null; + return ProtobufUtil.getRegionInfo(hostingServer, regionName) != null; } catch (ConnectException e) { t = e; } catch (RetriesExhaustedException e) { @@ -647,7 +649,7 @@ public class CatalogTracker { */ public boolean verifyRootRegionLocation(final long timeout) throws InterruptedException, IOException { - HRegionInterface connection = null; + AdminProtocol connection = null; try { connection = waitForRootServerConnection(timeout); } catch (NotAllMetaRegionsOnlineException e) { @@ -672,7 +674,7 @@ public class CatalogTracker { */ public boolean verifyMetaRegionLocation(final long timeout) throws InterruptedException, IOException { - HRegionInterface connection = null; + AdminProtocol connection = null; try { connection = waitForMetaServerConnection(timeout); } catch (NotAllMetaRegionsOnlineException e) { diff --git a/src/main/java/org/apache/hadoop/hbase/protobuf/AdminProtocol.java b/src/main/java/org/apache/hadoop/hbase/client/AdminProtocol.java similarity index 97% rename from src/main/java/org/apache/hadoop/hbase/protobuf/AdminProtocol.java rename to src/main/java/org/apache/hadoop/hbase/client/AdminProtocol.java index 422e8658774..bc37197ca19 100644 --- a/src/main/java/org/apache/hadoop/hbase/protobuf/AdminProtocol.java +++ b/src/main/java/org/apache/hadoop/hbase/client/AdminProtocol.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.protobuf; +package org.apache.hadoop.hbase.client; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.ipc.VersionedProtocol; diff --git a/src/main/java/org/apache/hadoop/hbase/protobuf/ClientProtocol.java b/src/main/java/org/apache/hadoop/hbase/client/ClientProtocol.java similarity index 97% rename from src/main/java/org/apache/hadoop/hbase/protobuf/ClientProtocol.java rename to src/main/java/org/apache/hadoop/hbase/client/ClientProtocol.java index 3d6a23aeeb4..1745aa3ed6e 100644 --- a/src/main/java/org/apache/hadoop/hbase/protobuf/ClientProtocol.java +++ b/src/main/java/org/apache/hadoop/hbase/client/ClientProtocol.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hbase.protobuf; +package org.apache.hadoop.hbase.client; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index ee16e720d32..9a29561bfa7 100644 --- a/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -53,13 +53,21 @@ import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.catalog.MetaReader; +import org.apache.hadoop.hbase.client.AdminProtocol; +import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; import org.apache.hadoop.hbase.ipc.HMasterInterface; -import org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.ClientProtocol; import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.protobuf.ResponseConverter; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException; @@ -71,6 +79,7 @@ import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.util.StringUtils; import org.apache.zookeeper.KeeperException; +import com.google.protobuf.ByteString; import com.google.protobuf.ServiceException; /** @@ -1092,20 +1101,26 @@ public class HBaseAdmin implements Abortable, Closeable { */ public boolean closeRegionWithEncodedRegionName(final String encodedRegionName, final String serverName) throws IOException { - byte[] encodedRegionNameInBytes = Bytes.toBytes(encodedRegionName); if (null == serverName || ("").equals(serverName.trim())) { throw new IllegalArgumentException( "The servername cannot be null or empty."); } ServerName sn = new ServerName(serverName); - HRegionInterface rs = this.connection.getHRegionConnection( + AdminProtocol admin = this.connection.getAdmin( sn.getHostname(), sn.getPort()); // Close the region without updating zk state. 
- boolean isRegionClosed = rs.closeRegion(encodedRegionNameInBytes, false); - if (false == isRegionClosed) { - LOG.error("Not able to close the region " + encodedRegionName + "."); + CloseRegionRequest request = + RequestConverter.buildCloseRegionRequest(encodedRegionName, false); + try { + CloseRegionResponse response = admin.closeRegion(null, request); + boolean isRegionClosed = response.getClosed(); + if (false == isRegionClosed) { + LOG.error("Not able to close the region " + encodedRegionName + "."); + } + return isRegionClosed; + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); } - return isRegionClosed; } /** @@ -1117,10 +1132,10 @@ public class HBaseAdmin implements Abortable, Closeable { */ public void closeRegion(final ServerName sn, final HRegionInfo hri) throws IOException { - HRegionInterface rs = - this.connection.getHRegionConnection(sn.getHostname(), sn.getPort()); + AdminProtocol admin = + this.connection.getAdmin(sn.getHostname(), sn.getPort()); // Close the region without updating zk state. - rs.closeRegion(hri, false); + ProtobufUtil.closeRegion(admin, hri.getRegionName(), false); } /** @@ -1183,9 +1198,15 @@ public class HBaseAdmin implements Abortable, Closeable { private void flush(final ServerName sn, final HRegionInfo hri) throws IOException { - HRegionInterface rs = - this.connection.getHRegionConnection(sn.getHostname(), sn.getPort()); - rs.flushRegion(hri); + AdminProtocol admin = + this.connection.getAdmin(sn.getHostname(), sn.getPort()); + FlushRegionRequest request = + RequestConverter.buildFlushRegionRequest(hri.getRegionName()); + try { + admin.flushRegion(null, request); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } } /** @@ -1289,9 +1310,15 @@ public class HBaseAdmin implements Abortable, Closeable { private void compact(final ServerName sn, final HRegionInfo hri, final boolean major) throws IOException { - HRegionInterface rs = - this.connection.getHRegionConnection(sn.getHostname(), sn.getPort()); - rs.compactRegion(hri, major); + AdminProtocol admin = + this.connection.getAdmin(sn.getHostname(), sn.getPort()); + CompactRegionRequest request = + RequestConverter.buildCompactRegionRequest(hri.getRegionName(), major); + try { + admin.compactRegion(null, request); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } } /** @@ -1471,9 +1498,15 @@ public class HBaseAdmin implements Abortable, Closeable { private void split(final ServerName sn, final HRegionInfo hri, byte[] splitPoint) throws IOException { - HRegionInterface rs = - this.connection.getHRegionConnection(sn.getHostname(), sn.getPort()); - rs.splitRegion(hri, splitPoint); + AdminProtocol admin = + this.connection.getAdmin(sn.getHostname(), sn.getPort()); + SplitRegionRequest request = + RequestConverter.buildSplitRegionRequest(hri.getRegionName(), splitPoint); + try { + admin.splitRegion(null, request); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } } /** @@ -1572,9 +1605,15 @@ public class HBaseAdmin implements Abortable, Closeable { throws IOException { String hostname = Addressing.parseHostname(hostnamePort); int port = Addressing.parsePort(hostnamePort); - HRegionInterface rs = - this.connection.getHRegionConnection(hostname, port); - rs.stop("Called by admin client " + this.connection.toString()); + AdminProtocol admin = + this.connection.getAdmin(hostname, port); + StopServerRequest request = RequestConverter.buildStopServerRequest( + "Called by admin client " + 
this.connection.toString()); + try { + admin.stopServer(null, request); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } } /** @@ -1715,9 +1754,21 @@ public class HBaseAdmin implements Abortable, Closeable { public synchronized byte[][] rollHLogWriter(String serverName) throws IOException, FailedLogCloseException { ServerName sn = new ServerName(serverName); - HRegionInterface rs = this.connection.getHRegionConnection( + AdminProtocol admin = this.connection.getAdmin( sn.getHostname(), sn.getPort()); - return rs.rollHLogWriter(); + RollWALWriterRequest request = RequestConverter.buildRollWALWriterRequest();; + try { + RollWALWriterResponse response = admin.rollWALWriter(null, request); + int regionCount = response.getRegionToFlushCount(); + byte[][] regionsToFlush = new byte[regionCount][]; + for (int i = 0; i < regionCount; i++) { + ByteString region = response.getRegionToFlush(i); + regionsToFlush[i] = region.toByteArray(); + } + return regionsToFlush; + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } } public String[] getMasterCoprocessors() { diff --git a/src/main/java/org/apache/hadoop/hbase/client/HConnection.java b/src/main/java/org/apache/hadoop/hbase/client/HConnection.java index 23f8e5ac8e0..0ae82c348aa 100644 --- a/src/main/java/org/apache/hadoop/hbase/client/HConnection.java +++ b/src/main/java/org/apache/hadoop/hbase/client/HConnection.java @@ -36,11 +36,11 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.catalog.CatalogTracker; +import org.apache.hadoop.hbase.client.AdminProtocol; +import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.ipc.CoprocessorProtocol; import org.apache.hadoop.hbase.ipc.HMasterInterface; -import org.apache.hadoop.hbase.ipc.HRegionInterface; -import org.apache.hadoop.hbase.protobuf.ClientProtocol; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; /** @@ -199,17 +199,6 @@ public interface HConnection extends Abortable, Closeable { public List locateRegions(byte[] tableName) throws IOException; - /** - * Establishes a connection to the region server at the specified address. - * @param regionServer - the server to connect to - * @return proxy for HRegionServer - * @throws IOException if a remote or network exception occurs - * @deprecated Use {@link #getHRegionConnection(String, int)} - */ - @Deprecated - public HRegionInterface getHRegionConnection(HServerAddress regionServer) - throws IOException; - /** * Establishes a connection to the region server at the specified address. * @param hostname RegionServer hostname @@ -218,7 +207,7 @@ public interface HConnection extends Abortable, Closeable { * @throws IOException if a remote or network exception occurs * */ - public HRegionInterface getHRegionConnection(final String hostname, final int port) + public AdminProtocol getAdmin(final String hostname, final int port) throws IOException; /** @@ -234,19 +223,6 @@ public interface HConnection extends Abortable, Closeable { public ClientProtocol getClient(final String hostname, final int port) throws IOException; - /** - * Establishes a connection to the region server at the specified address. 
- * @param regionServer - the server to connect to - * @param getMaster - do we check if master is alive - * @return proxy for HRegionServer - * @throws IOException if a remote or network exception occurs - * @deprecated Use {@link #getHRegionConnection(String, int)} - */ - @Deprecated - public HRegionInterface getHRegionConnection(HServerAddress regionServer, - boolean getMaster) - throws IOException; - /** * Establishes a connection to the region server at the specified address. * @param hostname RegionServer hostname @@ -255,7 +231,7 @@ public interface HConnection extends Abortable, Closeable { * @return proxy for HRegionServer * @throws IOException if a remote or network exception occurs */ - public HRegionInterface getHRegionConnection(final String hostname, + public AdminProtocol getAdmin(final String hostname, final int port, boolean getMaster) throws IOException; diff --git a/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java index 820e2a90499..349698d6fa5 100644 --- a/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java +++ b/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java @@ -66,20 +66,16 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.client.AdminProtocol; +import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.ipc.CoprocessorProtocol; import org.apache.hadoop.hbase.ipc.ExecRPCInvoker; import org.apache.hadoop.hbase.ipc.HBaseRPC; import org.apache.hadoop.hbase.ipc.HMasterInterface; -import org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.ipc.VersionedProtocol; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.ClientProtocol; -import org.apache.hadoop.hbase.protobuf.RequestConverter; -import org.apache.hadoop.hbase.protobuf.ResponseConverter; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; @@ -161,6 +157,12 @@ public class HConnectionManager { /** Default client protocol class name. */ public static final String DEFAULT_CLIENT_PROTOCOL_CLASS = ClientProtocol.class.getName(); + /** Parameter name for what admin protocol to use. */ + public static final String REGION_PROTOCOL_CLASS = "hbase.adminprotocol.class"; + + /** Default admin protocol class name. 
*/ + public static final String DEFAULT_ADMIN_PROTOCOL_CLASS = AdminProtocol.class.getName(); + private static final Log LOG = LogFactory.getLog(HConnectionManager.class); static { @@ -507,7 +509,7 @@ public class HConnectionManager { /* Encapsulates connection to zookeeper and regionservers.*/ static class HConnectionImplementation implements HConnection, Closeable { static final Log LOG = LogFactory.getLog(HConnectionImplementation.class); - private final Class serverInterfaceClass; + private final Class adminClass; private final Class clientClass; private final long pause; private final int numRetries; @@ -535,8 +537,8 @@ public class HConnectionManager { private final Configuration conf; - // Known region HServerAddress.toString() -> HRegionInterface + // Known region ServerName.toString() -> RegionClient/Admin private final ConcurrentHashMap> servers = new ConcurrentHashMap>(); private final ConcurrentHashMap connectionLock = @@ -576,15 +578,15 @@ public class HConnectionManager { throws ZooKeeperConnectionException { this.conf = conf; this.managed = managed; - String serverClassName = conf.get(HConstants.REGION_SERVER_CLASS, - HConstants.DEFAULT_REGION_SERVER_CLASS); + String adminClassName = conf.get(REGION_PROTOCOL_CLASS, + DEFAULT_ADMIN_PROTOCOL_CLASS); this.closed = false; try { - this.serverInterfaceClass = - (Class) Class.forName(serverClassName); + this.adminClass = + (Class) Class.forName(adminClassName); } catch (ClassNotFoundException e) { throw new UnsupportedOperationException( - "Unable to find region server interface " + serverClassName, e); + "Unable to find region server interface " + adminClassName, e); } String clientClassName = conf.get(CLIENT_PROTOCOL_CLASS, DEFAULT_CLIENT_PROTOCOL_CLASS); @@ -730,9 +732,6 @@ public class HConnectionManager { return getKeepAliveMaster(); } catch (MasterNotRunningException e) { throw e; - } catch (IOException e) { - throw new ZooKeeperConnectionException( - "Can't create a connection to master", e); } } } @@ -1057,8 +1056,8 @@ public class HConnectionManager { metaLocation = locateRegion(parentTable, metaKey); // If null still, go around again. 
if (metaLocation == null) continue; - HRegionInterface server = - getHRegionConnection(metaLocation.getHostname(), metaLocation.getPort()); + ClientProtocol server = + getClient(metaLocation.getHostname(), metaLocation.getPort()); Result regionInfoRow = null; // This block guards against two threads trying to load the meta @@ -1086,9 +1085,9 @@ public class HConnectionManager { } // Query the root or meta region for the location of the meta region - regionInfoRow = server.getClosestRowBefore( - metaLocation.getRegionInfo().getRegionName(), metaKey, - HConstants.CATALOG_FAMILY); + regionInfoRow = ProtobufUtil.getRowOrBefore(server, + metaLocation.getRegionInfo().getRegionName(), metaKey, + HConstants.CATALOG_FAMILY); } if (regionInfoRow == null) { throw new TableNotFoundException(Bytes.toString(tableName)); @@ -1340,17 +1339,9 @@ public class HConnectionManager { } @Override - @Deprecated - public HRegionInterface getHRegionConnection(HServerAddress hsa) - throws IOException { - return getHRegionConnection(hsa, false); - } - - @Override - public HRegionInterface getHRegionConnection(final String hostname, - final int port) - throws IOException { - return getHRegionConnection(hostname, port, false); + public AdminProtocol getAdmin(final String hostname, + final int port) throws IOException { + return getAdmin(hostname, port, false); } @Override @@ -1361,21 +1352,10 @@ public class HConnectionManager { } @Override - @Deprecated - public HRegionInterface getHRegionConnection(HServerAddress hsa, - boolean master) - throws IOException { - String hostname = hsa.getInetSocketAddress().getHostName(); - int port = hsa.getInetSocketAddress().getPort(); - return getHRegionConnection(hostname, port, master); - } - - @Override - public HRegionInterface getHRegionConnection(final String hostname, - final int port, final boolean master) - throws IOException { - return (HRegionInterface)getProtocol(hostname, port, - serverInterfaceClass, HRegionInterface.VERSION); + public AdminProtocol getAdmin(final String hostname, + final int port, final boolean master) throws IOException { + return (AdminProtocol)getProtocol(hostname, port, + adminClass, AdminProtocol.VERSION); } /** @@ -1591,11 +1571,19 @@ public class HConnectionManager { }catch (InvocationTargetException e){ // We will have this for all the exception, checked on not, sent // by any layer, including the functional exception - if (e.getCause () == null){ + Throwable cause = e.getCause(); + if (cause == null){ throw new RuntimeException( "Proxy invocation failed and getCause is null", e); } - throw e.getCause(); + if (cause instanceof UndeclaredThrowableException) { + cause = cause.getCause(); + } + if (cause instanceof ServiceException) { + ServiceException se = (ServiceException)cause; + cause = ProtobufUtil.getRemoteException(se); + } + throw cause; } } } @@ -1715,39 +1703,8 @@ public class HConnectionManager { ServerCallable callable = new ServerCallable(connection, tableName, null) { public MultiResponse call() throws IOException { - try { - MultiResponse response = new MultiResponse(); - for (Map.Entry>> e: multi.actions.entrySet()) { - byte[] regionName = e.getKey(); - int rowMutations = 0; - List> actions = e.getValue(); - for (Action action: actions) { - Row row = action.getAction(); - if (row instanceof RowMutations) { - MultiRequest request = - RequestConverter.buildMultiRequest(regionName, (RowMutations)row); - server.multi(null, request); - response.add(regionName, action.getOriginalIndex(), new Result()); - rowMutations++; - } - } - if 
(actions.size() > rowMutations) { - MultiRequest request = - RequestConverter.buildMultiRequest(regionName, actions); - ClientProtos.MultiResponse - proto = server.multi(null, request); - List results = ResponseConverter.getResults(proto); - for (int i = 0, n = results.size(); i < n; i++) { - int originalIndex = actions.get(i).getOriginalIndex(); - response.add(regionName, originalIndex, results.get(i)); - } - } - } - return response; - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - } + return ProtobufUtil.multi(server, multi); + } @Override public void connect(boolean reload) throws IOException { server = connection.getClient( diff --git a/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/src/main/java/org/apache/hadoop/hbase/client/HTable.java index 2c87d50d429..b8290e45d5a 100644 --- a/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ b/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -662,15 +662,8 @@ public class HTable implements HTableInterface { throws IOException { return new ServerCallable(connection, tableName, row, operationTimeout) { public Result call() throws IOException { - try { - GetRequest request = RequestConverter.buildGetRequest( - location.getRegionInfo().getRegionName(), row, family, true); - GetResponse response = server.get(null, request); - if (!response.hasResult()) return null; - return ProtobufUtil.toResult(response.getResult()); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } + return ProtobufUtil.getRowOrBefore(server, + location.getRegionInfo().getRegionName(), row, family); } }.withRetries(); } @@ -715,14 +708,8 @@ public class HTable implements HTableInterface { public Result get(final Get get) throws IOException { return new ServerCallable(connection, tableName, get.getRow(), operationTimeout) { public Result call() throws IOException { - try { - GetRequest request = RequestConverter.buildGetRequest( - location.getRegionInfo().getRegionName(), get); - GetResponse response = server.get(null, request); - return ProtobufUtil.toResult(response.getResult()); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } + return ProtobufUtil.get(server, + location.getRegionInfo().getRegionName(), get); } }.withRetries(); } diff --git a/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java b/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java index cd4cccbf6f9..a879604db1f 100644 --- a/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java +++ b/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java @@ -34,8 +34,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.ipc.HBaseRPC; -import org.apache.hadoop.hbase.protobuf.ClientProtocol; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.ipc.RemoteException; diff --git a/src/main/java/org/apache/hadoop/hbase/ipc/ExecRPCInvoker.java b/src/main/java/org/apache/hadoop/hbase/ipc/ExecRPCInvoker.java index 2fc4a154ba2..578b2b2e64e 100644 --- a/src/main/java/org/apache/hadoop/hbase/ipc/ExecRPCInvoker.java +++ b/src/main/java/org/apache/hadoop/hbase/ipc/ExecRPCInvoker.java @@ -31,9 +31,6 @@ import org.apache.hadoop.hbase.client.ServerCallable; import org.apache.hadoop.hbase.client.coprocessor.Exec; import 
org.apache.hadoop.hbase.client.coprocessor.ExecResult; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.RequestConverter; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ExecCoprocessorRequest; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ExecCoprocessorResponse; import org.apache.hadoop.hbase.util.Bytes; /** @@ -80,12 +77,7 @@ public class ExecRPCInvoker implements InvocationHandler { new ServerCallable(connection, table, row) { public ExecResult call() throws Exception { byte[] regionName = location.getRegionInfo().getRegionName(); - ExecCoprocessorRequest request = - RequestConverter.buildExecCoprocessorRequest(regionName, exec); - ExecCoprocessorResponse response = - server.execCoprocessor(null, request); - Object value = ProtobufUtil.toObject(response.getValue()); - return new ExecResult(regionName, value); + return ProtobufUtil.execCoprocessor(server, exec, regionName); } }; ExecResult result = callable.withRetries(); diff --git a/src/main/java/org/apache/hadoop/hbase/ipc/Invocation.java b/src/main/java/org/apache/hadoop/hbase/ipc/Invocation.java index 57c94435fec..bb6ab3b181d 100644 --- a/src/main/java/org/apache/hadoop/hbase/ipc/Invocation.java +++ b/src/main/java/org/apache/hadoop/hbase/ipc/Invocation.java @@ -32,9 +32,10 @@ import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.client.AdminProtocol; +import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.io.HbaseObjectWritable; -import org.apache.hadoop.hbase.protobuf.AdminProtocol; -import org.apache.hadoop.hbase.protobuf.ClientProtocol; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; import org.apache.hadoop.io.VersionMismatchException; import org.apache.hadoop.io.VersionedWritable; @@ -50,7 +51,6 @@ public class Invocation extends VersionedWritable implements Configurable { private long clientVersion; private int clientMethodsHash; - // For generated protocol classes which don't have VERSION field, // such as protobuf interfaces. private static final Map, Long> @@ -59,6 +59,8 @@ public class Invocation extends VersionedWritable implements Configurable { static { PROTOCOL_VERSION.put(ClientService.BlockingInterface.class, Long.valueOf(ClientProtocol.VERSION)); + PROTOCOL_VERSION.put(AdminService.BlockingInterface.class, + Long.valueOf(AdminProtocol.VERSION)); } // For protobuf protocols, which use ServiceException, instead of IOException diff --git a/src/main/java/org/apache/hadoop/hbase/ipc/RpcEngine.java b/src/main/java/org/apache/hadoop/hbase/ipc/RpcEngine.java index 52d179db1df..8608070e453 100644 --- a/src/main/java/org/apache/hadoop/hbase/ipc/RpcEngine.java +++ b/src/main/java/org/apache/hadoop/hbase/ipc/RpcEngine.java @@ -28,6 +28,8 @@ import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import com.google.protobuf.ServiceException; + /** An RPC implementation. 
*/ @InterfaceAudience.Private interface RpcEngine { diff --git a/src/main/java/org/apache/hadoop/hbase/ipc/WritableRpcEngine.java b/src/main/java/org/apache/hadoop/hbase/ipc/WritableRpcEngine.java index a974e6b9d27..d300e28ad72 100644 --- a/src/main/java/org/apache/hadoop/hbase/ipc/WritableRpcEngine.java +++ b/src/main/java/org/apache/hadoop/hbase/ipc/WritableRpcEngine.java @@ -28,14 +28,18 @@ import java.lang.reflect.UndeclaredThrowableException; import java.net.InetSocketAddress; import java.io.*; +import java.util.HashSet; import java.util.Map; import java.util.HashMap; +import java.util.Set; import javax.net.SocketFactory; import org.apache.commons.logging.*; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.client.AdminProtocol; +import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.client.Operation; import org.apache.hadoop.hbase.io.HbaseObjectWritable; import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; @@ -62,6 +66,15 @@ class WritableRpcEngine implements RpcEngine { // DEBUG log level does NOT emit RPC-level logging. private static final Log LOG = LogFactory.getLog("org.apache.hadoop.ipc.RPCEngine"); + // For protobuf protocols, which use ServiceException, instead of IOException + protected static final Set> + PROTOBUF_PROTOCOLS = new HashSet>(); + + static { + PROTOBUF_PROTOCOLS.add(ClientProtocol.class); + PROTOBUF_PROTOCOLS.add(AdminProtocol.class); + } + /* Cache a client using its socket factory as the hash key */ static private class ClientCache { private Map clients = diff --git a/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java index d0570b98e14..9e4ada90b44 100644 --- a/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java +++ b/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java @@ -72,9 +72,7 @@ import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder; import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl; import org.apache.hadoop.hbase.io.hfile.HFileScanner; -import org.apache.hadoop.hbase.protobuf.RequestConverter; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; @@ -489,11 +487,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { LOG.debug("Going to connect to server " + location + " for row " + Bytes.toStringBinary(row)); byte[] regionName = location.getRegionInfo().getRegionName(); - BulkLoadHFileRequest request = - RequestConverter.buildBulkLoadHFileRequest(famPaths, regionName); - BulkLoadHFileResponse response = - server.bulkLoadHFile(null, request); - return response.getLoaded(); + return ProtobufUtil.bulkLoadHFile(server, famPaths, regionName); } }; diff --git a/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index 269a402be55..5c29e9c0cd2 100644 --- a/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -2051,40 +2051,36 @@ public class 
AssignmentManager extends ZooKeeperListener { // This never happens. Currently regionserver close always return true. LOG.warn("Server " + server + " region CLOSE RPC returned false for " + region.getRegionNameAsString()); - } catch (NotServingRegionException nsre) { - LOG.info("Server " + server + " returned " + nsre + " for " + - region.getRegionNameAsString()); - // Presume that master has stale data. Presume remote side just split. - // Presume that the split message when it comes in will fix up the master's - // in memory cluster state. } catch (Throwable t) { if (t instanceof RemoteException) { t = ((RemoteException)t).unwrapRemoteException(); - if (t instanceof NotServingRegionException) { - if (checkIfRegionBelongsToDisabling(region)) { - // Remove from the regionsinTransition map - LOG.info("While trying to recover the table " - + region.getTableNameAsString() - + " to DISABLED state the region " + region - + " was offlined but the table was in DISABLING state"); - synchronized (this.regionsInTransition) { - this.regionsInTransition.remove(region.getEncodedName()); - } - // Remove from the regionsMap - synchronized (this.regions) { - this.regions.remove(region); - } - deleteClosingOrClosedNode(region); + } + if (t instanceof NotServingRegionException) { + // Presume that master has stale data. Presume remote side just split. + // Presume that the split message when it comes in will fix up the master's + // in memory cluster state. + if (checkIfRegionBelongsToDisabling(region)) { + // Remove from the regionsinTransition map + LOG.info("While trying to recover the table " + + region.getTableNameAsString() + + " to DISABLED state the region " + region + + " was offlined but the table was in DISABLING state"); + synchronized (this.regionsInTransition) { + this.regionsInTransition.remove(region.getEncodedName()); } + // Remove from the regionsMap + synchronized (this.regions) { + this.regions.remove(region); + } + deleteClosingOrClosedNode(region); } + } else if (t instanceof RegionAlreadyInTransitionException) { // RS is already processing this region, only need to update the timestamp - if (t instanceof RegionAlreadyInTransitionException) { - LOG.debug("update " + state + " the timestamp."); - state.update(state.getState()); - } + LOG.debug("update " + state + " the timestamp."); + state.update(state.getState()); } LOG.info("Server " + server + " returned " + t + " for " + - region.getEncodedName()); + region.getRegionNameAsString()); // Presume retry or server will expire. 
} } diff --git a/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index 70901fe3905..80271b188d1 100644 --- a/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -44,13 +44,14 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.YouAreDeadException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.client.AdminProtocol; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.RetriesExhaustedException; -import org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.master.handler.MetaServerShutdownHandler; import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler; import org.apache.hadoop.hbase.monitoring.MonitoredTask; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.regionserver.RegionOpeningState; /** @@ -81,8 +82,8 @@ public class ServerManager { /** * Map from full server-instance name to the RPC connection for this server. */ - private final Map serverConnections = - new HashMap(); + private final Map serverConnections = + new HashMap(); /** * List of region servers that should not get any more new @@ -476,14 +477,13 @@ public class ServerManager { public RegionOpeningState sendRegionOpen(final ServerName server, HRegionInfo region, int versionOfOfflineNode) throws IOException { - HRegionInterface hri = getServerConnection(server); - if (hri == null) { + AdminProtocol admin = getServerConnection(server); + if (admin == null) { LOG.warn("Attempting to send OPEN RPC to server " + server.toString() + " failed because no RPC connection found to this server"); return RegionOpeningState.FAILED_OPENING; } - return (versionOfOfflineNode == -1) ? hri.openRegion(region) : hri - .openRegion(region, versionOfOfflineNode); + return ProtobufUtil.openRegion(admin, region, versionOfOfflineNode); } /** @@ -496,13 +496,13 @@ public class ServerManager { */ public void sendRegionOpen(ServerName server, List regions) throws IOException { - HRegionInterface hri = getServerConnection(server); - if (hri == null) { + AdminProtocol admin = getServerConnection(server); + if (admin == null) { LOG.warn("Attempting to send OPEN RPC to server " + server.toString() + " failed because no RPC connection found to this server"); return; } - hri.openRegions(regions); + ProtobufUtil.openRegion(admin, regions); } /** @@ -521,14 +521,15 @@ public class ServerManager { public boolean sendRegionClose(ServerName server, HRegionInfo region, int versionOfClosingNode) throws IOException { if (server == null) throw new NullPointerException("Passed server is null"); - HRegionInterface hri = getServerConnection(server); - if (hri == null) { + AdminProtocol admin = getServerConnection(server); + if (admin == null) { throw new IOException("Attempting to send CLOSE RPC to server " + server.toString() + " for region " + region.getRegionNameAsString() + " failed because no RPC connection found to this server"); } - return hri.closeRegion(region, versionOfClosingNode); + return ProtobufUtil.closeRegion(admin, region.getRegionName(), + versionOfClosingNode); } /** @@ -538,15 +539,15 @@ public class ServerManager { * @throws RetriesExhaustedException wrapping a ConnectException if failed * putting up proxy. 
*/ - private HRegionInterface getServerConnection(final ServerName sn) + private AdminProtocol getServerConnection(final ServerName sn) throws IOException { - HRegionInterface hri = this.serverConnections.get(sn); - if (hri == null) { + AdminProtocol admin = this.serverConnections.get(sn); + if (admin == null) { LOG.debug("New connection to " + sn.toString()); - hri = this.connection.getHRegionConnection(sn.getHostname(), sn.getPort()); - this.serverConnections.put(sn, hri); + admin = this.connection.getAdmin(sn.getHostname(), sn.getPort()); + this.serverConnections.put(sn, admin); } - return hri; + return admin; } /** diff --git a/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index b0568302bd0..994cb76a453 100644 --- a/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -39,24 +39,52 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.Action; +import org.apache.hadoop.hbase.client.AdminProtocol; import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Increment; +import org.apache.hadoop.hbase.client.MultiAction; +import org.apache.hadoop.hbase.client.MultiResponse; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Row; import org.apache.hadoop.hbase.client.RowLock; +import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.coprocessor.Exec; +import org.apache.hadoop.hbase.client.coprocessor.ExecResult; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.HbaseObjectWritable; import org.apache.hadoop.hbase.ipc.CoprocessorProtocol; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UUID; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALEdit.FamilyScope; import
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ExecCoprocessorRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ExecCoprocessorResponse; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.ColumnValue; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Mutate.ColumnValue.QualifierValue; @@ -66,10 +94,12 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo; +import org.apache.hadoop.hbase.regionserver.RegionOpeningState; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; import com.google.protobuf.ByteString; import com.google.protobuf.ServiceException; @@ -217,6 +247,27 @@ public final class ProtobufUtil { return builder.build(); } + /** + * Convert a protocol buffer ServerName to a ServerName + * + * @param proto the protocol buffer ServerName to convert + * @return the converted ServerName + */ + public static ServerName toServerName( + final HBaseProtos.ServerName proto) { + if (proto == null) return null; + String hostName = proto.getHostName(); + long startCode = -1; + int port = -1; + if (proto.hasPort()) { + port = proto.getPort(); + } + if (proto.hasStartCode()) { + startCode = proto.getStartCode(); + } + return new ServerName(hostName, port, startCode); + } + /** * Convert a RegionInfo to a HRegionInfo * @@ -227,6 +278,11 @@ public final class ProtobufUtil { toRegionInfo(final RegionInfo proto) { if (proto == null) return null; byte[] tableName = proto.getTableName().toByteArray(); + if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) { + return HRegionInfo.ROOT_REGIONINFO; + } else if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) { + return HRegionInfo.FIRST_META_REGIONINFO; + } long regionId = proto.getRegionId(); byte[] startKey = null; byte[] endKey = null; @@ -236,9 +292,16 @@ public final class ProtobufUtil { if (proto.hasEndKey()) { endKey = proto.getEndKey().toByteArray(); } - - return new HRegionInfo(tableName, - startKey, endKey, false, regionId); + boolean split = false; + if (proto.hasSplit()) { + split = proto.getSplit(); + } + HRegionInfo hri = new HRegionInfo(tableName, + startKey, endKey, split, regionId); + if (proto.hasOffline()) { + hri.setOffline(proto.getOffline()); + } + return hri; } /** @@ -259,6 +322,8 @@ public final class ProtobufUtil { if (info.getEndKey() != null) { builder.setEndKey(ByteString.copyFrom(info.getEndKey())); } + builder.setOffline(info.isOffline()); + 
builder.setSplit(info.isSplit()); return builder.build(); } @@ -596,7 +661,7 @@ public final class ProtobufUtil { toHLogEntries(final List protoList) { List entries = new ArrayList(); for (WALEntry entry: protoList) { - WALKey walKey = entry.getWalKey(); + WALKey walKey = entry.getKey(); java.util.UUID clusterId = HConstants.DEFAULT_CLUSTER_ID; if (walKey.hasClusterId()) { UUID protoUuid = walKey.getClusterId(); @@ -608,7 +673,7 @@ public final class ProtobufUtil { walKey.getWriteTime(), clusterId); WALEntry.WALEdit walEdit = entry.getEdit(); WALEdit edit = new WALEdit(); - for (ByteString keyValue: walEdit.getKeyValueList()) { + for (ByteString keyValue: walEdit.getKeyValueBytesList()) { edit.add(new KeyValue(keyValue.toByteArray())); } if (walEdit.getFamilyScopeCount() > 0) { @@ -721,4 +786,333 @@ public final class ProtobufUtil { } return builder.build(); } + +// Start helpers for Client + + /** + * A helper to invoke a Get using client protocol. + * + * @param client + * @param regionName + * @param get + * @return the result of the Get + * @throws IOException + */ + public static Result get(final ClientProtocol client, + final byte[] regionName, final Get get) throws IOException { + GetRequest request = + RequestConverter.buildGetRequest(regionName, get); + try { + GetResponse response = client.get(null, request); + return toResult(response.getResult()); + } catch (ServiceException se) { + throw getRemoteException(se); + } + } + + /** + * A helper to get a row of the closet one before using client protocol. + * + * @param client + * @param regionName + * @param row + * @param family + * @return the row or the closestRowBefore if it doesn't exist + * @throws IOException + */ + public static Result getRowOrBefore(final ClientProtocol client, + final byte[] regionName, final byte[] row, + final byte[] family) throws IOException { + GetRequest request = + RequestConverter.buildGetRowOrBeforeRequest( + regionName, row, family); + try { + GetResponse response = client.get(null, request); + if (!response.hasResult()) return null; + return toResult(response.getResult()); + } catch (ServiceException se) { + throw getRemoteException(se); + } + } + + /** + * A helper to invoke a multi action using client protocol. 
+ * + * @param client + * @param multi + * @return a multi response + * @throws IOException + */ + public static MultiResponse multi(final ClientProtocol client, + final MultiAction multi) throws IOException { + try { + MultiResponse response = new MultiResponse(); + for (Map.Entry>> e: multi.actions.entrySet()) { + byte[] regionName = e.getKey(); + int rowMutations = 0; + List> actions = e.getValue(); + for (Action action: actions) { + Row row = action.getAction(); + if (row instanceof RowMutations) { + MultiRequest request = + RequestConverter.buildMultiRequest(regionName, (RowMutations)row); + client.multi(null, request); + response.add(regionName, action.getOriginalIndex(), new Result()); + rowMutations++; + } + } + if (actions.size() > rowMutations) { + MultiRequest request = + RequestConverter.buildMultiRequest(regionName, actions); + ClientProtos.MultiResponse + proto = client.multi(null, request); + List results = ResponseConverter.getResults(proto); + for (int i = 0, n = results.size(); i < n; i++) { + int originalIndex = actions.get(i).getOriginalIndex(); + response.add(regionName, originalIndex, results.get(i)); + } + } + } + return response; + } catch (ServiceException se) { + throw getRemoteException(se); + } + } + + /** + * A helper to bulk load a list of HFiles using client protocol. + * + * @param client + * @param familyPaths + * @param regionName + * @return true if all are loaded + * @throws IOException + */ + public static boolean bulkLoadHFile(final ClientProtocol client, + final List> familyPaths, + final byte[] regionName) throws IOException { + BulkLoadHFileRequest request = + RequestConverter.buildBulkLoadHFileRequest(familyPaths, regionName); + try { + BulkLoadHFileResponse response = + client.bulkLoadHFile(null, request); + return response.getLoaded(); + } catch (ServiceException se) { + throw getRemoteException(se); + } + } + + /** + * A helper to exec a coprocessor Exec using client protocol. + * + * @param client + * @param exec + * @param regionName + * @return the exec result + * @throws IOException + */ + public static ExecResult execCoprocessor(final ClientProtocol client, + final Exec exec, final byte[] regionName) throws IOException { + ExecCoprocessorRequest request = + RequestConverter.buildExecCoprocessorRequest(regionName, exec); + try { + ExecCoprocessorResponse response = + client.execCoprocessor(null, request); + Object value = ProtobufUtil.toObject(response.getValue()); + return new ExecResult(regionName, value); + } catch (ServiceException se) { + throw getRemoteException(se); + } + } + +// End helpers for Client +// Start helpers for Admin + + /** + * A helper to retrieve region info given a region name + * using admin protocol. + * + * @param admin + * @param regionName + * @return the retrieved region info + * @throws IOException + */ + public static HRegionInfo getRegionInfo(final AdminProtocol admin, + final byte[] regionName) throws IOException { + try { + GetRegionInfoRequest request = + RequestConverter.buildGetRegionInfoRequest(regionName); + GetRegionInfoResponse response = + admin.getRegionInfo(null, request); + return toRegionInfo(response.getRegionInfo()); + } catch (ServiceException se) { + throw getRemoteException(se); + } + } + + /** + * A helper to close a region given a region name + * using admin protocol. 
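A sketch of how the bulk-load helper above might be driven, assuming familyPaths pairs a column family with the path of the hfile to load for it; the wrapping method, the "cf" family and the path argument are hypothetical.

  // Hypothetical caller-side sketch, not part of this patch.
  static boolean loadOneFamily(final ClientProtocol client, final byte[] regionName,
      final String hfilePath) throws IOException {
    List<Pair<byte[], String>> familyPaths = new ArrayList<Pair<byte[], String>>();
    familyPaths.add(new Pair<byte[], String>(Bytes.toBytes("cf"), hfilePath));
    // Returns true only if every listed hfile was loaded into the region.
    return ProtobufUtil.bulkLoadHFile(client, familyPaths, regionName);
  }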
+ * + * @param admin + * @param regionName + * @param transitionInZK + * @throws IOException + */ + public static void closeRegion(final AdminProtocol admin, + final byte[] regionName, final boolean transitionInZK) throws IOException { + CloseRegionRequest closeRegionRequest = + RequestConverter.buildCloseRegionRequest(regionName, transitionInZK); + try { + admin.closeRegion(null, closeRegionRequest); + } catch (ServiceException se) { + throw getRemoteException(se); + } + } + + /** + * A helper to close a region given a region name + * using admin protocol. + * + * @param admin + * @param regionName + * @param versionOfClosingNode + * @return true if the region is closed + * @throws IOException + */ + public static boolean closeRegion(final AdminProtocol admin, + final byte[] regionName, final int versionOfClosingNode) throws IOException { + CloseRegionRequest closeRegionRequest = + RequestConverter.buildCloseRegionRequest(regionName, versionOfClosingNode); + try { + CloseRegionResponse response = admin.closeRegion(null, closeRegionRequest); + return ResponseConverter.isClosed(response); + } catch (ServiceException se) { + throw getRemoteException(se); + } + } + + /** + * A helper to open a region using admin protocol. + * + * @param admin + * @param region + * @param versionOfOfflineNode + * @return the region opening state + * @throws IOException + */ + public static RegionOpeningState openRegion(final AdminProtocol admin, + final HRegionInfo region, final int versionOfOfflineNode) throws IOException { + OpenRegionRequest request = + RequestConverter.buildOpenRegionRequest(region, versionOfOfflineNode); + try { + OpenRegionResponse response = admin.openRegion(null, request); + return ResponseConverter.getRegionOpeningState(response); + } catch (ServiceException se) { + throw getRemoteException(se); + } + } + + /** + * A helper to open a list of regions using admin protocol. + * + * @param admin + * @param regions + * @throws IOException + */ + public static void openRegion(final AdminProtocol admin, + final List regions) throws IOException { + OpenRegionRequest request = + RequestConverter.buildOpenRegionRequest(regions); + try { + admin.openRegion(null, request); + } catch (ServiceException se) { + throw getRemoteException(se); + } + } + + /** + * A helper to get the all the online regions on a region + * server using admin protocol. + * + * @param admin + * @return a list of online region info + * @throws IOException + */ + public static List getOnlineRegions( + final AdminProtocol admin) throws IOException { + GetOnlineRegionRequest request = RequestConverter.buildGetOnlineRegionRequest(); + List regions = null; + try { + GetOnlineRegionResponse response = + admin.getOnlineRegion(null, request); + regions = new ArrayList(); + for (RegionInfo regionInfo: response.getRegionInfoList()) { + regions.add(toRegionInfo(regionInfo)); + } + return regions; + } catch (ServiceException se) { + throw getRemoteException(se); + } + } + + /** + * A helper to get the info of a region server using admin protocol. 
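A sketch of the admin-side helpers above working together: list a server's online regions over AdminProtocol, then ask it to close one. The stub is assumed to come from the caller's connection; the method name and the skip-meta check are illustrative only.

  // Hypothetical caller-side sketch, not part of this patch.
  static void closeFirstUserRegion(final AdminProtocol admin) throws IOException {
    for (HRegionInfo hri: ProtobufUtil.getOnlineRegions(admin)) {
      if (hri.isMetaRegion()) continue;   // leave catalog regions alone
      // false: do not transition the region's znode in ZooKeeper.
      ProtobufUtil.closeRegion(admin, hri.getRegionName(), false);
      return;
    }
  }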
+ * + * @param admin + * @return the server name + * @throws IOException + */ + public static ServerName getServerInfo( + final AdminProtocol admin) throws IOException { + GetServerInfoRequest request = RequestConverter.buildGetServerInfoRequest(); + try { + GetServerInfoResponse response = admin.getServerInfo(null, request); + return toServerName(response.getServerName()); + } catch (ServiceException se) { + throw getRemoteException(se); + } + } + + /** + * A helper to replicate a list of HLog entries using admin protocol. + * + * @param admin + * @param entries + * @throws IOException + */ + public static void replicateWALEntry(final AdminProtocol admin, + final HLog.Entry[] entries) throws IOException { + ReplicateWALEntryRequest request = + RequestConverter.buildReplicateWALEntryRequest(entries); + try { + admin.replicateWALEntry(null, request); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + + /** + * A helper to get the list of files of a column family + * on a given region using admin protocol. + * + * @param admin + * @param regionName + * @param family + * @return the list of store files + * @throws IOException + */ + public static List getStoreFiles(final AdminProtocol admin, + final byte[] regionName, final byte[] family) throws IOException { + GetStoreFileRequest request = + RequestConverter.buildGetStoreFileRequest(regionName, family); + try { + GetStoreFileResponse response = admin.getStoreFile(null, request); + return response.getStoreFileList(); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + +// End helpers for Admin } \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index a912cc33dcf..9b594aa1e64 100644 --- a/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ b/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -24,10 +24,12 @@ import java.util.Map; import java.util.Map.Entry; import java.util.NavigableMap; import java.util.NavigableSet; +import java.util.UUID; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Action; import org.apache.hadoop.hbase.client.Append; @@ -42,6 +44,23 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.coprocessor.Exec; import org.apache.hadoop.hbase.filter.WritableByteArrayComparable; import org.apache.hadoop.hbase.io.TimeRange; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest; +import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALEdit.ScopeType; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALEdit.FamilyScope; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath; @@ -65,6 +84,9 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; +import org.apache.hadoop.hbase.regionserver.wal.HLog; +import org.apache.hadoop.hbase.regionserver.wal.HLogKey; +import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; @@ -90,16 +112,15 @@ public final class RequestConverter { * @param regionName the name of the region to get * @param row the row to get * @param family the column family to get - * @param closestRowBefore if the requested row doesn't exist, * should return the immediate row before * @return a protocol buffer GetReuqest */ - public static GetRequest buildGetRequest(final byte[] regionName, - final byte[] row, final byte[] family, boolean closestRowBefore) { + public static GetRequest buildGetRowOrBeforeRequest( + final byte[] regionName, final byte[] row, final byte[] family) { GetRequest.Builder builder = GetRequest.newBuilder(); RegionSpecifier region = buildRegionSpecifier( RegionSpecifierType.REGION_NAME, regionName); - builder.setClosestRowBefore(closestRowBefore); + builder.setClosestRowBefore(true); builder.setRegion(region); Column.Builder columnBuilder = Column.newBuilder(); @@ -542,6 +563,294 @@ public final class RequestConverter { } // End utilities for Client +//Start utilities for Admin + + /** + * Create a protocol buffer GetRegionInfoRequest for a given region name + * + * @param regionName the name of the region to get info + * @return a protocol buffer GetRegionInfoRequest + */ + public static GetRegionInfoRequest + buildGetRegionInfoRequest(final byte[] regionName) { + GetRegionInfoRequest.Builder builder = GetRegionInfoRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + return builder.build(); + } + + /** + * Create a protocol buffer GetStoreFileRequest for a given region name + * + * @param regionName the name of the region to get info + * @param family the family to get store file list + * @return a protocol buffer GetStoreFileRequest + */ + public static GetStoreFileRequest + buildGetStoreFileRequest(final byte[] regionName, final byte[] family) { + GetStoreFileRequest.Builder builder = GetStoreFileRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + 
RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + builder.addFamily(ByteString.copyFrom(family)); + return builder.build(); + } + + /** + * Create a protocol buffer GetOnlineRegionRequest + * + * @return a protocol buffer GetOnlineRegionRequest + */ + public static GetOnlineRegionRequest buildGetOnlineRegionRequest() { + return GetOnlineRegionRequest.newBuilder().build(); + } + + /** + * Create a protocol buffer FlushRegionRequest for a given region name + * + * @param regionName the name of the region to get info + * @return a protocol buffer FlushRegionRequest + */ + public static FlushRegionRequest + buildFlushRegionRequest(final byte[] regionName) { + FlushRegionRequest.Builder builder = FlushRegionRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + return builder.build(); + } + + /** + * Create a protocol buffer OpenRegionRequest to open a list of regions + * + * @param regions the list of regions to open + * @return a protocol buffer OpenRegionRequest + */ + public static OpenRegionRequest + buildOpenRegionRequest(final List regions) { + OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder(); + for (HRegionInfo region: regions) { + builder.addRegion(ProtobufUtil.toRegionInfo(region)); + } + return builder.build(); + } + + /** + * Create a protocol buffer OpenRegionRequest for a given region + * + * @param region the region to open + * @return a protocol buffer OpenRegionRequest + */ + public static OpenRegionRequest + buildOpenRegionRequest(final HRegionInfo region) { + return buildOpenRegionRequest(region, -1); + } + + /** + * Create a protocol buffer OpenRegionRequest for a given region + * + * @param region the region to open + * @param versionOfOfflineNode that needs to be present in the offline node + * @return a protocol buffer OpenRegionRequest + */ + public static OpenRegionRequest buildOpenRegionRequest( + final HRegionInfo region, final int versionOfOfflineNode) { + OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder(); + builder.addRegion(ProtobufUtil.toRegionInfo(region)); + if (versionOfOfflineNode >= 0) { + builder.setVersionOfOfflineNode(versionOfOfflineNode); + } + return builder.build(); + } + + /** + * Create a CloseRegionRequest for a given region name + * + * @param regionName the name of the region to close + * @param transitionInZK indicator if to transition in ZK + * @return a CloseRegionRequest + */ + public static CloseRegionRequest buildCloseRegionRequest( + final byte[] regionName, final boolean transitionInZK) { + CloseRegionRequest.Builder builder = CloseRegionRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + builder.setTransitionInZK(transitionInZK); + return builder.build(); + } + + /** + * Create a CloseRegionRequest for a given region name + * + * @param regionName the name of the region to close + * @param versionOfClosingNode + * the version of znode to compare when RS transitions the znode from + * CLOSING state. 
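The builders above can also be used directly against the generated stub when the raw protobuf response is wanted, following the same build-request / call-stub / unwrap-ServiceException pattern the ProtobufUtil helpers use. A sketch, with hypothetical names and the usual null RpcController:

  // Hypothetical caller-side sketch, not part of this patch.
  static void openOnServer(final AdminProtocol admin, final HRegionInfo region)
      throws IOException {
    OpenRegionRequest request = RequestConverter.buildOpenRegionRequest(region);
    try {
      admin.openRegion(null, request);
    } catch (ServiceException se) {
      throw ProtobufUtil.getRemoteException(se);
    }
  }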
+ * @return a CloseRegionRequest + */ + public static CloseRegionRequest buildCloseRegionRequest( + final byte[] regionName, final int versionOfClosingNode) { + CloseRegionRequest.Builder builder = CloseRegionRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + builder.setVersionOfClosingNode(versionOfClosingNode); + return builder.build(); + } + + /** + * Create a CloseRegionRequest for a given encoded region name + * + * @param encodedRegionName the name of the region to close + * @param transitionInZK indicator if to transition in ZK + * @return a CloseRegionRequest + */ + public static CloseRegionRequest + buildCloseRegionRequest(final String encodedRegionName, + final boolean transitionInZK) { + CloseRegionRequest.Builder builder = CloseRegionRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.ENCODED_REGION_NAME, + Bytes.toBytes(encodedRegionName)); + builder.setRegion(region); + builder.setTransitionInZK(transitionInZK); + return builder.build(); + } + + /** + * Create a SplitRegionRequest for a given region name + * + * @param regionName the name of the region to split + * @param splitPoint the split point + * @return a SplitRegionRequest + */ + public static SplitRegionRequest buildSplitRegionRequest( + final byte[] regionName, final byte[] splitPoint) { + SplitRegionRequest.Builder builder = SplitRegionRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + if (splitPoint != null) { + builder.setSplitPoint(ByteString.copyFrom(splitPoint)); + } + return builder.build(); + } + + /** + * Create a CompactRegionRequest for a given region name + * + * @param regionName the name of the region to get info + * @param major indicator if it is a major compaction + * @return a CompactRegionRequest + */ + public static CompactRegionRequest buildCompactRegionRequest( + final byte[] regionName, final boolean major) { + CompactRegionRequest.Builder builder = CompactRegionRequest.newBuilder(); + RegionSpecifier region = buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + builder.setMajor(major); + return builder.build(); + } + + /** + * Create a new ReplicateWALEntryRequest from a list of HLog entries + * + * @param entries the HLog entries to be replicated + * @return a ReplicateWALEntryRequest + */ + public static ReplicateWALEntryRequest + buildReplicateWALEntryRequest(final HLog.Entry[] entries) { + FamilyScope.Builder scopeBuilder = FamilyScope.newBuilder(); + WALEntry.Builder entryBuilder = WALEntry.newBuilder(); + ReplicateWALEntryRequest.Builder builder = + ReplicateWALEntryRequest.newBuilder(); + for (HLog.Entry entry: entries) { + entryBuilder.clear(); + WALKey.Builder keyBuilder = entryBuilder.getKeyBuilder(); + HLogKey key = entry.getKey(); + keyBuilder.setEncodedRegionName( + ByteString.copyFrom(key.getEncodedRegionName())); + keyBuilder.setTableName(ByteString.copyFrom(key.getTablename())); + keyBuilder.setLogSequenceNumber(key.getLogSeqNum()); + keyBuilder.setWriteTime(key.getWriteTime()); + UUID clusterId = key.getClusterId(); + if (clusterId != null) { + AdminProtos.UUID.Builder uuidBuilder = keyBuilder.getClusterIdBuilder(); + uuidBuilder.setLeastSigBits(clusterId.getLeastSignificantBits()); + uuidBuilder.setMostSigBits(clusterId.getMostSignificantBits()); + } + WALEdit edit = entry.getEdit(); + 
WALEntry.WALEdit.Builder editBuilder = entryBuilder.getEditBuilder(); + NavigableMap scopes = edit.getScopes(); + if (scopes != null && !scopes.isEmpty()) { + for (Map.Entry scope: scopes.entrySet()) { + scopeBuilder.setFamily(ByteString.copyFrom(scope.getKey())); + ScopeType scopeType = ScopeType.valueOf(scope.getValue().intValue()); + scopeBuilder.setScopeType(scopeType); + editBuilder.addFamilyScope(scopeBuilder.build()); + } + } + List keyValues = edit.getKeyValues(); + for (KeyValue value: keyValues) { + editBuilder.addKeyValueBytes(ByteString.copyFrom( + value.getBuffer(), value.getOffset(), value.getLength())); + } + builder.addEntry(entryBuilder.build()); + } + return builder.build(); + } + + /** + * Create a new RollWALWriterRequest + * + * @return a ReplicateWALEntryRequest + */ + public static RollWALWriterRequest buildRollWALWriterRequest() { + RollWALWriterRequest.Builder builder = RollWALWriterRequest.newBuilder(); + return builder.build(); + } + + /** + * Create a new GetServerInfoRequest + * + * @return a GetServerInfoRequest + */ + public static GetServerInfoRequest buildGetServerInfoRequest() { + GetServerInfoRequest.Builder builder = GetServerInfoRequest.newBuilder(); + return builder.build(); + } + + /** + * Create a new StopServerRequest + * + * @param reason the reason to stop the server + * @return a StopServerRequest + */ + public static StopServerRequest buildStopServerRequest(final String reason) { + StopServerRequest.Builder builder = StopServerRequest.newBuilder(); + builder.setReason(reason); + return builder.build(); + } + +//End utilities for Admin + + /** + * Convert a byte array to a protocol buffer RegionSpecifier + * + * @param type the region specifier type + * @param value the region specifier byte array value + * @return a protocol buffer RegionSpecifier + */ + public static RegionSpecifier buildRegionSpecifier( + final RegionSpecifierType type, final byte[] value) { + RegionSpecifier.Builder regionBuilder = RegionSpecifier.newBuilder(); + regionBuilder.setValue(ByteString.copyFrom(value)); + regionBuilder.setType(type); + return regionBuilder.build(); + } /** * Create a protocol buffer Condition @@ -744,21 +1053,6 @@ public final class RequestConverter { return mutateBuilder.build(); } - /** - * Convert a byte array to a protocol buffer RegionSpecifier - * - * @param type the region specifier type - * @param value the region specifier byte array value - * @return a protocol buffer RegionSpecifier - */ - private static RegionSpecifier buildRegionSpecifier( - final RegionSpecifierType type, final byte[] value) { - RegionSpecifier.Builder regionBuilder = RegionSpecifier.newBuilder(); - regionBuilder.setValue(ByteString.copyFrom(value)); - regionBuilder.setType(type); - return regionBuilder.build(); - } - /** * Convert a delete KeyValue type to protocol buffer DeleteType. 
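A sketch tying the WAL-related pieces together: a replication caller can ship a batch of HLog entries to a peer region server through the ProtobufUtil helper, which wraps buildReplicateWALEntryRequest above. The sink stub and the entry batch are assumed to come from the replication code; names are hypothetical.

  // Hypothetical caller-side sketch, not part of this patch.
  static void shipEdits(final AdminProtocol sinkAdmin, final HLog.Entry[] entries)
      throws IOException {
    if (entries.length == 0) return;   // nothing to replicate
    // buildReplicateWALEntryRequest(entries) plus the replicateWALEntry RPC,
    // with any ServiceException unwrapped into an IOException.
    ProtobufUtil.replicateWALEntry(sinkAdmin, entries);
  }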
* diff --git a/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java b/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java index ecaf9febb8d..81603af1e21 100644 --- a/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java +++ b/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java @@ -24,16 +24,15 @@ import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ActionResult; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo; import org.apache.hadoop.hbase.regionserver.RegionOpeningState; import org.apache.hadoop.util.StringUtils; @@ -146,18 +145,6 @@ public final class ResponseConverter { return regionInfos; } - /** - * Get the region info from a GetRegionInfoResponse - * - * @param proto the GetRegionInfoResponse - * @return the region info - */ - public static HRegionInfo getRegionInfo - (final GetRegionInfoResponse proto) { - if (proto == null || proto.getRegionInfo() == null) return null; - return ProtobufUtil.toRegionInfo(proto.getRegionInfo()); - } - /** * Get the region opening state from a OpenRegionResponse * diff --git a/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java b/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java index e78e56de166..f67d4205bd4 100644 --- a/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java +++ b/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java @@ -954,7 +954,7 @@ public final class AdminProtos { // @@protoc_insertion_point(class_scope:GetRegionInfoResponse) } - public interface GetStoreFileListRequestOrBuilder + public interface GetStoreFileRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { // required .RegionSpecifier region = 1; @@ -962,37 +962,37 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(); org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder(); - // repeated bytes columnFamily = 2; - java.util.List getColumnFamilyList(); - int getColumnFamilyCount(); - com.google.protobuf.ByteString getColumnFamily(int index); + // repeated bytes family = 2; + java.util.List getFamilyList(); + int getFamilyCount(); + com.google.protobuf.ByteString getFamily(int index); } - public static final class GetStoreFileListRequest extends + public static final class GetStoreFileRequest extends com.google.protobuf.GeneratedMessage - implements GetStoreFileListRequestOrBuilder { - // Use 
GetStoreFileListRequest.newBuilder() to construct. - private GetStoreFileListRequest(Builder builder) { + implements GetStoreFileRequestOrBuilder { + // Use GetStoreFileRequest.newBuilder() to construct. + private GetStoreFileRequest(Builder builder) { super(builder); } - private GetStoreFileListRequest(boolean noInit) {} + private GetStoreFileRequest(boolean noInit) {} - private static final GetStoreFileListRequest defaultInstance; - public static GetStoreFileListRequest getDefaultInstance() { + private static final GetStoreFileRequest defaultInstance; + public static GetStoreFileRequest getDefaultInstance() { return defaultInstance; } - public GetStoreFileListRequest getDefaultInstanceForType() { + public GetStoreFileRequest getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_GetStoreFileListRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_GetStoreFileRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_GetStoreFileListRequest_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_GetStoreFileRequest_fieldAccessorTable; } private int bitField0_; @@ -1009,23 +1009,23 @@ public final class AdminProtos { return region_; } - // repeated bytes columnFamily = 2; - public static final int COLUMNFAMILY_FIELD_NUMBER = 2; - private java.util.List columnFamily_; + // repeated bytes family = 2; + public static final int FAMILY_FIELD_NUMBER = 2; + private java.util.List family_; public java.util.List - getColumnFamilyList() { - return columnFamily_; + getFamilyList() { + return family_; } - public int getColumnFamilyCount() { - return columnFamily_.size(); + public int getFamilyCount() { + return family_.size(); } - public com.google.protobuf.ByteString getColumnFamily(int index) { - return columnFamily_.get(index); + public com.google.protobuf.ByteString getFamily(int index) { + return family_.get(index); } private void initFields() { region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); - columnFamily_ = java.util.Collections.emptyList();; + family_ = java.util.Collections.emptyList();; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -1050,8 +1050,8 @@ public final class AdminProtos { if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, region_); } - for (int i = 0; i < columnFamily_.size(); i++) { - output.writeBytes(2, columnFamily_.get(i)); + for (int i = 0; i < family_.size(); i++) { + output.writeBytes(2, family_.get(i)); } getUnknownFields().writeTo(output); } @@ -1068,12 +1068,12 @@ public final class AdminProtos { } { int dataSize = 0; - for (int i = 0; i < columnFamily_.size(); i++) { + for (int i = 0; i < family_.size(); i++) { dataSize += com.google.protobuf.CodedOutputStream - .computeBytesSizeNoTag(columnFamily_.get(i)); + .computeBytesSizeNoTag(family_.get(i)); } size += dataSize; - size += 1 * getColumnFamilyList().size(); + size += 1 * getFamilyList().size(); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -1092,10 +1092,10 @@ public final class AdminProtos { if (obj == this) { return true; } - if (!(obj instanceof 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest) obj; boolean result = true; result = result && (hasRegion() == other.hasRegion()); @@ -1103,8 +1103,8 @@ public final class AdminProtos { result = result && getRegion() .equals(other.getRegion()); } - result = result && getColumnFamilyList() - .equals(other.getColumnFamilyList()); + result = result && getFamilyList() + .equals(other.getFamilyList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -1118,49 +1118,49 @@ public final class AdminProtos { hash = (37 * hash) + REGION_FIELD_NUMBER; hash = (53 * hash) + getRegion().hashCode(); } - if (getColumnFamilyCount() > 0) { - hash = (37 * hash) + COLUMNFAMILY_FIELD_NUMBER; - hash = (53 * hash) + getColumnFamilyList().hashCode(); + if (getFamilyCount() > 0) { + hash = (37 * hash) + FAMILY_FIELD_NUMBER; + hash = (53 * hash) + getFamilyList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest parseFrom( + public static 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { Builder builder = newBuilder(); if (builder.mergeDelimitedFrom(input)) { @@ -1169,7 +1169,7 @@ public final class AdminProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -1180,12 +1180,12 @@ public final class AdminProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -1195,7 +1195,7 @@ public final class AdminProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -1208,18 +1208,18 @@ public final class AdminProtos { } public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_GetStoreFileListRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_GetStoreFileRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_GetStoreFileListRequest_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_GetStoreFileRequest_fieldAccessorTable; } - // Construct using org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest.newBuilder() + // Construct using 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -1245,7 +1245,7 @@ public final class AdminProtos { regionBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); - columnFamily_ = java.util.Collections.emptyList();; + family_ = java.util.Collections.emptyList();; bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -1256,24 +1256,24 @@ public final class AdminProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest.getDescriptor(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest.getDescriptor(); } - public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest build() { - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest build() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - private org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest buildParsed() + private org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest result = buildPartial(); + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException( result).asInvalidProtocolBufferException(); @@ -1281,8 +1281,8 @@ public final class AdminProtos { return result; } - public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest(this); + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { @@ -1294,36 +1294,36 @@ public final class AdminProtos { result.region_ = regionBuilder_.build(); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - columnFamily_ = java.util.Collections.unmodifiableList(columnFamily_); + family_ = java.util.Collections.unmodifiableList(family_); bitField0_ = (bitField0_ & ~0x00000002); } - result.columnFamily_ = columnFamily_; + result.family_ = family_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message 
other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest.getDefaultInstance()) return this; if (other.hasRegion()) { mergeRegion(other.getRegion()); } - if (!other.columnFamily_.isEmpty()) { - if (columnFamily_.isEmpty()) { - columnFamily_ = other.columnFamily_; + if (!other.family_.isEmpty()) { + if (family_.isEmpty()) { + family_ = other.family_; bitField0_ = (bitField0_ & ~0x00000002); } else { - ensureColumnFamilyIsMutable(); - columnFamily_.addAll(other.columnFamily_); + ensureFamilyIsMutable(); + family_.addAll(other.family_); } onChanged(); } @@ -1376,8 +1376,8 @@ public final class AdminProtos { break; } case 18: { - ensureColumnFamilyIsMutable(); - columnFamily_.add(input.readBytes()); + ensureFamilyIsMutable(); + family_.add(input.readBytes()); break; } } @@ -1476,69 +1476,69 @@ public final class AdminProtos { return regionBuilder_; } - // repeated bytes columnFamily = 2; - private java.util.List columnFamily_ = java.util.Collections.emptyList();; - private void ensureColumnFamilyIsMutable() { + // repeated bytes family = 2; + private java.util.List family_ = java.util.Collections.emptyList();; + private void ensureFamilyIsMutable() { if (!((bitField0_ & 0x00000002) == 0x00000002)) { - columnFamily_ = new java.util.ArrayList(columnFamily_); + family_ = new java.util.ArrayList(family_); bitField0_ |= 0x00000002; } } public java.util.List - getColumnFamilyList() { - return java.util.Collections.unmodifiableList(columnFamily_); + getFamilyList() { + return java.util.Collections.unmodifiableList(family_); } - public int getColumnFamilyCount() { - return columnFamily_.size(); + public int getFamilyCount() { + return family_.size(); } - public com.google.protobuf.ByteString getColumnFamily(int index) { - return columnFamily_.get(index); + public com.google.protobuf.ByteString getFamily(int index) { + return family_.get(index); } - public Builder setColumnFamily( + public Builder setFamily( int index, com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - ensureColumnFamilyIsMutable(); - columnFamily_.set(index, value); + ensureFamilyIsMutable(); + family_.set(index, value); onChanged(); return this; } - public Builder addColumnFamily(com.google.protobuf.ByteString value) { + public Builder addFamily(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - ensureColumnFamilyIsMutable(); - columnFamily_.add(value); + ensureFamilyIsMutable(); + family_.add(value); onChanged(); return this; } - public Builder addAllColumnFamily( + public Builder addAllFamily( java.lang.Iterable values) { - ensureColumnFamilyIsMutable(); - super.addAll(values, columnFamily_); 
+ ensureFamilyIsMutable(); + super.addAll(values, family_); onChanged(); return this; } - public Builder clearColumnFamily() { - columnFamily_ = java.util.Collections.emptyList();; + public Builder clearFamily() { + family_ = java.util.Collections.emptyList();; bitField0_ = (bitField0_ & ~0x00000002); onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:GetStoreFileListRequest) + // @@protoc_insertion_point(builder_scope:GetStoreFileRequest) } static { - defaultInstance = new GetStoreFileListRequest(true); + defaultInstance = new GetStoreFileRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:GetStoreFileListRequest) + // @@protoc_insertion_point(class_scope:GetStoreFileRequest) } - public interface GetStoreFileListResponseOrBuilder + public interface GetStoreFileResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { // repeated string storeFile = 1; @@ -1546,32 +1546,32 @@ public final class AdminProtos { int getStoreFileCount(); String getStoreFile(int index); } - public static final class GetStoreFileListResponse extends + public static final class GetStoreFileResponse extends com.google.protobuf.GeneratedMessage - implements GetStoreFileListResponseOrBuilder { - // Use GetStoreFileListResponse.newBuilder() to construct. - private GetStoreFileListResponse(Builder builder) { + implements GetStoreFileResponseOrBuilder { + // Use GetStoreFileResponse.newBuilder() to construct. + private GetStoreFileResponse(Builder builder) { super(builder); } - private GetStoreFileListResponse(boolean noInit) {} + private GetStoreFileResponse(boolean noInit) {} - private static final GetStoreFileListResponse defaultInstance; - public static GetStoreFileListResponse getDefaultInstance() { + private static final GetStoreFileResponse defaultInstance; + public static GetStoreFileResponse getDefaultInstance() { return defaultInstance; } - public GetStoreFileListResponse getDefaultInstanceForType() { + public GetStoreFileResponse getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_GetStoreFileListResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_GetStoreFileResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_GetStoreFileListResponse_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_GetStoreFileResponse_fieldAccessorTable; } // repeated string storeFile = 1; @@ -1641,10 +1641,10 @@ public final class AdminProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse) obj; boolean result = true; result = result && getStoreFileList() @@ -1666,41 +1666,41 @@ public final class AdminProtos { 
return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { Builder builder = newBuilder(); if (builder.mergeDelimitedFrom(input)) { @@ -1709,7 +1709,7 @@ public final class AdminProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -1720,12 +1720,12 @@ public final class AdminProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -1735,7 +1735,7 @@ public final class AdminProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -1748,18 +1748,18 @@ public final class AdminProtos { } public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_GetStoreFileListResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_GetStoreFileResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_GetStoreFileListResponse_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_GetStoreFileResponse_fieldAccessorTable; } - // Construct using org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -1789,24 +1789,24 @@ public final class AdminProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse.getDescriptor(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse.getDescriptor(); } - public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse build() { - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse build() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - private org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse buildParsed() + 
private org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse result = buildPartial(); + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException( result).asInvalidProtocolBufferException(); @@ -1814,8 +1814,8 @@ public final class AdminProtos { return result; } - public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse(this); int from_bitField0_ = bitField0_; if (((bitField0_ & 0x00000001) == 0x00000001)) { storeFile_ = new com.google.protobuf.UnmodifiableLazyStringList( @@ -1828,16 +1828,16 @@ public final class AdminProtos { } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse.getDefaultInstance()) return this; if (!other.storeFile_.isEmpty()) { if (storeFile_.isEmpty()) { storeFile_ = other.storeFile_; @@ -1946,15 +1946,15 @@ public final class AdminProtos { onChanged(); } - // @@protoc_insertion_point(builder_scope:GetStoreFileListResponse) + // @@protoc_insertion_point(builder_scope:GetStoreFileResponse) } static { - defaultInstance = new GetStoreFileListResponse(true); + defaultInstance = new GetStoreFileResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:GetStoreFileListResponse) + // @@protoc_insertion_point(class_scope:GetStoreFileResponse) } public interface GetOnlineRegionRequestOrBuilder @@ -2853,14 +2853,14 @@ public final class AdminProtos { public interface OpenRegionRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - // repeated .RegionSpecifier region = 1; - java.util.List + // repeated .RegionInfo region = 1; + java.util.List getRegionList(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(int index); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion(int index); int getRegionCount(); - java.util.List + java.util.List getRegionOrBuilderList(); - 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder( int index); // optional uint32 versionOfOfflineNode = 2; @@ -2896,23 +2896,23 @@ public final class AdminProtos { } private int bitField0_; - // repeated .RegionSpecifier region = 1; + // repeated .RegionInfo region = 1; public static final int REGION_FIELD_NUMBER = 1; - private java.util.List region_; - public java.util.List getRegionList() { + private java.util.List region_; + public java.util.List getRegionList() { return region_; } - public java.util.List + public java.util.List getRegionOrBuilderList() { return region_; } public int getRegionCount() { return region_.size(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(int index) { + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion(int index) { return region_.get(index); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder( + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder( int index) { return region_.get(index); } @@ -3278,7 +3278,7 @@ public final class AdminProtos { break; } case 10: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(); input.readMessage(subBuilder, extensionRegistry); addRegion(subBuilder.buildPartial()); break; @@ -3294,20 +3294,20 @@ public final class AdminProtos { private int bitField0_; - // repeated .RegionSpecifier region = 1; - private java.util.List region_ = + // repeated .RegionInfo region = 1; + private java.util.List region_ = java.util.Collections.emptyList(); private void ensureRegionIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { - region_ = new java.util.ArrayList(region_); + region_ = new java.util.ArrayList(region_); bitField0_ |= 0x00000001; } } private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBuilder_; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionBuilder_; - public java.util.List getRegionList() { + public java.util.List getRegionList() { if (regionBuilder_ == null) { return java.util.Collections.unmodifiableList(region_); } else { @@ -3321,7 +3321,7 @@ public final class AdminProtos { return regionBuilder_.getCount(); } } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(int index) { + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion(int index) { if (regionBuilder_ == null) { return region_.get(index); } else { @@ -3329,7 +3329,7 @@ public final class AdminProtos { } } public Builder setRegion( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { + int index, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { if (regionBuilder_ == null) { if (value == null) { throw new NullPointerException(); @@ -3343,7 +3343,7 @@ public final class AdminProtos { return this; } public Builder setRegion( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { if (regionBuilder_ == null) { ensureRegionIsMutable(); region_.set(index, builderForValue.build()); @@ -3353,7 +3353,7 @@ public final class AdminProtos { } return this; } - public Builder addRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { + public Builder addRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { if (regionBuilder_ == null) { if (value == null) { throw new NullPointerException(); @@ -3367,7 +3367,7 @@ public final class AdminProtos { return this; } public Builder addRegion( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) { + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { if (regionBuilder_ == null) { if (value == null) { throw new NullPointerException(); @@ -3381,7 +3381,7 @@ public final class AdminProtos { return this; } public Builder addRegion( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { if (regionBuilder_ == null) { ensureRegionIsMutable(); region_.add(builderForValue.build()); @@ -3392,7 +3392,7 @@ public final class AdminProtos { return this; } public Builder addRegion( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) { + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { if (regionBuilder_ == null) { ensureRegionIsMutable(); region_.add(index, builderForValue.build()); @@ -3403,7 +3403,7 @@ public final class AdminProtos { return this; } public Builder addAllRegion( - java.lang.Iterable values) { + java.lang.Iterable values) { if (regionBuilder_ == null) { ensureRegionIsMutable(); super.addAll(values, region_); @@ -3433,18 +3433,18 @@ public final class AdminProtos { } return this; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBuilder( + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionBuilder( int index) { return getRegionFieldBuilder().getBuilder(index); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder( + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder( int index) { if (regionBuilder_ == null) { return region_.get(index); } else { return regionBuilder_.getMessageOrBuilder(index); } } - public java.util.List + public java.util.List getRegionOrBuilderList() { if (regionBuilder_ != null) { return regionBuilder_.getMessageOrBuilderList(); @@ -3452,25 +3452,25 @@ public final class AdminProtos { return java.util.Collections.unmodifiableList(region_); } } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder addRegionBuilder() { + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionBuilder() { return 
getRegionFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder addRegionBuilder( + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionBuilder( int index) { return getRegionFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()); + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()); } - public java.util.List + public java.util.List getRegionBuilderList() { return getRegionFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> getRegionFieldBuilder() { if (regionBuilder_ == null) { regionBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( region_, ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), @@ -8121,10 +8121,10 @@ public final class AdminProtos { public interface WALEntryOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required .WALEntry.WALKey walKey = 1; - boolean hasWalKey(); - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey getWalKey(); - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKeyOrBuilder getWalKeyOrBuilder(); + // required .WALEntry.WALKey key = 1; + boolean hasKey(); + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey getKey(); + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKeyOrBuilder getKeyOrBuilder(); // required .WALEntry.WALEdit edit = 2; boolean hasEdit(); @@ -8933,10 +8933,10 @@ public final class AdminProtos { public interface WALEditOrBuilder extends com.google.protobuf.MessageOrBuilder { - // repeated bytes keyValue = 1; - java.util.List getKeyValueList(); - int getKeyValueCount(); - com.google.protobuf.ByteString getKeyValue(int index); + // repeated bytes keyValueBytes = 1; + java.util.List getKeyValueBytesList(); + int getKeyValueBytesCount(); + com.google.protobuf.ByteString getKeyValueBytes(int index); // repeated .WALEntry.WALEdit.FamilyScope familyScope = 2; java.util.List @@ -9510,18 +9510,18 @@ public final class AdminProtos { // @@protoc_insertion_point(class_scope:WALEntry.WALEdit.FamilyScope) } - // repeated bytes keyValue = 1; - public static final int KEYVALUE_FIELD_NUMBER = 1; - private java.util.List keyValue_; + // repeated bytes keyValueBytes = 1; + public static final int KEYVALUEBYTES_FIELD_NUMBER = 
1; + private java.util.List keyValueBytes_; public java.util.List - getKeyValueList() { - return keyValue_; + getKeyValueBytesList() { + return keyValueBytes_; } - public int getKeyValueCount() { - return keyValue_.size(); + public int getKeyValueBytesCount() { + return keyValueBytes_.size(); } - public com.google.protobuf.ByteString getKeyValue(int index) { - return keyValue_.get(index); + public com.google.protobuf.ByteString getKeyValueBytes(int index) { + return keyValueBytes_.get(index); } // repeated .WALEntry.WALEdit.FamilyScope familyScope = 2; @@ -9546,7 +9546,7 @@ public final class AdminProtos { } private void initFields() { - keyValue_ = java.util.Collections.emptyList();; + keyValueBytes_ = java.util.Collections.emptyList();; familyScope_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; @@ -9567,8 +9567,8 @@ public final class AdminProtos { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - for (int i = 0; i < keyValue_.size(); i++) { - output.writeBytes(1, keyValue_.get(i)); + for (int i = 0; i < keyValueBytes_.size(); i++) { + output.writeBytes(1, keyValueBytes_.get(i)); } for (int i = 0; i < familyScope_.size(); i++) { output.writeMessage(2, familyScope_.get(i)); @@ -9584,12 +9584,12 @@ public final class AdminProtos { size = 0; { int dataSize = 0; - for (int i = 0; i < keyValue_.size(); i++) { + for (int i = 0; i < keyValueBytes_.size(); i++) { dataSize += com.google.protobuf.CodedOutputStream - .computeBytesSizeNoTag(keyValue_.get(i)); + .computeBytesSizeNoTag(keyValueBytes_.get(i)); } size += dataSize; - size += 1 * getKeyValueList().size(); + size += 1 * getKeyValueBytesList().size(); } for (int i = 0; i < familyScope_.size(); i++) { size += com.google.protobuf.CodedOutputStream @@ -9618,8 +9618,8 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALEdit other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALEdit) obj; boolean result = true; - result = result && getKeyValueList() - .equals(other.getKeyValueList()); + result = result && getKeyValueBytesList() + .equals(other.getKeyValueBytesList()); result = result && getFamilyScopeList() .equals(other.getFamilyScopeList()); result = result && @@ -9631,9 +9631,9 @@ public final class AdminProtos { public int hashCode() { int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getKeyValueCount() > 0) { - hash = (37 * hash) + KEYVALUE_FIELD_NUMBER; - hash = (53 * hash) + getKeyValueList().hashCode(); + if (getKeyValueBytesCount() > 0) { + hash = (37 * hash) + KEYVALUEBYTES_FIELD_NUMBER; + hash = (53 * hash) + getKeyValueBytesList().hashCode(); } if (getFamilyScopeCount() > 0) { hash = (37 * hash) + FAMILYSCOPE_FIELD_NUMBER; @@ -9756,7 +9756,7 @@ public final class AdminProtos { public Builder clear() { super.clear(); - keyValue_ = java.util.Collections.emptyList();; + keyValueBytes_ = java.util.Collections.emptyList();; bitField0_ = (bitField0_ & ~0x00000001); if (familyScopeBuilder_ == null) { familyScope_ = java.util.Collections.emptyList(); @@ -9802,10 +9802,10 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALEdit result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALEdit(this); int from_bitField0_ = bitField0_; if (((bitField0_ & 0x00000001) == 0x00000001)) { - keyValue_ = java.util.Collections.unmodifiableList(keyValue_); + keyValueBytes_ = 
java.util.Collections.unmodifiableList(keyValueBytes_); bitField0_ = (bitField0_ & ~0x00000001); } - result.keyValue_ = keyValue_; + result.keyValueBytes_ = keyValueBytes_; if (familyScopeBuilder_ == null) { if (((bitField0_ & 0x00000002) == 0x00000002)) { familyScope_ = java.util.Collections.unmodifiableList(familyScope_); @@ -9830,13 +9830,13 @@ public final class AdminProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALEdit other) { if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALEdit.getDefaultInstance()) return this; - if (!other.keyValue_.isEmpty()) { - if (keyValue_.isEmpty()) { - keyValue_ = other.keyValue_; + if (!other.keyValueBytes_.isEmpty()) { + if (keyValueBytes_.isEmpty()) { + keyValueBytes_ = other.keyValueBytes_; bitField0_ = (bitField0_ & ~0x00000001); } else { - ensureKeyValueIsMutable(); - keyValue_.addAll(other.keyValue_); + ensureKeyValueBytesIsMutable(); + keyValueBytes_.addAll(other.keyValueBytes_); } onChanged(); } @@ -9904,8 +9904,8 @@ public final class AdminProtos { break; } case 10: { - ensureKeyValueIsMutable(); - keyValue_.add(input.readBytes()); + ensureKeyValueBytesIsMutable(); + keyValueBytes_.add(input.readBytes()); break; } case 18: { @@ -9920,52 +9920,52 @@ public final class AdminProtos { private int bitField0_; - // repeated bytes keyValue = 1; - private java.util.List keyValue_ = java.util.Collections.emptyList();; - private void ensureKeyValueIsMutable() { + // repeated bytes keyValueBytes = 1; + private java.util.List keyValueBytes_ = java.util.Collections.emptyList();; + private void ensureKeyValueBytesIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { - keyValue_ = new java.util.ArrayList(keyValue_); + keyValueBytes_ = new java.util.ArrayList(keyValueBytes_); bitField0_ |= 0x00000001; } } public java.util.List - getKeyValueList() { - return java.util.Collections.unmodifiableList(keyValue_); + getKeyValueBytesList() { + return java.util.Collections.unmodifiableList(keyValueBytes_); } - public int getKeyValueCount() { - return keyValue_.size(); + public int getKeyValueBytesCount() { + return keyValueBytes_.size(); } - public com.google.protobuf.ByteString getKeyValue(int index) { - return keyValue_.get(index); + public com.google.protobuf.ByteString getKeyValueBytes(int index) { + return keyValueBytes_.get(index); } - public Builder setKeyValue( + public Builder setKeyValueBytes( int index, com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - ensureKeyValueIsMutable(); - keyValue_.set(index, value); + ensureKeyValueBytesIsMutable(); + keyValueBytes_.set(index, value); onChanged(); return this; } - public Builder addKeyValue(com.google.protobuf.ByteString value) { + public Builder addKeyValueBytes(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - ensureKeyValueIsMutable(); - keyValue_.add(value); + ensureKeyValueBytesIsMutable(); + keyValueBytes_.add(value); onChanged(); return this; } - public Builder addAllKeyValue( + public Builder addAllKeyValueBytes( java.lang.Iterable values) { - ensureKeyValueIsMutable(); - super.addAll(values, keyValue_); + ensureKeyValueBytesIsMutable(); + super.addAll(values, keyValueBytes_); onChanged(); return this; } - public Builder clearKeyValue() { - keyValue_ = java.util.Collections.emptyList();; + public Builder clearKeyValueBytes() { + keyValueBytes_ = java.util.Collections.emptyList();; bitField0_ = (bitField0_ & ~0x00000001); 
onChanged(); return this; @@ -10169,17 +10169,17 @@ public final class AdminProtos { } private int bitField0_; - // required .WALEntry.WALKey walKey = 1; - public static final int WALKEY_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey walKey_; - public boolean hasWalKey() { + // required .WALEntry.WALKey key = 1; + public static final int KEY_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey key_; + public boolean hasKey() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey getWalKey() { - return walKey_; + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey getKey() { + return key_; } - public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKeyOrBuilder getWalKeyOrBuilder() { - return walKey_; + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKeyOrBuilder getKeyOrBuilder() { + return key_; } // required .WALEntry.WALEdit edit = 2; @@ -10196,7 +10196,7 @@ public final class AdminProtos { } private void initFields() { - walKey_ = org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey.getDefaultInstance(); + key_ = org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey.getDefaultInstance(); edit_ = org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALEdit.getDefaultInstance(); } private byte memoizedIsInitialized = -1; @@ -10204,7 +10204,7 @@ public final class AdminProtos { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasWalKey()) { + if (!hasKey()) { memoizedIsInitialized = 0; return false; } @@ -10212,7 +10212,7 @@ public final class AdminProtos { memoizedIsInitialized = 0; return false; } - if (!getWalKey().isInitialized()) { + if (!getKey().isInitialized()) { memoizedIsInitialized = 0; return false; } @@ -10228,7 +10228,7 @@ public final class AdminProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, walKey_); + output.writeMessage(1, key_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, edit_); @@ -10244,7 +10244,7 @@ public final class AdminProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, walKey_); + .computeMessageSize(1, key_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream @@ -10273,10 +10273,10 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry) obj; boolean result = true; - result = result && (hasWalKey() == other.hasWalKey()); - if (hasWalKey()) { - result = result && getWalKey() - .equals(other.getWalKey()); + result = result && (hasKey() == other.hasKey()); + if (hasKey()) { + result = result && getKey() + .equals(other.getKey()); } result = result && (hasEdit() == other.hasEdit()); if (hasEdit()) { @@ -10292,9 +10292,9 @@ public final class AdminProtos { public int hashCode() { int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasWalKey()) { - hash = (37 * hash) + WALKEY_FIELD_NUMBER; - hash = (53 * hash) + getWalKey().hashCode(); + if (hasKey()) { + hash = (37 * hash) + KEY_FIELD_NUMBER; + hash = (53 * hash) + getKey().hashCode(); } if 
(hasEdit()) { hash = (37 * hash) + EDIT_FIELD_NUMBER; @@ -10408,7 +10408,7 @@ public final class AdminProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getWalKeyFieldBuilder(); + getKeyFieldBuilder(); getEditFieldBuilder(); } } @@ -10418,10 +10418,10 @@ public final class AdminProtos { public Builder clear() { super.clear(); - if (walKeyBuilder_ == null) { - walKey_ = org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey.getDefaultInstance(); + if (keyBuilder_ == null) { + key_ = org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey.getDefaultInstance(); } else { - walKeyBuilder_.clear(); + keyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); if (editBuilder_ == null) { @@ -10471,10 +10471,10 @@ public final class AdminProtos { if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - if (walKeyBuilder_ == null) { - result.walKey_ = walKey_; + if (keyBuilder_ == null) { + result.key_ = key_; } else { - result.walKey_ = walKeyBuilder_.build(); + result.key_ = keyBuilder_.build(); } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; @@ -10500,8 +10500,8 @@ public final class AdminProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry other) { if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.getDefaultInstance()) return this; - if (other.hasWalKey()) { - mergeWalKey(other.getWalKey()); + if (other.hasKey()) { + mergeKey(other.getKey()); } if (other.hasEdit()) { mergeEdit(other.getEdit()); @@ -10511,7 +10511,7 @@ public final class AdminProtos { } public final boolean isInitialized() { - if (!hasWalKey()) { + if (!hasKey()) { return false; } @@ -10519,7 +10519,7 @@ public final class AdminProtos { return false; } - if (!getWalKey().isInitialized()) { + if (!getKey().isInitialized()) { return false; } @@ -10555,11 +10555,11 @@ public final class AdminProtos { } case 10: { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey.newBuilder(); - if (hasWalKey()) { - subBuilder.mergeFrom(getWalKey()); + if (hasKey()) { + subBuilder.mergeFrom(getKey()); } input.readMessage(subBuilder, extensionRegistry); - setWalKey(subBuilder.buildPartial()); + setKey(subBuilder.buildPartial()); break; } case 18: { @@ -10577,94 +10577,94 @@ public final class AdminProtos { private int bitField0_; - // required .WALEntry.WALKey walKey = 1; - private org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey walKey_ = org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey.getDefaultInstance(); + // required .WALEntry.WALKey key = 1; + private org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey key_ = org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey.Builder, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKeyOrBuilder> walKeyBuilder_; - public boolean hasWalKey() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey.Builder, 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKeyOrBuilder> keyBuilder_; + public boolean hasKey() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey getWalKey() { - if (walKeyBuilder_ == null) { - return walKey_; + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey getKey() { + if (keyBuilder_ == null) { + return key_; } else { - return walKeyBuilder_.getMessage(); + return keyBuilder_.getMessage(); } } - public Builder setWalKey(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey value) { - if (walKeyBuilder_ == null) { + public Builder setKey(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey value) { + if (keyBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - walKey_ = value; + key_ = value; onChanged(); } else { - walKeyBuilder_.setMessage(value); + keyBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } - public Builder setWalKey( + public Builder setKey( org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey.Builder builderForValue) { - if (walKeyBuilder_ == null) { - walKey_ = builderForValue.build(); + if (keyBuilder_ == null) { + key_ = builderForValue.build(); onChanged(); } else { - walKeyBuilder_.setMessage(builderForValue.build()); + keyBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } - public Builder mergeWalKey(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey value) { - if (walKeyBuilder_ == null) { + public Builder mergeKey(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey value) { + if (keyBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && - walKey_ != org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey.getDefaultInstance()) { - walKey_ = - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey.newBuilder(walKey_).mergeFrom(value).buildPartial(); + key_ != org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey.getDefaultInstance()) { + key_ = + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey.newBuilder(key_).mergeFrom(value).buildPartial(); } else { - walKey_ = value; + key_ = value; } onChanged(); } else { - walKeyBuilder_.mergeFrom(value); + keyBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } - public Builder clearWalKey() { - if (walKeyBuilder_ == null) { - walKey_ = org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey.getDefaultInstance(); + public Builder clearKey() { + if (keyBuilder_ == null) { + key_ = org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey.getDefaultInstance(); onChanged(); } else { - walKeyBuilder_.clear(); + keyBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } - public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey.Builder getWalKeyBuilder() { + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey.Builder getKeyBuilder() { bitField0_ |= 0x00000001; onChanged(); - return getWalKeyFieldBuilder().getBuilder(); + return getKeyFieldBuilder().getBuilder(); } - public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKeyOrBuilder getWalKeyOrBuilder() { - if (walKeyBuilder_ != null) { - return walKeyBuilder_.getMessageOrBuilder(); + public 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKeyOrBuilder getKeyOrBuilder() { + if (keyBuilder_ != null) { + return keyBuilder_.getMessageOrBuilder(); } else { - return walKey_; + return key_; } } private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey.Builder, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKeyOrBuilder> - getWalKeyFieldBuilder() { - if (walKeyBuilder_ == null) { - walKeyBuilder_ = new com.google.protobuf.SingleFieldBuilder< + getKeyFieldBuilder() { + if (keyBuilder_ == null) { + keyBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKey.Builder, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALKeyOrBuilder>( - walKey_, + key_, getParentForChildren(), isClean()); - walKey_ = null; + key_ = null; } - return walKeyBuilder_; + return keyBuilder_; } // required .WALEntry.WALEdit edit = 2; @@ -10771,14 +10771,14 @@ public final class AdminProtos { public interface ReplicateWALEntryRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - // repeated .WALEntry walEntry = 1; + // repeated .WALEntry entry = 1; java.util.List - getWalEntryList(); - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry getWalEntry(int index); - int getWalEntryCount(); + getEntryList(); + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry getEntry(int index); + int getEntryCount(); java.util.List - getWalEntryOrBuilderList(); - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntryOrBuilder getWalEntryOrBuilder( + getEntryOrBuilderList(); + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntryOrBuilder getEntryOrBuilder( int index); } public static final class ReplicateWALEntryRequest extends @@ -10809,37 +10809,37 @@ public final class AdminProtos { return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_ReplicateWALEntryRequest_fieldAccessorTable; } - // repeated .WALEntry walEntry = 1; - public static final int WALENTRY_FIELD_NUMBER = 1; - private java.util.List walEntry_; - public java.util.List getWalEntryList() { - return walEntry_; + // repeated .WALEntry entry = 1; + public static final int ENTRY_FIELD_NUMBER = 1; + private java.util.List entry_; + public java.util.List getEntryList() { + return entry_; } public java.util.List - getWalEntryOrBuilderList() { - return walEntry_; + getEntryOrBuilderList() { + return entry_; } - public int getWalEntryCount() { - return walEntry_.size(); + public int getEntryCount() { + return entry_.size(); } - public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry getWalEntry(int index) { - return walEntry_.get(index); + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry getEntry(int index) { + return entry_.get(index); } - public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntryOrBuilder getWalEntryOrBuilder( + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntryOrBuilder getEntryOrBuilder( int index) { - return walEntry_.get(index); + return entry_.get(index); } private void initFields() { - walEntry_ = java.util.Collections.emptyList(); + entry_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = 
memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - for (int i = 0; i < getWalEntryCount(); i++) { - if (!getWalEntry(i).isInitialized()) { + for (int i = 0; i < getEntryCount(); i++) { + if (!getEntry(i).isInitialized()) { memoizedIsInitialized = 0; return false; } @@ -10851,8 +10851,8 @@ public final class AdminProtos { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - for (int i = 0; i < walEntry_.size(); i++) { - output.writeMessage(1, walEntry_.get(i)); + for (int i = 0; i < entry_.size(); i++) { + output.writeMessage(1, entry_.get(i)); } getUnknownFields().writeTo(output); } @@ -10863,9 +10863,9 @@ public final class AdminProtos { if (size != -1) return size; size = 0; - for (int i = 0; i < walEntry_.size(); i++) { + for (int i = 0; i < entry_.size(); i++) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, walEntry_.get(i)); + .computeMessageSize(1, entry_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -10890,8 +10890,8 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest) obj; boolean result = true; - result = result && getWalEntryList() - .equals(other.getWalEntryList()); + result = result && getEntryList() + .equals(other.getEntryList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -10901,9 +10901,9 @@ public final class AdminProtos { public int hashCode() { int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getWalEntryCount() > 0) { - hash = (37 * hash) + WALENTRY_FIELD_NUMBER; - hash = (53 * hash) + getWalEntryList().hashCode(); + if (getEntryCount() > 0) { + hash = (37 * hash) + ENTRY_FIELD_NUMBER; + hash = (53 * hash) + getEntryList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); return hash; @@ -11013,7 +11013,7 @@ public final class AdminProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getWalEntryFieldBuilder(); + getEntryFieldBuilder(); } } private static Builder create() { @@ -11022,11 +11022,11 @@ public final class AdminProtos { public Builder clear() { super.clear(); - if (walEntryBuilder_ == null) { - walEntry_ = java.util.Collections.emptyList(); + if (entryBuilder_ == null) { + entry_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); } else { - walEntryBuilder_.clear(); + entryBuilder_.clear(); } return this; } @@ -11065,14 +11065,14 @@ public final class AdminProtos { public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest buildPartial() { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest(this); int from_bitField0_ = bitField0_; - if (walEntryBuilder_ == null) { + if (entryBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001)) { - walEntry_ = java.util.Collections.unmodifiableList(walEntry_); + entry_ = java.util.Collections.unmodifiableList(entry_); bitField0_ = (bitField0_ & ~0x00000001); } - result.walEntry_ = walEntry_; + result.entry_ = entry_; } else { - result.walEntry_ = walEntryBuilder_.build(); + result.entry_ = entryBuilder_.build(); } onBuilt(); return result; @@ -11089,29 +11089,29 @@ 
public final class AdminProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest other) { if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance()) return this; - if (walEntryBuilder_ == null) { - if (!other.walEntry_.isEmpty()) { - if (walEntry_.isEmpty()) { - walEntry_ = other.walEntry_; + if (entryBuilder_ == null) { + if (!other.entry_.isEmpty()) { + if (entry_.isEmpty()) { + entry_ = other.entry_; bitField0_ = (bitField0_ & ~0x00000001); } else { - ensureWalEntryIsMutable(); - walEntry_.addAll(other.walEntry_); + ensureEntryIsMutable(); + entry_.addAll(other.entry_); } onChanged(); } } else { - if (!other.walEntry_.isEmpty()) { - if (walEntryBuilder_.isEmpty()) { - walEntryBuilder_.dispose(); - walEntryBuilder_ = null; - walEntry_ = other.walEntry_; + if (!other.entry_.isEmpty()) { + if (entryBuilder_.isEmpty()) { + entryBuilder_.dispose(); + entryBuilder_ = null; + entry_ = other.entry_; bitField0_ = (bitField0_ & ~0x00000001); - walEntryBuilder_ = + entryBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getWalEntryFieldBuilder() : null; + getEntryFieldBuilder() : null; } else { - walEntryBuilder_.addAllMessages(other.walEntry_); + entryBuilder_.addAllMessages(other.entry_); } } } @@ -11120,8 +11120,8 @@ public final class AdminProtos { } public final boolean isInitialized() { - for (int i = 0; i < getWalEntryCount(); i++) { - if (!getWalEntry(i).isInitialized()) { + for (int i = 0; i < getEntryCount(); i++) { + if (!getEntry(i).isInitialized()) { return false; } @@ -11155,7 +11155,7 @@ public final class AdminProtos { case 10: { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.newBuilder(); input.readMessage(subBuilder, extensionRegistry); - addWalEntry(subBuilder.buildPartial()); + addEntry(subBuilder.buildPartial()); break; } } @@ -11164,190 +11164,190 @@ public final class AdminProtos { private int bitField0_; - // repeated .WALEntry walEntry = 1; - private java.util.List walEntry_ = + // repeated .WALEntry entry = 1; + private java.util.List entry_ = java.util.Collections.emptyList(); - private void ensureWalEntryIsMutable() { + private void ensureEntryIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { - walEntry_ = new java.util.ArrayList(walEntry_); + entry_ = new java.util.ArrayList(entry_); bitField0_ |= 0x00000001; } } private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.Builder, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntryOrBuilder> walEntryBuilder_; + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.Builder, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntryOrBuilder> entryBuilder_; - public java.util.List getWalEntryList() { - if (walEntryBuilder_ == null) { - return java.util.Collections.unmodifiableList(walEntry_); + public java.util.List getEntryList() { + if (entryBuilder_ == null) { + return java.util.Collections.unmodifiableList(entry_); } else { - return walEntryBuilder_.getMessageList(); + return entryBuilder_.getMessageList(); } } - public int getWalEntryCount() { - if (walEntryBuilder_ == null) { - return walEntry_.size(); + public int getEntryCount() { + if (entryBuilder_ == 
null) { + return entry_.size(); } else { - return walEntryBuilder_.getCount(); + return entryBuilder_.getCount(); } } - public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry getWalEntry(int index) { - if (walEntryBuilder_ == null) { - return walEntry_.get(index); + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry getEntry(int index) { + if (entryBuilder_ == null) { + return entry_.get(index); } else { - return walEntryBuilder_.getMessage(index); + return entryBuilder_.getMessage(index); } } - public Builder setWalEntry( + public Builder setEntry( int index, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry value) { - if (walEntryBuilder_ == null) { + if (entryBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureWalEntryIsMutable(); - walEntry_.set(index, value); + ensureEntryIsMutable(); + entry_.set(index, value); onChanged(); } else { - walEntryBuilder_.setMessage(index, value); + entryBuilder_.setMessage(index, value); } return this; } - public Builder setWalEntry( + public Builder setEntry( int index, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.Builder builderForValue) { - if (walEntryBuilder_ == null) { - ensureWalEntryIsMutable(); - walEntry_.set(index, builderForValue.build()); + if (entryBuilder_ == null) { + ensureEntryIsMutable(); + entry_.set(index, builderForValue.build()); onChanged(); } else { - walEntryBuilder_.setMessage(index, builderForValue.build()); + entryBuilder_.setMessage(index, builderForValue.build()); } return this; } - public Builder addWalEntry(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry value) { - if (walEntryBuilder_ == null) { + public Builder addEntry(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry value) { + if (entryBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureWalEntryIsMutable(); - walEntry_.add(value); + ensureEntryIsMutable(); + entry_.add(value); onChanged(); } else { - walEntryBuilder_.addMessage(value); + entryBuilder_.addMessage(value); } return this; } - public Builder addWalEntry( + public Builder addEntry( int index, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry value) { - if (walEntryBuilder_ == null) { + if (entryBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureWalEntryIsMutable(); - walEntry_.add(index, value); + ensureEntryIsMutable(); + entry_.add(index, value); onChanged(); } else { - walEntryBuilder_.addMessage(index, value); + entryBuilder_.addMessage(index, value); } return this; } - public Builder addWalEntry( + public Builder addEntry( org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.Builder builderForValue) { - if (walEntryBuilder_ == null) { - ensureWalEntryIsMutable(); - walEntry_.add(builderForValue.build()); + if (entryBuilder_ == null) { + ensureEntryIsMutable(); + entry_.add(builderForValue.build()); onChanged(); } else { - walEntryBuilder_.addMessage(builderForValue.build()); + entryBuilder_.addMessage(builderForValue.build()); } return this; } - public Builder addWalEntry( + public Builder addEntry( int index, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.Builder builderForValue) { - if (walEntryBuilder_ == null) { - ensureWalEntryIsMutable(); - walEntry_.add(index, builderForValue.build()); + if (entryBuilder_ == null) { + ensureEntryIsMutable(); + entry_.add(index, builderForValue.build()); onChanged(); } else { - walEntryBuilder_.addMessage(index, 
builderForValue.build()); + entryBuilder_.addMessage(index, builderForValue.build()); } return this; } - public Builder addAllWalEntry( + public Builder addAllEntry( java.lang.Iterable values) { - if (walEntryBuilder_ == null) { - ensureWalEntryIsMutable(); - super.addAll(values, walEntry_); + if (entryBuilder_ == null) { + ensureEntryIsMutable(); + super.addAll(values, entry_); onChanged(); } else { - walEntryBuilder_.addAllMessages(values); + entryBuilder_.addAllMessages(values); } return this; } - public Builder clearWalEntry() { - if (walEntryBuilder_ == null) { - walEntry_ = java.util.Collections.emptyList(); + public Builder clearEntry() { + if (entryBuilder_ == null) { + entry_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { - walEntryBuilder_.clear(); + entryBuilder_.clear(); } return this; } - public Builder removeWalEntry(int index) { - if (walEntryBuilder_ == null) { - ensureWalEntryIsMutable(); - walEntry_.remove(index); + public Builder removeEntry(int index) { + if (entryBuilder_ == null) { + ensureEntryIsMutable(); + entry_.remove(index); onChanged(); } else { - walEntryBuilder_.remove(index); + entryBuilder_.remove(index); } return this; } - public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.Builder getWalEntryBuilder( + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.Builder getEntryBuilder( int index) { - return getWalEntryFieldBuilder().getBuilder(index); + return getEntryFieldBuilder().getBuilder(index); } - public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntryOrBuilder getWalEntryOrBuilder( + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntryOrBuilder getEntryOrBuilder( int index) { - if (walEntryBuilder_ == null) { - return walEntry_.get(index); } else { - return walEntryBuilder_.getMessageOrBuilder(index); + if (entryBuilder_ == null) { + return entry_.get(index); } else { + return entryBuilder_.getMessageOrBuilder(index); } } public java.util.List - getWalEntryOrBuilderList() { - if (walEntryBuilder_ != null) { - return walEntryBuilder_.getMessageOrBuilderList(); + getEntryOrBuilderList() { + if (entryBuilder_ != null) { + return entryBuilder_.getMessageOrBuilderList(); } else { - return java.util.Collections.unmodifiableList(walEntry_); + return java.util.Collections.unmodifiableList(entry_); } } - public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.Builder addWalEntryBuilder() { - return getWalEntryFieldBuilder().addBuilder( + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.Builder addEntryBuilder() { + return getEntryFieldBuilder().addBuilder( org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.Builder addWalEntryBuilder( + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.Builder addEntryBuilder( int index) { - return getWalEntryFieldBuilder().addBuilder( + return getEntryFieldBuilder().addBuilder( index, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.getDefaultInstance()); } public java.util.List - getWalEntryBuilderList() { - return getWalEntryFieldBuilder().getBuilderList(); + getEntryBuilderList() { + return getEntryFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.Builder, 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntryOrBuilder> - getWalEntryFieldBuilder() { - if (walEntryBuilder_ == null) { - walEntryBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + getEntryFieldBuilder() { + if (entryBuilder_ == null) { + entryBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.Builder, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntryOrBuilder>( - walEntry_, + entry_, ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); - walEntry_ = null; + entry_ = null; } - return walEntryBuilder_; + return entryBuilder_; } // @@protoc_insertion_point(builder_scope:ReplicateWALEntryRequest) @@ -13873,10 +13873,10 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest request, com.google.protobuf.RpcCallback done); - public abstract void getStoreFileList( + public abstract void getStoreFile( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest request, - com.google.protobuf.RpcCallback done); + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest request, + com.google.protobuf.RpcCallback done); public abstract void getOnlineRegion( com.google.protobuf.RpcController controller, @@ -13942,11 +13942,11 @@ public final class AdminProtos { } @java.lang.Override - public void getStoreFileList( + public void getStoreFile( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest request, - com.google.protobuf.RpcCallback done) { - impl.getStoreFileList(controller, request, done); + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest request, + com.google.protobuf.RpcCallback done) { + impl.getStoreFile(controller, request, done); } @java.lang.Override @@ -14054,7 +14054,7 @@ public final class AdminProtos { case 0: return impl.getRegionInfo(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest)request); case 1: - return impl.getStoreFileList(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest)request); + return impl.getStoreFile(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest)request); case 2: return impl.getOnlineRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest)request); case 3: @@ -14092,7 +14092,7 @@ public final class AdminProtos { case 0: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest.getDefaultInstance(); case 1: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest.getDefaultInstance(); case 2: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest.getDefaultInstance(); case 3: @@ -14130,7 +14130,7 @@ public final class AdminProtos { case 0: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.getDefaultInstance(); case 1: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse.getDefaultInstance(); case 2: return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse.getDefaultInstance(); case 3: @@ -14164,10 +14164,10 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest request, com.google.protobuf.RpcCallback done); - public abstract void getStoreFileList( + public abstract void getStoreFile( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest request, - com.google.protobuf.RpcCallback done); + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest request, + com.google.protobuf.RpcCallback done); public abstract void getOnlineRegion( com.google.protobuf.RpcController controller, @@ -14247,8 +14247,8 @@ public final class AdminProtos { done)); return; case 1: - this.getStoreFileList(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest)request, - com.google.protobuf.RpcUtil.specializeCallback( + this.getStoreFile(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( done)); return; case 2: @@ -14318,7 +14318,7 @@ public final class AdminProtos { case 0: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest.getDefaultInstance(); case 1: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest.getDefaultInstance(); case 2: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest.getDefaultInstance(); case 3: @@ -14356,7 +14356,7 @@ public final class AdminProtos { case 0: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.getDefaultInstance(); case 1: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse.getDefaultInstance(); case 2: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse.getDefaultInstance(); case 3: @@ -14413,19 +14413,19 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.getDefaultInstance())); } - public void getStoreFileList( + public void getStoreFile( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest request, - com.google.protobuf.RpcCallback done) { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest request, + com.google.protobuf.RpcCallback done) { channel.callMethod( getDescriptor().getMethods().get(1), controller, request, - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse.getDefaultInstance(), + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse.getDefaultInstance(), com.google.protobuf.RpcUtil.generalizeCallback( done, - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse.class, - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse.getDefaultInstance())); + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse.class, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse.getDefaultInstance())); } public void getOnlineRegion( @@ -14590,9 +14590,9 @@ public 
final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse getStoreFileList( + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse getStoreFile( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest request) + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest request) throws com.google.protobuf.ServiceException; public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse getOnlineRegion( @@ -14665,15 +14665,15 @@ public final class AdminProtos { } - public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse getStoreFileList( + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse getStoreFile( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest request) + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest request) throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse) channel.callBlockingMethod( + return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse) channel.callBlockingMethod( getDescriptor().getMethods().get(1), controller, request, - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse.getDefaultInstance()); + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse.getDefaultInstance()); } @@ -14810,15 +14810,15 @@ public final class AdminProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_GetRegionInfoResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetStoreFileListRequest_descriptor; + internal_static_GetStoreFileRequest_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetStoreFileListRequest_fieldAccessorTable; + internal_static_GetStoreFileRequest_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor - internal_static_GetStoreFileListResponse_descriptor; + internal_static_GetStoreFileResponse_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_GetStoreFileListResponse_fieldAccessorTable; + internal_static_GetStoreFileResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_GetOnlineRegionRequest_descriptor; private static @@ -14956,72 +14956,71 @@ public final class AdminProtos { "\n\013Admin.proto\032\013hbase.proto\"8\n\024GetRegionI" + "nfoRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpeci" + "fier\"8\n\025GetRegionInfoResponse\022\037\n\nregionI" + - "nfo\030\001 \002(\0132\013.RegionInfo\"Q\n\027GetStoreFileLi" + - "stRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpecif" + - "ier\022\024\n\014columnFamily\030\002 \003(\014\"-\n\030GetStoreFil" + - "eListResponse\022\021\n\tstoreFile\030\001 \003(\t\"\030\n\026GetO" + - "nlineRegionRequest\":\n\027GetOnlineRegionRes" + - "ponse\022\037\n\nregionInfo\030\001 \003(\0132\013.RegionInfo\"S" + - "\n\021OpenRegionRequest\022 \n\006region\030\001 \003(\0132\020.Re", - "gionSpecifier\022\034\n\024versionOfOfflineNode\030\002 
" + - "\001(\r\"\234\001\n\022OpenRegionResponse\022<\n\014openingSta" + - "te\030\001 \003(\0162&.OpenRegionResponse.RegionOpen" + - "ingState\"H\n\022RegionOpeningState\022\n\n\006OPENED" + - "\020\000\022\022\n\016ALREADY_OPENED\020\001\022\022\n\016FAILED_OPENING" + - "\020\002\"r\n\022CloseRegionRequest\022 \n\006region\030\001 \002(\013" + - "2\020.RegionSpecifier\022\034\n\024versionOfClosingNo" + - "de\030\002 \001(\r\022\034\n\016transitionInZK\030\003 \001(\010:\004true\"%" + - "\n\023CloseRegionResponse\022\016\n\006closed\030\001 \002(\010\"M\n" + - "\022FlushRegionRequest\022 \n\006region\030\001 \002(\0132\020.Re", - "gionSpecifier\022\025\n\rifOlderThanTs\030\002 \001(\004\"=\n\023" + - "FlushRegionResponse\022\025\n\rlastFlushTime\030\001 \002" + - "(\004\022\017\n\007flushed\030\002 \001(\010\"J\n\022SplitRegionReques" + - "t\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\022\n\ns" + - "plitPoint\030\002 \001(\014\"\025\n\023SplitRegionResponse\"G" + - "\n\024CompactRegionRequest\022 \n\006region\030\001 \002(\0132\020" + - ".RegionSpecifier\022\r\n\005major\030\002 \001(\010\"\027\n\025Compa" + - "ctRegionResponse\"1\n\004UUID\022\024\n\014leastSigBits" + - "\030\001 \002(\004\022\023\n\013mostSigBits\030\002 \002(\004\"\266\003\n\010WALEntry" + - "\022 \n\006walKey\030\001 \002(\0132\020.WALEntry.WALKey\022\037\n\004ed", - "it\030\002 \002(\0132\021.WALEntry.WALEdit\032~\n\006WALKey\022\031\n" + - "\021encodedRegionName\030\001 \002(\014\022\021\n\ttableName\030\002 " + - "\002(\014\022\031\n\021logSequenceNumber\030\003 \002(\004\022\021\n\twriteT" + - "ime\030\004 \002(\004\022\030\n\tclusterId\030\005 \001(\0132\005.UUID\032\346\001\n\007" + - "WALEdit\022\020\n\010keyValue\030\001 \003(\014\0222\n\013familyScope" + - "\030\002 \003(\0132\035.WALEntry.WALEdit.FamilyScope\032M\n" + - "\013FamilyScope\022\016\n\006family\030\001 \002(\014\022.\n\tscopeTyp" + - "e\030\002 \002(\0162\033.WALEntry.WALEdit.ScopeType\"F\n\t" + - "ScopeType\022\033\n\027REPLICATION_SCOPE_LOCAL\020\000\022\034" + - "\n\030REPLICATION_SCOPE_GLOBAL\020\001\"7\n\030Replicat", - "eWALEntryRequest\022\033\n\010walEntry\030\001 \003(\0132\t.WAL" + - "Entry\"\033\n\031ReplicateWALEntryResponse\"\026\n\024Ro" + - "llWALWriterRequest\".\n\025RollWALWriterRespo" + - "nse\022\025\n\rregionToFlush\030\001 \003(\014\"#\n\021StopServer" + - "Request\022\016\n\006reason\030\001 \002(\t\"\024\n\022StopServerRes" + - "ponse\"\026\n\024GetServerInfoRequest\"8\n\025GetServ" + - "erInfoResponse\022\037\n\nserverName\030\001 \002(\0132\013.Ser" + - "verName2\205\006\n\014AdminService\022>\n\rgetRegionInf" + - "o\022\025.GetRegionInfoRequest\032\026.GetRegionInfo" + - "Response\022G\n\020getStoreFileList\022\030.GetStoreF", - "ileListRequest\032\031.GetStoreFileListRespons" + - "e\022D\n\017getOnlineRegion\022\027.GetOnlineRegionRe" + - "quest\032\030.GetOnlineRegionResponse\0225\n\nopenR" + - "egion\022\022.OpenRegionRequest\032\023.OpenRegionRe" + - "sponse\0228\n\013closeRegion\022\023.CloseRegionReque" + - "st\032\024.CloseRegionResponse\0228\n\013flushRegion\022" + - "\023.FlushRegionRequest\032\024.FlushRegionRespon" + - "se\0228\n\013splitRegion\022\023.SplitRegionRequest\032\024" + - ".SplitRegionResponse\022>\n\rcompactRegion\022\025." 
+ - "CompactRegionRequest\032\026.CompactRegionResp", - "onse\022J\n\021replicateWALEntry\022\031.ReplicateWAL" + - "EntryRequest\032\032.ReplicateWALEntryResponse" + - "\022>\n\rrollWALWriter\022\025.RollWALWriterRequest" + - "\032\026.RollWALWriterResponse\022>\n\rgetServerInf" + - "o\022\025.GetServerInfoRequest\032\026.GetServerInfo" + - "Response\0225\n\nstopServer\022\022.StopServerReque" + - "st\032\023.StopServerResponseBA\n*org.apache.ha" + - "doop.hbase.protobuf.generatedB\013AdminProt" + - "osH\001\210\001\001\240\001\001" + "nfo\030\001 \002(\0132\013.RegionInfo\"G\n\023GetStoreFileRe" + + "quest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022" + + "\016\n\006family\030\002 \003(\014\")\n\024GetStoreFileResponse\022" + + "\021\n\tstoreFile\030\001 \003(\t\"\030\n\026GetOnlineRegionReq" + + "uest\":\n\027GetOnlineRegionResponse\022\037\n\nregio" + + "nInfo\030\001 \003(\0132\013.RegionInfo\"N\n\021OpenRegionRe" + + "quest\022\033\n\006region\030\001 \003(\0132\013.RegionInfo\022\034\n\024ve", + "rsionOfOfflineNode\030\002 \001(\r\"\234\001\n\022OpenRegionR" + + "esponse\022<\n\014openingState\030\001 \003(\0162&.OpenRegi" + + "onResponse.RegionOpeningState\"H\n\022RegionO" + + "peningState\022\n\n\006OPENED\020\000\022\022\n\016ALREADY_OPENE" + + "D\020\001\022\022\n\016FAILED_OPENING\020\002\"r\n\022CloseRegionRe" + + "quest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022" + + "\034\n\024versionOfClosingNode\030\002 \001(\r\022\034\n\016transit" + + "ionInZK\030\003 \001(\010:\004true\"%\n\023CloseRegionRespon" + + "se\022\016\n\006closed\030\001 \002(\010\"M\n\022FlushRegionRequest" + + "\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\025\n\rif", + "OlderThanTs\030\002 \001(\004\"=\n\023FlushRegionResponse" + + "\022\025\n\rlastFlushTime\030\001 \002(\004\022\017\n\007flushed\030\002 \001(\010" + + "\"J\n\022SplitRegionRequest\022 \n\006region\030\001 \002(\0132\020" + + ".RegionSpecifier\022\022\n\nsplitPoint\030\002 \001(\014\"\025\n\023" + + "SplitRegionResponse\"G\n\024CompactRegionRequ" + + "est\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\r\n" + + "\005major\030\002 \001(\010\"\027\n\025CompactRegionResponse\"1\n" + + "\004UUID\022\024\n\014leastSigBits\030\001 \002(\004\022\023\n\013mostSigBi" + + "ts\030\002 \002(\004\"\270\003\n\010WALEntry\022\035\n\003key\030\001 \002(\0132\020.WAL" + + "Entry.WALKey\022\037\n\004edit\030\002 \002(\0132\021.WALEntry.WA", + "LEdit\032~\n\006WALKey\022\031\n\021encodedRegionName\030\001 \002" + + "(\014\022\021\n\ttableName\030\002 \002(\014\022\031\n\021logSequenceNumb" + + "er\030\003 \002(\004\022\021\n\twriteTime\030\004 \002(\004\022\030\n\tclusterId" + + "\030\005 \001(\0132\005.UUID\032\353\001\n\007WALEdit\022\025\n\rkeyValueByt" + + "es\030\001 \003(\014\0222\n\013familyScope\030\002 \003(\0132\035.WALEntry" + + ".WALEdit.FamilyScope\032M\n\013FamilyScope\022\016\n\006f" + + "amily\030\001 \002(\014\022.\n\tscopeType\030\002 \002(\0162\033.WALEntr" + + "y.WALEdit.ScopeType\"F\n\tScopeType\022\033\n\027REPL" + + "ICATION_SCOPE_LOCAL\020\000\022\034\n\030REPLICATION_SCO" + + "PE_GLOBAL\020\001\"4\n\030ReplicateWALEntryRequest\022", + "\030\n\005entry\030\001 \003(\0132\t.WALEntry\"\033\n\031ReplicateWA" + + "LEntryResponse\"\026\n\024RollWALWriterRequest\"." 
+ + "\n\025RollWALWriterResponse\022\025\n\rregionToFlush" + + "\030\001 \003(\014\"#\n\021StopServerRequest\022\016\n\006reason\030\001 " + + "\002(\t\"\024\n\022StopServerResponse\"\026\n\024GetServerIn" + + "foRequest\"8\n\025GetServerInfoResponse\022\037\n\nse" + + "rverName\030\001 \002(\0132\013.ServerName2\371\005\n\014AdminSer" + + "vice\022>\n\rgetRegionInfo\022\025.GetRegionInfoReq" + + "uest\032\026.GetRegionInfoResponse\022;\n\014getStore" + + "File\022\024.GetStoreFileRequest\032\025.GetStoreFil", + "eResponse\022D\n\017getOnlineRegion\022\027.GetOnline" + + "RegionRequest\032\030.GetOnlineRegionResponse\022" + + "5\n\nopenRegion\022\022.OpenRegionRequest\032\023.Open" + + "RegionResponse\0228\n\013closeRegion\022\023.CloseReg" + + "ionRequest\032\024.CloseRegionResponse\0228\n\013flus" + + "hRegion\022\023.FlushRegionRequest\032\024.FlushRegi" + + "onResponse\0228\n\013splitRegion\022\023.SplitRegionR" + + "equest\032\024.SplitRegionResponse\022>\n\rcompactR" + + "egion\022\025.CompactRegionRequest\032\026.CompactRe" + + "gionResponse\022J\n\021replicateWALEntry\022\031.Repl", + "icateWALEntryRequest\032\032.ReplicateWALEntry" + + "Response\022>\n\rrollWALWriter\022\025.RollWALWrite" + + "rRequest\032\026.RollWALWriterResponse\022>\n\rgetS" + + "erverInfo\022\025.GetServerInfoRequest\032\026.GetSe" + + "rverInfoResponse\0225\n\nstopServer\022\022.StopSer" + + "verRequest\032\023.StopServerResponseBA\n*org.a" + + "pache.hadoop.hbase.protobuf.generatedB\013A" + + "dminProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -15044,22 +15043,22 @@ public final class AdminProtos { new java.lang.String[] { "RegionInfo", }, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.Builder.class); - internal_static_GetStoreFileListRequest_descriptor = + internal_static_GetStoreFileRequest_descriptor = getDescriptor().getMessageTypes().get(2); - internal_static_GetStoreFileListRequest_fieldAccessorTable = new + internal_static_GetStoreFileRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetStoreFileListRequest_descriptor, - new java.lang.String[] { "Region", "ColumnFamily", }, - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest.class, - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListRequest.Builder.class); - internal_static_GetStoreFileListResponse_descriptor = + internal_static_GetStoreFileRequest_descriptor, + new java.lang.String[] { "Region", "Family", }, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest.class, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest.Builder.class); + internal_static_GetStoreFileResponse_descriptor = getDescriptor().getMessageTypes().get(3); - internal_static_GetStoreFileListResponse_fieldAccessorTable = new + internal_static_GetStoreFileResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_GetStoreFileListResponse_descriptor, + internal_static_GetStoreFileResponse_descriptor, new java.lang.String[] { "StoreFile", }, - org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse.class, - 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileListResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse.class, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse.Builder.class); internal_static_GetOnlineRegionRequest_descriptor = getDescriptor().getMessageTypes().get(4); internal_static_GetOnlineRegionRequest_fieldAccessorTable = new @@ -15169,7 +15168,7 @@ public final class AdminProtos { internal_static_WALEntry_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_WALEntry_descriptor, - new java.lang.String[] { "WalKey", "Edit", }, + new java.lang.String[] { "Key", "Edit", }, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.Builder.class); internal_static_WALEntry_WALKey_descriptor = @@ -15185,7 +15184,7 @@ public final class AdminProtos { internal_static_WALEntry_WALEdit_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_WALEntry_WALEdit_descriptor, - new java.lang.String[] { "KeyValue", "FamilyScope", }, + new java.lang.String[] { "KeyValueBytes", "FamilyScope", }, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALEdit.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALEdit.Builder.class); internal_static_WALEntry_WALEdit_FamilyScope_descriptor = @@ -15201,7 +15200,7 @@ public final class AdminProtos { internal_static_ReplicateWALEntryRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ReplicateWALEntryRequest_descriptor, - new java.lang.String[] { "WalEntry", }, + new java.lang.String[] { "Entry", }, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.Builder.class); internal_static_ReplicateWALEntryResponse_descriptor = diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 61a5988fe9f..949933b30e3 100644 --- a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -74,7 +74,6 @@ import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RemoteExceptionHandler; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Stoppable; -import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.UnknownRowLockException; import org.apache.hadoop.hbase.UnknownScannerException; import org.apache.hadoop.hbase.YouAreDeadException; @@ -82,7 +81,9 @@ import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.catalog.MetaEditor; import org.apache.hadoop.hbase.catalog.MetaReader; import org.apache.hadoop.hbase.client.Action; +import org.apache.hadoop.hbase.client.AdminProtocol; import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HConnectionManager; @@ -117,11 +118,7 @@ import org.apache.hadoop.hbase.ipc.Invocation; import org.apache.hadoop.hbase.ipc.ProtocolSignature; import org.apache.hadoop.hbase.ipc.RpcServer; import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; -import org.apache.hadoop.hbase.protobuf.ClientProtocol; import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress; -import org.apache.hadoop.hbase.regionserver.handler.CloseMetaHandler; -import org.apache.hadoop.hbase.regionserver.handler.CloseRegionHandler; -import org.apache.hadoop.hbase.regionserver.handler.CloseRootHandler; import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler; import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler; import org.apache.hadoop.hbase.regionserver.handler.OpenRootHandler; @@ -185,12 +182,6 @@ public class HRegionServer extends RegionServer private boolean useHBaseChecksum; // verify hbase checksums? private Path rootDir; - //RegionName vs current action in progress - //true - if open region action in progress - //false - if close region action in progress - private final ConcurrentSkipListMap regionsInTransitionInRS = - new ConcurrentSkipListMap(Bytes.BYTES_COMPARATOR); - protected final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); final int numRetries; @@ -228,9 +219,6 @@ public class HRegionServer extends RegionServer @SuppressWarnings("unused") private RegionServerDynamicMetrics dynamicMetrics; - // Compactions - public CompactSplitThread compactSplitThread; - /* * Check for compactions requests. */ @@ -250,9 +238,6 @@ public class HRegionServer extends RegionServer // master address manager and watcher private MasterAddressTracker masterAddressManager; - // catalog tracker - private CatalogTracker catalogTracker; - // Cluster Status Tracker private ClusterStatusTracker clusterStatusTracker; @@ -264,14 +249,6 @@ public class HRegionServer extends RegionServer private final int rpcTimeout; - // Instance of the hbase executor service. - private ExecutorService service; - @SuppressWarnings("unused") - - // Replication services. If no replication, this handler will be null. - private ReplicationSourceService replicationSourceHandler; - private ReplicationSinkService replicationSinkHandler; - private final RegionServerAccounting regionServerAccounting; // Cache configuration and block cache reference @@ -296,18 +273,6 @@ public class HRegionServer extends RegionServer */ private final long startcode; - /** - * Go here to get table descriptors. - */ - private TableDescriptors tableDescriptors; - - /* - * Strings to be used in forming the exception message for - * RegionsAlreadyInTransitionException. - */ - private static final String OPEN = "OPEN"; - private static final String CLOSE = "CLOSE"; - /** * MX Bean for RegionServerInfo */ @@ -370,7 +335,7 @@ public class HRegionServer extends RegionServer this.rpcServer = HBaseRPC.getServer(this, new Class[]{HRegionInterface.class, ClientProtocol.class, - HBaseRPCErrorHandler.class, + AdminProtocol.class, HBaseRPCErrorHandler.class, OnlineRegions.class}, initialIsa.getHostName(), // BindAddress is IP we got for this server. initialIsa.getPort(), @@ -2490,19 +2455,6 @@ public class HRegionServer extends RegionServer return RegionOpeningState.OPENED; } - private void checkIfRegionInTransition(HRegionInfo region, - String currentAction) throws RegionAlreadyInTransitionException { - byte[] encodedName = region.getEncodedNameAsBytes(); - if (this.regionsInTransitionInRS.containsKey(encodedName)) { - boolean openAction = this.regionsInTransitionInRS.get(encodedName); - // The below exception message will be used in master. 
- throw new RegionAlreadyInTransitionException("Received:" + currentAction + - " for the region:" + region.getRegionNameAsString() + - " ,which we are already trying to " + - (openAction ? OPEN : CLOSE)+ "."); - } - } - @Override @QosPriority(priority=HIGH_QOS) public void openRegions(List regions) @@ -2559,54 +2511,6 @@ public class HRegionServer extends RegionServer return closeRegion(encodedRegionName, false, zk); } - /** - * @param region Region to close - * @param abort True if we are aborting - * @param zk True if we are to update zk about the region close; if the close - * was orchestrated by master, then update zk. If the close is being run by - * the regionserver because its going down, don't update zk. - * @return True if closed a region. - */ - protected boolean closeRegion(HRegionInfo region, final boolean abort, - final boolean zk) { - return closeRegion(region, abort, zk, -1); - } - - - /** - * @param region Region to close - * @param abort True if we are aborting - * @param zk True if we are to update zk about the region close; if the close - * was orchestrated by master, then update zk. If the close is being run by - * the regionserver because its going down, don't update zk. - * @param versionOfClosingNode - * the version of znode to compare when RS transitions the znode from - * CLOSING state. - * @return True if closed a region. - */ - protected boolean closeRegion(HRegionInfo region, final boolean abort, - final boolean zk, final int versionOfClosingNode) { - if (this.regionsInTransitionInRS.containsKey(region.getEncodedNameAsBytes())) { - LOG.warn("Received close for region we are already opening or closing; " + - region.getEncodedName()); - return false; - } - this.regionsInTransitionInRS.putIfAbsent(region.getEncodedNameAsBytes(), false); - CloseRegionHandler crh = null; - if (region.isRootRegion()) { - crh = new CloseRootHandler(this, this, region, abort, zk, - versionOfClosingNode); - } else if (region.isMetaRegion()) { - crh = new CloseMetaHandler(this, this, region, abort, zk, - versionOfClosingNode); - } else { - crh = new CloseRegionHandler(this, this, region, abort, zk, - versionOfClosingNode); - } - this.service.submit(crh); - return true; - } - /** * @param encodedRegionName * encodedregionName to close @@ -2804,13 +2708,6 @@ public class HRegionServer extends RegionServer return sortedRegions; } - @Override - public HRegion getFromOnlineRegions(final String encodedRegionName) { - HRegion r = null; - r = this.onlineRegions.get(encodedRegionName); - return r; - } - /** @return the request count */ public AtomicInteger getRequestCount() { return this.requestCount; @@ -2858,6 +2755,8 @@ public class HRegionServer extends RegionServer return new ProtocolSignature(HRegionInterface.VERSION, null); } else if (protocol.equals(ClientProtocol.class.getName())) { return new ProtocolSignature(ClientProtocol.VERSION, null); + } else if (protocol.equals(AdminProtocol.class.getName())) { + return new ProtocolSignature(AdminProtocol.VERSION, null); } throw new IOException("Unknown protocol: " + protocol); } @@ -2870,6 +2769,8 @@ public class HRegionServer extends RegionServer return HRegionInterface.VERSION; } else if (protocol.equals(ClientProtocol.class.getName())) { return ClientProtocol.VERSION; + } else if (protocol.equals(AdminProtocol.class.getName())) { + return AdminProtocol.VERSION; } throw new IOException("Unknown protocol: " + protocol); } diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionThriftServer.java 
b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionThriftServer.java index 759633d4f49..4cb070e08a7 100644 --- a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionThriftServer.java +++ b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionThriftServer.java @@ -37,16 +37,11 @@ import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.RequestConverter; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse; import org.apache.hadoop.hbase.thrift.ThriftServerRunner; import org.apache.hadoop.hbase.thrift.ThriftUtilities; import org.apache.hadoop.hbase.thrift.generated.IOError; import org.apache.hadoop.hbase.thrift.generated.TRowResult; -import com.google.protobuf.ServiceException; - /** * HRegionThriftServer - this class starts up a Thrift server in the same * JVM where the RegionServer is running. It inherits most of the @@ -136,10 +131,7 @@ public class HRegionThriftServer extends Thread { if (columns == null) { Get get = new Get(row); get.setTimeRange(Long.MIN_VALUE, timestamp); - GetRequest request = - RequestConverter.buildGetRequest(regionName, get); - GetResponse response = rs.get(null, request); - Result result = ProtobufUtil.toResult(response.getResult()); + Result result = ProtobufUtil.get(rs, regionName, get); return ThriftUtilities.rowResultFromHBase(result); } Get get = new Get(row); @@ -152,10 +144,7 @@ public class HRegionThriftServer extends Thread { } } get.setTimeRange(Long.MIN_VALUE, timestamp); - GetRequest request = - RequestConverter.buildGetRequest(regionName, get); - GetResponse response = rs.get(null, request); - Result result = ProtobufUtil.toResult(response.getResult()); + Result result = ProtobufUtil.get(rs, regionName, get); return ThriftUtilities.rowResultFromHBase(result); } catch (NotServingRegionException e) { if (!redirect) { @@ -165,10 +154,6 @@ public class HRegionThriftServer extends Thread { LOG.debug("ThriftServer redirecting getRowWithColumnsTs"); return super.getRowWithColumnsTs(tableName, rowb, columns, timestamp, attributes); - } catch (ServiceException se) { - IOException e = ProtobufUtil.getRemoteException(se); - LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); } catch (IOException e) { LOG.warn(e.getMessage(), e); throw new IOError(e.getMessage()); diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServer.java b/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServer.java index 7c59995b03f..038f2ee462f 100644 --- a/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServer.java +++ b/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServer.java @@ -21,10 +21,14 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Random; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.logging.Log; @@ -33,12 +37,19 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants.OperationStatusCode; import 
org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RemoteExceptionHandler; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.UnknownRowLockException; import org.apache.hadoop.hbase.UnknownScannerException; +import org.apache.hadoop.hbase.catalog.CatalogTracker; +import org.apache.hadoop.hbase.catalog.MetaReader; +import org.apache.hadoop.hbase.client.AdminProtocol; import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Increment; @@ -48,13 +59,38 @@ import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.coprocessor.Exec; import org.apache.hadoop.hbase.client.coprocessor.ExecResult; +import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.filter.WritableByteArrayComparable; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.ipc.CoprocessorProtocol; -import org.apache.hadoop.hbase.protobuf.ClientProtocol; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.ResponseConverter; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse.RegionOpeningState; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse; +import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ActionResult; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; @@ -78,15 +114,24 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.UnlockRowRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.UnlockRowResponse; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.regionserver.HRegionServer.QosPriority; import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException; +import org.apache.hadoop.hbase.regionserver.handler.CloseMetaHandler; +import org.apache.hadoop.hbase.regionserver.handler.CloseRegionHandler; +import org.apache.hadoop.hbase.regionserver.handler.CloseRootHandler; +import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler; +import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler; +import org.apache.hadoop.hbase.regionserver.handler.OpenRootHandler; import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; +import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Pair; +import com.google.protobuf.ByteString; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; @@ -101,17 +146,45 @@ import com.google.protobuf.ServiceException; */ @InterfaceAudience.Private public abstract class RegionServer implements - ClientProtocol, Runnable, RegionServerServices { + ClientProtocol, AdminProtocol, Runnable, RegionServerServices { private static final Log LOG = LogFactory.getLog(RegionServer.class); private final Random rand = new Random(); + /* + * Strings to be used in forming the exception message for + * RegionsAlreadyInTransitionException. + */ + protected static final String OPEN = "OPEN"; + protected static final String CLOSE = "CLOSE"; + + //RegionName vs current action in progress + //true - if open region action in progress + //false - if close region action in progress + protected final ConcurrentSkipListMap regionsInTransitionInRS = + new ConcurrentSkipListMap(Bytes.BYTES_COMPARATOR); + protected long maxScannerResultSize; // Cache flushing protected MemStoreFlusher cacheFlusher; + // catalog tracker + protected CatalogTracker catalogTracker; + + /** + * Go here to get table descriptors. + */ + protected TableDescriptors tableDescriptors; + + // Replication services. If no replication, this handler will be null. + protected ReplicationSourceService replicationSourceHandler; + protected ReplicationSinkService replicationSinkHandler; + + // Compactions + public CompactSplitThread compactSplitThread; + final Map scanners = new ConcurrentHashMap(); @@ -125,6 +198,9 @@ public abstract class RegionServer implements // Leases protected Leases leases; + // Instance of the hbase executor service. + protected ExecutorService service; + // Request counter. 
// Do we need this? Can't we just sum region counters? St.Ack 20110412 protected AtomicInteger requestCount = new AtomicInteger(); @@ -244,6 +320,67 @@ public abstract class RegionServer implements } } + protected void checkIfRegionInTransition(HRegionInfo region, + String currentAction) throws RegionAlreadyInTransitionException { + byte[] encodedName = region.getEncodedNameAsBytes(); + if (this.regionsInTransitionInRS.containsKey(encodedName)) { + boolean openAction = this.regionsInTransitionInRS.get(encodedName); + // The below exception message will be used in master. + throw new RegionAlreadyInTransitionException("Received:" + currentAction + + " for the region:" + region.getRegionNameAsString() + + " ,which we are already trying to " + + (openAction ? OPEN : CLOSE)+ "."); + } + } + + /** + * @param region Region to close + * @param abort True if we are aborting + * @param zk True if we are to update zk about the region close; if the close + * was orchestrated by master, then update zk. If the close is being run by + * the regionserver because its going down, don't update zk. + * @return True if closed a region. + */ + protected boolean closeRegion(HRegionInfo region, final boolean abort, + final boolean zk) { + return closeRegion(region, abort, zk, -1); + } + + + /** + * @param region Region to close + * @param abort True if we are aborting + * @param zk True if we are to update zk about the region close; if the close + * was orchestrated by master, then update zk. If the close is being run by + * the regionserver because its going down, don't update zk. + * @param versionOfClosingNode + * the version of znode to compare when RS transitions the znode from + * CLOSING state. + * @return True if closed a region. + */ + protected boolean closeRegion(HRegionInfo region, final boolean abort, + final boolean zk, final int versionOfClosingNode) { + if (this.regionsInTransitionInRS.containsKey(region.getEncodedNameAsBytes())) { + LOG.warn("Received close for region we are already opening or closing; " + + region.getEncodedName()); + return false; + } + this.regionsInTransitionInRS.putIfAbsent(region.getEncodedNameAsBytes(), false); + CloseRegionHandler crh = null; + if (region.isRootRegion()) { + crh = new CloseRootHandler(this, this, region, abort, zk, + versionOfClosingNode); + } else if (region.isMetaRegion()) { + crh = new CloseMetaHandler(this, this, region, abort, zk, + versionOfClosingNode); + } else { + crh = new CloseRegionHandler(this, this, region, abort, zk, + versionOfClosingNode); + } + this.service.submit(crh); + return true; + } + /** * @param regionName * @return HRegion for the passed binary regionName or null if @@ -254,6 +391,11 @@ public abstract class RegionServer implements return this.onlineRegions.get(encodedRegionName); } + @Override + public HRegion getFromOnlineRegions(final String encodedRegionName) { + return this.onlineRegions.get(encodedRegionName); + } + /** * Protected utility method for safely obtaining an HRegion handle. 
* @@ -1002,6 +1144,352 @@ public abstract class RegionServer implements } // End Client methods +// Start Admin methods + + @Override + @QosPriority(priority=HIGH_QOS) + public GetRegionInfoResponse getRegionInfo(final RpcController controller, + final GetRegionInfoRequest request) throws ServiceException { + try { + checkOpen(); + requestCount.incrementAndGet(); + HRegion region = getRegion(request.getRegion()); + HRegionInfo info = region.getRegionInfo(); + GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder(); + builder.setRegionInfo(ProtobufUtil.toRegionInfo(info)); + return builder.build(); + } catch (IOException ie) { + throw new ServiceException(ie); + } + } + + @Override + public GetStoreFileResponse getStoreFile(final RpcController controller, + final GetStoreFileRequest request) throws ServiceException { + try { + HRegion region = getRegion(request.getRegion()); + requestCount.incrementAndGet(); + Set columnFamilies = null; + if (request.getFamilyCount() == 0) { + columnFamilies = region.getStores().keySet(); + } else { + columnFamilies = new HashSet(); + for (ByteString cf: request.getFamilyList()) { + columnFamilies.add(cf.toByteArray()); + } + } + int nCF = columnFamilies.size(); + List fileList = region.getStoreFileList( + columnFamilies.toArray(new byte[nCF][])); + GetStoreFileResponse.Builder builder = GetStoreFileResponse.newBuilder(); + builder.addAllStoreFile(fileList); + return builder.build(); + } catch (IOException ie) { + throw new ServiceException(ie); + } + } + + @Override + @QosPriority(priority=HIGH_QOS) + public GetOnlineRegionResponse getOnlineRegion(final RpcController controller, + final GetOnlineRegionRequest request) throws ServiceException { + try { + checkOpen(); + requestCount.incrementAndGet(); + List list = new ArrayList(onlineRegions.size()); + for (Map.Entry e: this.onlineRegions.entrySet()) { + list.add(e.getValue().getRegionInfo()); + } + Collections.sort(list); + GetOnlineRegionResponse.Builder builder = GetOnlineRegionResponse.newBuilder(); + for (HRegionInfo region: list) { + builder.addRegionInfo(ProtobufUtil.toRegionInfo(region)); + } + return builder.build(); + } catch (IOException ie) { + throw new ServiceException(ie); + } + } + + + // Region open/close direct RPCs + + /** + * Open a region on the region server. + * + * @param controller the RPC controller + * @param request the request + * @throws ServiceException + */ + @Override + @QosPriority(priority=HIGH_QOS) + public OpenRegionResponse openRegion(final RpcController controller, + final OpenRegionRequest request) throws ServiceException { + int versionOfOfflineNode = -1; + if (request.hasVersionOfOfflineNode()) { + versionOfOfflineNode = request.getVersionOfOfflineNode(); + } + try { + checkOpen(); + requestCount.incrementAndGet(); + OpenRegionResponse.Builder + builder = OpenRegionResponse.newBuilder(); + for (RegionInfo regionInfo: request.getRegionList()) { + HRegionInfo region = ProtobufUtil.toRegionInfo(regionInfo); + checkIfRegionInTransition(region, OPEN); + + HRegion onlineRegion = getFromOnlineRegions(region.getEncodedName()); + if (null != onlineRegion) { + // See HBASE-5094. Cross check with META if still this RS is owning the + // region. 
+ Pair p = MetaReader.getRegion( + this.catalogTracker, region.getRegionName()); + if (this.getServerName().equals(p.getSecond())) { + LOG.warn("Attempted open of " + region.getEncodedName() + + " but already online on this server"); + builder.addOpeningState(RegionOpeningState.ALREADY_OPENED); + continue; + } else { + LOG.warn("The region " + region.getEncodedName() + + " is online on this server but META does not have this server."); + removeFromOnlineRegions(region.getEncodedName()); + } + } + LOG.info("Received request to open region: " + region.getEncodedName()); + this.regionsInTransitionInRS.putIfAbsent(region.getEncodedNameAsBytes(), true); + HTableDescriptor htd = this.tableDescriptors.get(region.getTableName()); + // Need to pass the expected version in the constructor. + if (region.isRootRegion()) { + this.service.submit(new OpenRootHandler(this, this, region, htd, + versionOfOfflineNode)); + } else if (region.isMetaRegion()) { + this.service.submit(new OpenMetaHandler(this, this, region, htd, + versionOfOfflineNode)); + } else { + this.service.submit(new OpenRegionHandler(this, this, region, htd, + versionOfOfflineNode)); + } + builder.addOpeningState(RegionOpeningState.OPENED); + } + return builder.build(); + } catch (IOException ie) { + throw new ServiceException(ie); + } + } + + /** + * Close a region on the region server. + * + * @param controller the RPC controller + * @param request the request + * @throws ServiceException + */ + @Override + @QosPriority(priority=HIGH_QOS) + public CloseRegionResponse closeRegion(final RpcController controller, + final CloseRegionRequest request) throws ServiceException { + int versionOfClosingNode = -1; + if (request.hasVersionOfClosingNode()) { + versionOfClosingNode = request.getVersionOfClosingNode(); + } + boolean zk = request.getTransitionInZK(); + try { + checkOpen(); + requestCount.incrementAndGet(); + HRegion region = getRegion(request.getRegion()); + CloseRegionResponse.Builder + builder = CloseRegionResponse.newBuilder(); + LOG.info("Received close region: " + region.getRegionNameAsString() + + ". Version of ZK closing node:" + versionOfClosingNode); + HRegionInfo regionInfo = region.getRegionInfo(); + checkIfRegionInTransition(regionInfo, CLOSE); + boolean closed = closeRegion( + regionInfo, false, zk, versionOfClosingNode); + builder.setClosed(closed); + return builder.build(); + } catch (IOException ie) { + throw new ServiceException(ie); + } + } + + /** + * Flush a region on the region server. + * + * @param controller the RPC controller + * @param request the request + * @throws ServiceException + */ + @Override + @QosPriority(priority=HIGH_QOS) + public FlushRegionResponse flushRegion(final RpcController controller, + final FlushRegionRequest request) throws ServiceException { + try { + checkOpen(); + requestCount.incrementAndGet(); + HRegion region = getRegion(request.getRegion()); + LOG.info("Flushing " + region.getRegionNameAsString()); + boolean shouldFlush = true; + if (request.hasIfOlderThanTs()) { + shouldFlush = region.getLastFlushTime() < request.getIfOlderThanTs(); + } + FlushRegionResponse.Builder builder = FlushRegionResponse.newBuilder(); + if (shouldFlush) { + builder.setFlushed(region.flushcache()); + } + builder.setLastFlushTime(region.getLastFlushTime()); + return builder.build(); + } catch (IOException ie) { + throw new ServiceException(ie); + } + } + + /** + * Split a region on the region server. 
+ * + * @param controller the RPC controller + * @param request the request + * @throws ServiceException + */ + @Override + @QosPriority(priority=HIGH_QOS) + public SplitRegionResponse splitRegion(final RpcController controller, + final SplitRegionRequest request) throws ServiceException { + try { + checkOpen(); + requestCount.incrementAndGet(); + HRegion region = getRegion(request.getRegion()); + LOG.info("Splitting " + region.getRegionNameAsString()); + region.flushcache(); + byte[] splitPoint = null; + if (request.hasSplitPoint()) { + splitPoint = request.getSplitPoint().toByteArray(); + } + region.forceSplit(splitPoint); + compactSplitThread.requestSplit(region, region.checkSplit()); + return SplitRegionResponse.newBuilder().build(); + } catch (IOException ie) { + throw new ServiceException(ie); + } + } + + /** + * Compact a region on the region server. + * + * @param controller the RPC controller + * @param request the request + * @throws ServiceException + */ + @Override + @QosPriority(priority=HIGH_QOS) + public CompactRegionResponse compactRegion(final RpcController controller, + final CompactRegionRequest request) throws ServiceException { + try { + checkOpen(); + requestCount.incrementAndGet(); + HRegion region = getRegion(request.getRegion()); + LOG.info("Compacting " + region.getRegionNameAsString()); + boolean major = false; + if (request.hasMajor()) { + major = request.getMajor(); + } + if (major) { + region.triggerMajorCompaction(); + } + compactSplitThread.requestCompaction(region, + "User-triggered " + (major ? "major " : "") + "compaction", + CompactSplitThread.PRIORITY_USER); + return CompactRegionResponse.newBuilder().build(); + } catch (IOException ie) { + throw new ServiceException(ie); + } + } + + /** + * Replicate WAL entries on the region server. + * + * @param controller the RPC controller + * @param request the request + * @throws ServiceException + */ + @Override + @QosPriority(priority=HIGH_QOS) + public ReplicateWALEntryResponse replicateWALEntry(final RpcController controller, + final ReplicateWALEntryRequest request) throws ServiceException { + try { + if (replicationSinkHandler != null) { + checkOpen(); + requestCount.incrementAndGet(); + HLog.Entry[] entries = ProtobufUtil.toHLogEntries(request.getEntryList()); + if (entries != null && entries.length > 0) { + replicationSinkHandler.replicateLogEntries(entries); + } + } + return ReplicateWALEntryResponse.newBuilder().build(); + } catch (IOException ie) { + throw new ServiceException(ie); + } + } + + /** + * Roll the WAL writer of the region server. + * + * @param controller the RPC controller + * @param request the request + * @throws ServiceException + */ + @Override + public RollWALWriterResponse rollWALWriter(final RpcController controller, + final RollWALWriterRequest request) throws ServiceException { + try { + requestCount.incrementAndGet(); + HLog wal = this.getWAL(); + byte[][] regionsToFlush = wal.rollWriter(true); + RollWALWriterResponse.Builder builder = RollWALWriterResponse.newBuilder(); + if (regionsToFlush != null) { + for (byte[] region: regionsToFlush) { + builder.addRegionToFlush(ByteString.copyFrom(region)); + } + } + return builder.build(); + } catch (IOException ie) { + throw new ServiceException(ie); + } + } + + /** + * Stop the region server. 
+ * + * @param controller the RPC controller + * @param request the request + * @throws ServiceException + */ + @Override + public StopServerResponse stopServer(final RpcController controller, + final StopServerRequest request) throws ServiceException { + requestCount.incrementAndGet(); + String reason = request.getReason(); + stop(reason); + return StopServerResponse.newBuilder().build(); + } + + /** + * Get some information of the region server. + * + * @param controller the RPC controller + * @param request the request + * @throws ServiceException + */ + @Override + public GetServerInfoResponse getServerInfo(final RpcController controller, + final GetServerInfoRequest request) throws ServiceException { + ServerName serverName = getServerName(); + requestCount.incrementAndGet(); + GetServerInfoResponse.Builder builder = GetServerInfoResponse.newBuilder(); + builder.setServerName(ProtobufUtil.toServerName(serverName)); + return builder.build(); + } + +// End Admin methods /** * Find the HRegion based on a region specifier diff --git a/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index 04fe8b62085..5050df02688 100644 --- a/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -48,9 +48,10 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Stoppable; +import org.apache.hadoop.hbase.client.AdminProtocol; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; -import org.apache.hadoop.hbase.ipc.HRegionInterface; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; @@ -605,9 +606,10 @@ public class ReplicationSource extends Thread continue; } try { - HRegionInterface rrs = getRS(); + AdminProtocol rrs = getRS(); LOG.debug("Replicating " + currentNbEntries); - rrs.replicateLogEntries(Arrays.copyOf(this.entriesArray, currentNbEntries)); + ProtobufUtil.replicateWALEntry(rrs, + Arrays.copyOf(this.entriesArray, currentNbEntries)); if (this.lastLoggedPosition != this.position) { this.manager.logPositionAndCleanOldLogs(this.currentPath, this.peerClusterZnode, this.position, queueRecovered); @@ -727,13 +729,13 @@ public class ReplicationSource extends Thread * @return * @throws IOException */ - private HRegionInterface getRS() throws IOException { + private AdminProtocol getRS() throws IOException { if (this.currentPeers.size() == 0) { throw new IOException(this.peerClusterZnode + " has 0 region servers"); } ServerName address = currentPeers.get(random.nextInt(this.currentPeers.size())); - return this.conn.getHRegionConnection(address.getHostname(), address.getPort()); + return this.conn.getAdmin(address.getHostname(), address.getPort()); } /** @@ -746,9 +748,9 @@ public class ReplicationSource extends Thread Thread pingThread = new Thread() { public void run() { try { - HRegionInterface rrs = getRS(); + AdminProtocol rrs = getRS(); // Dummy call which should fail - rrs.getHServerInfo(); + ProtobufUtil.getServerInfo(rrs); latch.countDown(); } catch (IOException ex) { if (ex instanceof RemoteException) { diff --git 
a/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 66156c2891c..8dcf4f9b71c 100644 --- a/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.catalog.MetaReader; +import org.apache.hadoop.hbase.client.AdminProtocol; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HBaseAdmin; @@ -70,8 +71,8 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; -import org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.master.MasterFileSystem; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE; @@ -2620,11 +2621,11 @@ public class HBaseFsck { public synchronized void run() { errors.progress(); try { - HRegionInterface server = - connection.getHRegionConnection(rsinfo.getHostname(), rsinfo.getPort()); + AdminProtocol server = + connection.getAdmin(rsinfo.getHostname(), rsinfo.getPort()); // list all online regions from this region server - List regions = server.getOnlineRegions(); + List regions = ProtobufUtil.getOnlineRegions(server); if (hbck.checkMetaOnly) { regions = filterOnlyMetaRegions(regions); } diff --git a/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java index 83a165c1a20..9c3c9efe194 100644 --- a/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java +++ b/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java @@ -34,12 +34,13 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.client.AdminProtocol; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.master.AssignmentManager.RegionState; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.zookeeper.KeeperException; @@ -149,17 +150,16 @@ public class HBaseFsckRepair { public static void closeRegionSilentlyAndWait(HBaseAdmin admin, ServerName server, HRegionInfo region) throws IOException, InterruptedException { HConnection connection = admin.getConnection(); - HRegionInterface rs = connection.getHRegionConnection(server.getHostname(), - server.getPort()); - rs.closeRegion(region, false); + AdminProtocol rs = connection.getAdmin(server.getHostname(), server.getPort()); + ProtobufUtil.closeRegion(rs, region.getRegionName(), false); long timeout = admin.getConfiguration() .getLong("hbase.hbck.close.timeout", 120000); long expiration = timeout + 
System.currentTimeMillis(); while (System.currentTimeMillis() < expiration) { try { - HRegionInfo rsRegion = rs.getRegionInfo(region.getRegionName()); - if (rsRegion == null) - return; + HRegionInfo rsRegion = + ProtobufUtil.getRegionInfo(rs, region.getRegionName()); + if (rsRegion == null) return; } catch (IOException ioe) { return; } diff --git a/src/main/java/org/apache/hadoop/hbase/util/SortedCopyOnWriteSet.java b/src/main/java/org/apache/hadoop/hbase/util/SortedCopyOnWriteSet.java index d3d0372553e..e578ba0fef2 100644 --- a/src/main/java/org/apache/hadoop/hbase/util/SortedCopyOnWriteSet.java +++ b/src/main/java/org/apache/hadoop/hbase/util/SortedCopyOnWriteSet.java @@ -49,7 +49,7 @@ import org.apache.hadoop.classification.InterfaceStability; @InterfaceAudience.Public @InterfaceStability.Stable public class SortedCopyOnWriteSet implements SortedSet { - private SortedSet internalSet; + private volatile SortedSet internalSet; public SortedCopyOnWriteSet() { this.internalSet = new TreeSet(); diff --git a/src/main/protobuf/Admin.proto b/src/main/protobuf/Admin.proto index 132c5dd34c0..2ad6fb03e04 100644 --- a/src/main/protobuf/Admin.proto +++ b/src/main/protobuf/Admin.proto @@ -38,12 +38,12 @@ message GetRegionInfoResponse { * Get a list of store files for a set of column families in a particular region. * If no column family is specified, get the store files for all column families. */ -message GetStoreFileListRequest { +message GetStoreFileRequest { required RegionSpecifier region = 1; - repeated bytes columnFamily = 2; + repeated bytes family = 2; } -message GetStoreFileListResponse { +message GetStoreFileResponse { repeated string storeFile = 1; } @@ -55,7 +55,7 @@ message GetOnlineRegionResponse { } message OpenRegionRequest { - repeated RegionSpecifier region = 1; + repeated RegionInfo region = 1; optional uint32 versionOfOfflineNode = 2; } @@ -133,7 +133,7 @@ message UUID { // Protocol buffer version of HLog message WALEntry { - required WALKey walKey = 1; + required WALKey key = 1; required WALEdit edit = 2; // Protocol buffer version of HLogKey @@ -146,7 +146,7 @@ message WALEntry { } message WALEdit { - repeated bytes keyValue = 1; + repeated bytes keyValueBytes = 1; repeated FamilyScope familyScope = 2; enum ScopeType { @@ -168,7 +168,7 @@ message WALEntry { * hbase.replication has to be set to true for this to work. 
*/ message ReplicateWALEntryRequest { - repeated WALEntry walEntry = 1; + repeated WALEntry entry = 1; } message ReplicateWALEntryResponse { @@ -201,8 +201,8 @@ service AdminService { rpc getRegionInfo(GetRegionInfoRequest) returns(GetRegionInfoResponse); - rpc getStoreFileList(GetStoreFileListRequest) - returns(GetStoreFileListResponse); + rpc getStoreFile(GetStoreFileRequest) + returns(GetStoreFileResponse); rpc getOnlineRegion(GetOnlineRegionRequest) returns(GetOnlineRegionResponse); diff --git a/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java b/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java index d6ae0e23bb9..3dfc94e8424 100644 --- a/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java +++ b/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java @@ -32,8 +32,18 @@ import junit.framework.Assert; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.*; -import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.AdminProtocol; +import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; @@ -41,7 +51,9 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.RetriesExhaustedException; import org.apache.hadoop.hbase.client.ServerCallable; import org.apache.hadoop.hbase.ipc.HRegionInterface; -import org.apache.hadoop.hbase.protobuf.ClientProtocol; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse; import org.apache.hadoop.hbase.util.Bytes; @@ -186,16 +198,19 @@ public class TestCatalogTracker { @Test public void testServerNotRunningIOException() throws IOException, InterruptedException, KeeperException, ServiceException { - // Mock an HRegionInterface. - final HRegionInterface implementation = Mockito.mock(HRegionInterface.class); + // Mock an Admin and a Client. + final AdminProtocol admin = Mockito.mock(AdminProtocol.class); final ClientProtocol client = Mockito.mock(ClientProtocol.class); - HConnection connection = mockConnection(implementation, client); + HConnection connection = mockConnection(admin, client); try { - // If a 'getRegionInfo' is called on mocked HRegionInterface, throw IOE + // If a 'getRegionInfo' is called on mocked AdminProtocol, throw IOE // the first time. 'Succeed' the second time we are called. - Mockito.when(implementation.getRegionInfo((byte[]) Mockito.any())). - thenThrow(new IOException("Server not running, aborting")). 
- thenReturn(new HRegionInfo()); + GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder(); + builder.setRegionInfo(ProtobufUtil.toRegionInfo(new HRegionInfo(Bytes.toBytes("test")))); + Mockito.when(admin.getRegionInfo((RpcController)Mockito.any(), + (GetRegionInfoRequest)Mockito.any())).thenThrow( + new ServiceException(new IOException("Server not running, aborting"))). + thenReturn(builder.build()); // After we encounter the above 'Server not running', we should catch the // IOE and go into retrying for the meta mode. We'll do gets on -ROOT- to @@ -292,18 +307,19 @@ public class TestCatalogTracker { * @throws IOException * @throws InterruptedException * @throws KeeperException + * @throws ServiceException */ @Test public void testVerifyRootRegionLocationFails() - throws IOException, InterruptedException, KeeperException { + throws IOException, InterruptedException, KeeperException, ServiceException { HConnection connection = Mockito.mock(HConnection.class); - ConnectException connectException = - new ConnectException("Connection refused"); - final HRegionInterface implementation = - Mockito.mock(HRegionInterface.class); - Mockito.when(implementation.getRegionInfo((byte [])Mockito.any())). - thenThrow(connectException); - Mockito.when(connection.getHRegionConnection(Mockito.anyString(), + ServiceException connectException = + new ServiceException(new ConnectException("Connection refused")); + final AdminProtocol implementation = + Mockito.mock(AdminProtocol.class); + Mockito.when(implementation.getRegionInfo((RpcController)Mockito.any(), + (GetRegionInfoRequest)Mockito.any())).thenThrow(connectException); + Mockito.when(connection.getAdmin(Mockito.anyString(), Mockito.anyInt(), Mockito.anyBoolean())). thenReturn(implementation); final CatalogTracker ct = constructAndStartCatalogTracker(connection); @@ -379,11 +395,11 @@ public class TestCatalogTracker { // that ... and so one. @Test public void testNoTimeoutWaitForMeta() throws Exception { - // Mock an HConnection and a HRegionInterface implementation. Have the + // Mock an HConnection and a AdminProtocol implementation. Have the // HConnection return the HRI. Have the HRI return a few mocked up responses // to make our test work. - // Mock an HRegionInterface. - final HRegionInterface implementation = Mockito.mock(HRegionInterface.class); + // Mock an AdminProtocol. + final AdminProtocol implementation = Mockito.mock(AdminProtocol.class); HConnection connection = mockConnection(implementation, null); try { // Now the ct is up... set into the mocks some answers that make it look @@ -396,8 +412,10 @@ public class TestCatalogTracker { // It works for now but has been deprecated. Mockito.when(connection.getRegionServerWithRetries((ServerCallable)Mockito.any())). thenReturn(result); - Mockito.when(implementation.getRegionInfo((byte[]) Mockito.any())). 
- thenReturn(HRegionInfo.FIRST_META_REGIONINFO); + GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder(); + builder.setRegionInfo(ProtobufUtil.toRegionInfo(HRegionInfo.FIRST_META_REGIONINFO)); + Mockito.when(implementation.getRegionInfo((RpcController)Mockito.any(), + (GetRegionInfoRequest)Mockito.any())).thenReturn(builder.build()); final CatalogTracker ct = constructAndStartCatalogTracker(connection); ServerName hsa = ct.getMetaLocation(); Assert.assertNull(hsa); @@ -430,7 +448,7 @@ public class TestCatalogTracker { } /** - * @param implementation An {@link HRegionInterface} instance; you'll likely + * @param admin An {@link AdminProtocol} instance; you'll likely * want to pass a mocked HRS; can be null. * @param client A mocked ClientProtocol instance, can be null * @return Mock up a connection that returns a {@link Configuration} when @@ -443,9 +461,8 @@ public class TestCatalogTracker { * when done with this mocked Connection. * @throws IOException */ - private HConnection mockConnection( - final HRegionInterface implementation, final ClientProtocol client) - throws IOException { + private HConnection mockConnection(final AdminProtocol admin, + final ClientProtocol client) throws IOException { HConnection connection = HConnectionTestingUtility.getMockedConnection(UTIL.getConfiguration()); Mockito.doNothing().when(connection).close(); @@ -459,10 +476,10 @@ public class TestCatalogTracker { Mockito.when(connection.locateRegion((byte[]) Mockito.any(), (byte[]) Mockito.any())). thenReturn(anyLocation); - if (implementation != null) { + if (admin != null) { // If a call to getHRegionConnection, return this implementation. - Mockito.when(connection.getHRegionConnection(Mockito.anyString(), Mockito.anyInt())). - thenReturn(implementation); + Mockito.when(connection.getAdmin(Mockito.anyString(), Mockito.anyInt())). + thenReturn(admin); } if (client != null) { // If a call to getClient, return this implementation. 
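For reference, a minimal usage sketch of the admin client path the tests above exercise: callers now ask the HConnection for an AdminProtocol stub and go through the ProtobufUtil wrappers instead of calling HRegionInterface directly. Only methods that appear elsewhere in this patch are used; the helper name and the connection/server-name/region-name parameters are illustrative assumptions, not part of the change.

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.AdminProtocol;
    import org.apache.hadoop.hbase.client.HConnection;
    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;

    // Hypothetical helper, for illustration only.
    static void dumpRegionInfo(HConnection connection, ServerName sn, byte[] regionName)
        throws IOException {
      // Ask the connection for the protobuf-based admin stub of that region server.
      AdminProtocol admin = connection.getAdmin(sn.getHostname(), sn.getPort());
      // The ProtobufUtil wrappers hide the request building and ServiceException unwrapping.
      HRegionInfo info = ProtobufUtil.getRegionInfo(admin, regionName);
      List<HRegionInfo> online = ProtobufUtil.getOnlineRegions(admin);
      System.out.println(info.getRegionNameAsString() + " (" + online.size()
          + " regions online on " + sn + ")");
    }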
diff --git a/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java b/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java index 3cfc02b775c..a81059ae111 100644 --- a/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java +++ b/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java @@ -27,12 +27,12 @@ import java.util.NavigableMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.ClientProtocol; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; import org.apache.hadoop.hbase.util.Bytes; diff --git a/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java b/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java index 8af0f91d32d..42092b753f5 100644 --- a/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java +++ b/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java @@ -24,10 +24,11 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.client.AdminProtocol; +import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation; import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionKey; import org.apache.hadoop.hbase.ipc.HRegionInterface; -import org.apache.hadoop.hbase.protobuf.ClientProtocol; import org.mockito.Mockito; /** @@ -72,14 +73,14 @@ public class HConnectionTestingUtility { * connection when done by calling * {@link HConnectionManager#deleteConnection(Configuration, boolean)} else it * will stick around; this is probably not what you want. - * @param implementation An {@link HRegionInterface} instance; you'll likely - * want to pass a mocked HRS; can be null. - * + * * @param conf Configuration to use - * @param implementation An HRegionInterface; can be null but is usually + * @param admin An AdminProtocol; can be null but is usually + * itself a mock. + * @param client A ClientProtocol; can be null but is usually * itself a mock. 
* @param sn ServerName to include in the region location returned by this - * implementation + * connection * @param hri HRegionInfo to include in the location returned when * getRegionLocation is called on the mocked connection * @return Mock up a connection that returns a {@link Configuration} when @@ -93,7 +94,7 @@ public class HConnectionTestingUtility { * @throws IOException */ public static HConnection getMockedConnectionAndDecorate(final Configuration conf, - final HRegionInterface implementation, final ClientProtocol client, + final AdminProtocol admin, final ClientProtocol client, final ServerName sn, final HRegionInfo hri) throws IOException { HConnection c = HConnectionTestingUtility.getMockedConnection(conf); @@ -105,10 +106,10 @@ public class HConnectionTestingUtility { thenReturn(loc); Mockito.when(c.locateRegion((byte[]) Mockito.any(), (byte[]) Mockito.any())). thenReturn(loc); - if (implementation != null) { - // If a call to getHRegionConnection, return this implementation. - Mockito.when(c.getHRegionConnection(Mockito.anyString(), Mockito.anyInt())). - thenReturn(implementation); + if (admin != null) { + // If a call to getAdmin, return this implementation. + Mockito.when(c.getAdmin(Mockito.anyString(), Mockito.anyInt())). + thenReturn(admin); } if (client != null) { // If a call to getClient, return this client. diff --git a/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java b/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java index 804dfdeeed5..b49aafe55e7 100644 --- a/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java +++ b/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java @@ -1220,8 +1220,12 @@ public class TestAdmin { if (!regionInfo.isMetaTable()) { if (regionInfo.getRegionNameAsString().contains("TestHBACloseRegion1")) { info = regionInfo; - admin.closeRegionWithEncodedRegionName("sample", rs.getServerName() + try { + admin.closeRegionWithEncodedRegionName("sample", rs.getServerName() .getServerName()); + } catch (NotServingRegionException nsre) { + // expected, ignore it + } } } } @@ -1320,8 +1324,12 @@ public class TestAdmin { if (!regionInfo.isMetaTable()) { if (regionInfo.getRegionNameAsString().contains("TestHBACloseRegion4")) { info = regionInfo; - admin.closeRegionWithEncodedRegionName(regionInfo + try { + admin.closeRegionWithEncodedRegionName(regionInfo .getRegionNameAsString(), rs.getServerName().getServerName()); + } catch (NotServingRegionException nsre) { + // expected, ignore it. 
+ } } } } diff --git a/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java index 7dd60de9091..0079b13bf4a 100644 --- a/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java +++ b/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java @@ -22,7 +22,7 @@ package org.apache.hadoop.hbase.client; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; -import java.util.ArrayList; +import java.util.List; import java.util.Random; import org.apache.commons.logging.Log; @@ -32,7 +32,8 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.LargeTests; -import org.apache.hadoop.hbase.ipc.HRegionInterface; +import org.apache.hadoop.hbase.client.AdminProtocol; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.junit.After; @@ -42,17 +43,12 @@ import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; -import com.google.common.collect.Lists; - @Category(LargeTests.class) public class TestFromClientSide3 { final Log LOG = LogFactory.getLog(getClass()); private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static byte[] ROW = Bytes.toBytes("testRow"); private static byte[] FAMILY = Bytes.toBytes("testFamily"); - private static byte[] QUALIFIER = Bytes.toBytes("testQualifier"); - private static byte[] VALUE = Bytes.toBytes("testValue"); private static Random random = new Random(); private static int SLAVES = 3; @@ -108,19 +104,21 @@ public class TestFromClientSide3 { HConnection conn = HConnectionManager.getConnection(TEST_UTIL .getConfiguration()); HRegionLocation loc = table.getRegionLocation(row, true); - HRegionInterface server = conn.getHRegionConnection(loc.getHostname(), loc + AdminProtocol server = conn.getAdmin(loc.getHostname(), loc .getPort()); byte[] regName = loc.getRegionInfo().getRegionName(); for (int i = 0; i < nFlushes; i++) { randomCFPuts(table, row, family, nPuts); - int sfCount = server.getStoreFileList(regName, FAMILY).size(); + List sf = ProtobufUtil.getStoreFiles(server, regName, FAMILY); + int sfCount = sf.size(); // TODO: replace this api with a synchronous flush after HBASE-2949 admin.flush(table.getTableName()); // synchronously poll wait for a new storefile to appear (flush happened) - while (server.getStoreFileList(regName, FAMILY).size() == sfCount) { + while (ProtobufUtil.getStoreFiles( + server, regName, FAMILY).size() == sfCount) { Thread.sleep(40); } } @@ -154,9 +152,10 @@ public class TestFromClientSide3 { // Verify we have multiple store files. 
HRegionLocation loc = hTable.getRegionLocation(row, true); byte[] regionName = loc.getRegionInfo().getRegionName(); - HRegionInterface server = connection.getHRegionConnection( - loc.getHostname(), loc.getPort()); - assertTrue(server.getStoreFileList(regionName, FAMILY).size() > 1); + AdminProtocol server = connection.getAdmin( + loc.getHostname(), loc.getPort()); + assertTrue(ProtobufUtil.getStoreFiles( + server, regionName, FAMILY).size() > 1); // Issue a compaction request admin.compact(TABLE); @@ -167,16 +166,17 @@ public class TestFromClientSide3 { loc = hTable.getRegionLocation(row, true); if (!loc.getRegionInfo().isOffline()) { regionName = loc.getRegionInfo().getRegionName(); - server = connection.getHRegionConnection(loc.getHostname(), loc - .getPort()); - if (server.getStoreFileList(regionName, FAMILY).size() <= 1) { + server = connection.getAdmin(loc.getHostname(), loc.getPort()); + if (ProtobufUtil.getStoreFiles( + server, regionName, FAMILY).size() <= 1) { break; } } Thread.sleep(40); } // verify the compactions took place and that we didn't just time out - assertTrue(server.getStoreFileList(regionName, FAMILY).size() <= 1); + assertTrue(ProtobufUtil.getStoreFiles( + server, regionName, FAMILY).size() <= 1); // change the compaction.min config option for this table to 5 LOG.info("hbase.hstore.compaction.min should now be 5"); @@ -198,11 +198,11 @@ public class TestFromClientSide3 { // This time, the compaction request should not happen Thread.sleep(10 * 1000); - int sfCount = 0; loc = hTable.getRegionLocation(row, true); regionName = loc.getRegionInfo().getRegionName(); - server = connection.getHRegionConnection(loc.getHostname(), loc.getPort()); - sfCount = server.getStoreFileList(regionName, FAMILY).size(); + server = connection.getAdmin(loc.getHostname(), loc.getPort()); + int sfCount = ProtobufUtil.getStoreFiles( + server, regionName, FAMILY).size(); assertTrue(sfCount > 1); // change an individual CF's config option to 2 & online schema update @@ -225,9 +225,10 @@ public class TestFromClientSide3 { loc = hTable.getRegionLocation(row, true); regionName = loc.getRegionInfo().getRegionName(); try { - server = connection.getHRegionConnection(loc.getHostname(), loc + server = connection.getAdmin(loc.getHostname(), loc .getPort()); - if (server.getStoreFileList(regionName, FAMILY).size() < sfCount) { + if (ProtobufUtil.getStoreFiles( + server, regionName, FAMILY).size() < sfCount) { break; } } catch (Exception e) { @@ -236,7 +237,8 @@ public class TestFromClientSide3 { Thread.sleep(40); } // verify the compaction took place and that we didn't just time out - assertTrue(server.getStoreFileList(regionName, FAMILY).size() < sfCount); + assertTrue(ProtobufUtil.getStoreFiles( + server, regionName, FAMILY).size() < sfCount); // Finally, ensure that we can remove a custom config value after we made it LOG.info("Removing CF config value"); diff --git a/src/test/java/org/apache/hadoop/hbase/client/TestHTableUtil.java b/src/test/java/org/apache/hadoop/hbase/client/TestHTableUtil.java index 24f878e7d74..7b0b74d68a7 100644 --- a/src/test/java/org/apache/hadoop/hbase/client/TestHTableUtil.java +++ b/src/test/java/org/apache/hadoop/hbase/client/TestHTableUtil.java @@ -42,7 +42,6 @@ import org.junit.experimental.categories.Category; public class TestHTableUtil { final Log LOG = LogFactory.getLog(getClass()); private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static byte [] ROW = Bytes.toBytes("testRow"); private static byte [] FAMILY = 
Bytes.toBytes("testFamily"); private static byte [] QUALIFIER = Bytes.toBytes("testQualifier"); private static byte [] VALUE = Bytes.toBytes("testValue"); diff --git a/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java b/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java index 301ee27f1a9..3acb988df89 100644 --- a/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java +++ b/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java @@ -42,13 +42,13 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.LargeTests; import org.apache.hadoop.hbase.TableExistsException; +import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.ServerCallable; -import org.apache.hadoop.hbase.ipc.HRegionInterface; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad; import org.apache.hadoop.hbase.util.Bytes; @@ -60,6 +60,8 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; import com.google.common.collect.Multimap; +import com.google.protobuf.RpcController; +import com.google.protobuf.ServiceException; /** * Test cases for the atomic load error handling of the bulk load functionality. @@ -259,7 +261,7 @@ public class TestLoadIncrementalHFilesSplitRecovery { } private HConnection getMockedConnection(final Configuration conf) - throws IOException { + throws IOException, ServiceException { HConnection c = Mockito.mock(HConnection.class); Mockito.when(c.getConfiguration()).thenReturn(conf); Mockito.doNothing().when(c).close(); @@ -271,10 +273,10 @@ public class TestLoadIncrementalHFilesSplitRecovery { thenReturn(loc); Mockito.when(c.locateRegion((byte[]) Mockito.any(), (byte[]) Mockito.any())). thenReturn(loc); - HRegionInterface hri = Mockito.mock(HRegionInterface.class); - Mockito.when(hri.bulkLoadHFiles(Mockito.anyList(), (byte [])Mockito.any())). - thenThrow(new IOException("injecting bulk load error")); - Mockito.when(c.getHRegionConnection(Mockito.anyString(), Mockito.anyInt())). + ClientProtocol hri = Mockito.mock(ClientProtocol.class); + Mockito.when(hri.bulkLoadHFile((RpcController)Mockito.any(), (BulkLoadHFileRequest)Mockito.any())). + thenThrow(new ServiceException(new IOException("injecting bulk load error"))); + Mockito.when(c.getClient(Mockito.anyString(), Mockito.anyInt())). 
thenReturn(hri); return c; } diff --git a/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index a59e15212c6..f3168d1f328 100644 --- a/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -27,31 +27,41 @@ import java.util.TreeMap; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerInfo; -import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.catalog.CatalogTracker; -import org.apache.hadoop.hbase.client.Append; -import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.AdminProtocol; +import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Increment; -import org.apache.hadoop.hbase.client.MultiAction; -import org.apache.hadoop.hbase.client.MultiResponse; -import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.coprocessor.Exec; -import org.apache.hadoop.hbase.client.coprocessor.ExecResult; -import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; -import org.apache.hadoop.hbase.filter.WritableByteArrayComparable; -import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary; -import org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.ipc.ProtocolSignature; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.ClientProtocol; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse; +import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ExecCoprocessorRequest; @@ -70,14 +80,10 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.UnlockRowResponse import org.apache.hadoop.hbase.regionserver.CompactionRequestor; import org.apache.hadoop.hbase.regionserver.FlushRequester; import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.regionserver.RegionOpeningState; import org.apache.hadoop.hbase.regionserver.RegionServerAccounting; import org.apache.hadoop.hbase.regionserver.RegionServerServices; -import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException; import org.apache.hadoop.hbase.regionserver.wal.HLog; -import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; @@ -92,7 +98,7 @@ import com.google.protobuf.ServiceException; * {@link #setGetResult(byte[], byte[], Result)} for how to fill the backing data * store that the get pulls from. */ -class MockRegionServer implements HRegionInterface, ClientProtocol, RegionServerServices { +class MockRegionServer implements AdminProtocol, ClientProtocol, RegionServerServices { private final ServerName sn; private final ZooKeeperWatcher zkw; private final Configuration conf; @@ -209,138 +215,12 @@ class MockRegionServer implements HRegionInterface, ClientProtocol, RegionServer return false; } - @Override - public HRegionInfo getRegionInfo(byte[] regionName) { - // Just return this. Calls to getRegionInfo are usually to test connection - // to regionserver does reasonable things so should be safe to return - // anything. 
- return HRegionInfo.ROOT_REGIONINFO; - } - - @Override - public void flushRegion(byte[] regionName) throws IllegalArgumentException, - IOException { - // TODO Auto-generated method stub - } - - @Override - public void flushRegion(byte[] regionName, long ifOlderThanTS) - throws IllegalArgumentException, IOException { - // TODO Auto-generated method stub - } - - @Override - public long getLastFlushTime(byte[] regionName) { - // TODO Auto-generated method stub - return 0; - } - - @Override - public List getStoreFileList(byte[] regionName, byte[] columnFamily) - throws IllegalArgumentException { - // TODO Auto-generated method stub - return null; - } - - @Override - public List getStoreFileList(byte[] regionName, - byte[][] columnFamilies) throws IllegalArgumentException { - // TODO Auto-generated method stub - return null; - } - - @Override - public List getStoreFileList(byte[] regionName) - throws IllegalArgumentException { - // TODO Auto-generated method stub - return null; - } - - @Override - public Result getClosestRowBefore(byte[] regionName, byte[] row, - byte[] family) throws IOException { - // TODO Auto-generated method stub - return null; - } - - @Override - public Result get(byte[] regionName, Get get) throws IOException { - // TODO Auto-generated method stub - return null; - } - - @Override - public boolean exists(byte[] regionName, Get get) throws IOException { - // TODO Auto-generated method stub - return false; - } - - @Override - public void put(byte[] regionName, Put put) throws IOException { - // TODO Auto-generated method stub - } - - @Override - public int put(byte[] regionName, List puts) throws IOException { - // TODO Auto-generated method stub - return 0; - } - - @Override - public void delete(byte[] regionName, Delete delete) throws IOException { - // TODO Auto-generated method stub - } - - @Override - public int delete(byte[] regionName, List deletes) - throws IOException { - // TODO Auto-generated method stub - return 0; - } - - @Override - public boolean checkAndPut(byte[] regionName, byte[] row, byte[] family, - byte[] qualifier, byte[] value, Put put) throws IOException { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean checkAndDelete(byte[] regionName, byte[] row, byte[] family, - byte[] qualifier, byte[] value, Delete delete) throws IOException { - // TODO Auto-generated method stub - return false; - } - - @Override - public long incrementColumnValue(byte[] regionName, byte[] row, - byte[] family, byte[] qualifier, long amount, boolean writeToWAL) - throws IOException { - // TODO Auto-generated method stub - return 0; - } - - @Override - public Result append(byte[] regionName, Append append) throws IOException { - // TODO Auto-generated method stub - return null; - } - - @Override - public Result increment(byte[] regionName, Increment increment) - throws IOException { - // TODO Auto-generated method stub - return null; - } - - @Override public long openScanner(byte[] regionName, Scan scan) throws IOException { long scannerId = this.random.nextLong(); this.scannersAndOffsets.put(scannerId, new RegionNameAndIndex(regionName)); return scannerId; } - @Override public Result next(long scannerId) throws IOException { RegionNameAndIndex rnai = this.scannersAndOffsets.get(scannerId); int index = rnai.getThenIncrement(); @@ -349,173 +229,16 @@ class MockRegionServer implements HRegionInterface, ClientProtocol, RegionServer return index < results.length? 
results[index]: null; } - @Override public Result [] next(long scannerId, int numberOfRows) throws IOException { // Just return one result whatever they ask for. Result r = next(scannerId); return r == null? null: new Result [] {r}; } - @Override public void close(final long scannerId) throws IOException { this.scannersAndOffsets.remove(scannerId); } - @Override - public long lockRow(byte[] regionName, byte[] row) throws IOException { - // TODO Auto-generated method stub - return 0; - } - - @Override - public void unlockRow(byte[] regionName, long lockId) throws IOException { - // TODO Auto-generated method stub - - } - - @Override - public List getOnlineRegions() throws IOException { - // TODO Auto-generated method stub - return null; - } - - @Override - public List getOnlineRegions(byte[] tableName) throws IOException { - // TODO Auto-generated method stub - return null; - } - - @Override - public HServerInfo getHServerInfo() throws IOException { - // TODO Auto-generated method stub - return null; - } - - @Override - public MultiResponse multi(MultiAction multi) throws IOException { - // TODO Auto-generated method stub - return null; - } - - @Override - public boolean bulkLoadHFiles(List> familyPaths, - byte[] regionName) throws IOException { - // TODO Auto-generated method stub - return false; - } - - @Override - public RegionOpeningState openRegion(HRegionInfo region) throws IOException { - // TODO Auto-generated method stub - return null; - } - - @Override - public RegionOpeningState openRegion(HRegionInfo region, - int versionOfOfflineNode) throws IOException { - // TODO Auto-generated method stub - return null; - } - - @Override - public void openRegions(List regions) throws IOException { - // TODO Auto-generated method stub - } - - @Override - public boolean closeRegion(HRegionInfo region) throws IOException { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean closeRegion(HRegionInfo region, int versionOfClosingNode) - throws IOException { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean closeRegion(HRegionInfo region, boolean zk) - throws IOException { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean closeRegion(byte[] encodedRegionName, boolean zk) - throws IOException { - // TODO Auto-generated method stub - return false; - } - - @Override - public void flushRegion(HRegionInfo regionInfo) - throws NotServingRegionException, IOException { - // TODO Auto-generated method stub - } - - @Override - public void splitRegion(HRegionInfo regionInfo) - throws NotServingRegionException, IOException { - // TODO Auto-generated method stub - } - - @Override - public void splitRegion(HRegionInfo regionInfo, byte[] splitPoint) - throws NotServingRegionException, IOException { - // TODO Auto-generated method stub - } - - @Override - public void compactRegion(HRegionInfo regionInfo, boolean major) - throws NotServingRegionException, IOException { - // TODO Auto-generated method stub - } - - @Override - public void replicateLogEntries(Entry[] entries) throws IOException { - // TODO Auto-generated method stub - } - - @Override - public ExecResult execCoprocessor(byte[] regionName, Exec call) - throws IOException { - // TODO Auto-generated method stub - return null; - } - - @Override - public boolean checkAndPut(byte[] regionName, byte[] row, byte[] family, - byte[] qualifier, CompareOp compareOp, - WritableByteArrayComparable comparator, Put put) throws IOException { - // TODO 
Auto-generated method stub - return false; - } - - @Override - public boolean checkAndDelete(byte[] regionName, byte[] row, byte[] family, - byte[] qualifier, CompareOp compareOp, - WritableByteArrayComparable comparator, Delete delete) - throws IOException { - // TODO Auto-generated method stub - return false; - } - - @Override - public List getBlockCacheColumnFamilySummaries() - throws IOException { - // TODO Auto-generated method stub - return null; - } - - @Override - public byte[][] rollHLogWriter() throws IOException, - FailedLogCloseException { - // TODO Auto-generated method stub - return null; - } - @Override public void stop(String why) { this.zkw.close(); @@ -612,11 +335,6 @@ class MockRegionServer implements HRegionInterface, ClientProtocol, RegionServer return null; } - @Override - public void mutateRow(byte[] regionName, RowMutations rm) throws IOException { - // TODO Auto-generated method stub - } - @Override public GetResponse get(RpcController controller, GetRequest request) throws ServiceException { @@ -699,4 +417,95 @@ class MockRegionServer implements HRegionInterface, ClientProtocol, RegionServer // TODO Auto-generated method stub return null; } + + @Override + public GetRegionInfoResponse getRegionInfo(RpcController controller, + GetRegionInfoRequest request) throws ServiceException { + GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder(); + builder.setRegionInfo(ProtobufUtil.toRegionInfo(HRegionInfo.ROOT_REGIONINFO)); + return builder.build(); + } + + @Override + public GetStoreFileResponse getStoreFile(RpcController controller, + GetStoreFileRequest request) throws ServiceException { + // TODO Auto-generated method stub + return null; + } + + @Override + public GetOnlineRegionResponse getOnlineRegion(RpcController controller, + GetOnlineRegionRequest request) throws ServiceException { + // TODO Auto-generated method stub + return null; + } + + @Override + public OpenRegionResponse openRegion(RpcController controller, + OpenRegionRequest request) throws ServiceException { + // TODO Auto-generated method stub + return null; + } + + @Override + public CloseRegionResponse closeRegion(RpcController controller, + CloseRegionRequest request) throws ServiceException { + // TODO Auto-generated method stub + return null; + } + + @Override + public FlushRegionResponse flushRegion(RpcController controller, + FlushRegionRequest request) throws ServiceException { + // TODO Auto-generated method stub + return null; + } + + @Override + public SplitRegionResponse splitRegion(RpcController controller, + SplitRegionRequest request) throws ServiceException { + // TODO Auto-generated method stub + return null; + } + + @Override + public CompactRegionResponse compactRegion(RpcController controller, + CompactRegionRequest request) throws ServiceException { + // TODO Auto-generated method stub + return null; + } + + @Override + public ReplicateWALEntryResponse replicateWALEntry(RpcController controller, + ReplicateWALEntryRequest request) throws ServiceException { + // TODO Auto-generated method stub + return null; + } + + @Override + public RollWALWriterResponse rollWALWriter(RpcController controller, + RollWALWriterRequest request) throws ServiceException { + // TODO Auto-generated method stub + return null; + } + + @Override + public GetServerInfoResponse getServerInfo(RpcController controller, + GetServerInfoRequest request) throws ServiceException { + // TODO Auto-generated method stub + return null; + } + + @Override + public StopServerResponse 
stopServer(RpcController controller, + StopServerRequest request) throws ServiceException { + // TODO Auto-generated method stub + return null; + } + + @Override + public List getOnlineRegions(byte[] tableName) throws IOException { + // TODO Auto-generated method stub + return null; + } } \ No newline at end of file diff --git a/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java b/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java index df57ec124ff..36046f8d4d5 100644 --- a/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java +++ b/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SmallTests; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.catalog.CatalogTracker; +import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; @@ -48,7 +49,6 @@ import org.apache.hadoop.hbase.executor.EventHandler.EventType; import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorType; import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.ClientProtocol; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; diff --git a/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index cedf31e3544..1020374352e 100644 --- a/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -47,15 +47,15 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SmallTests; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.catalog.CatalogTracker; +import org.apache.hadoop.hbase.client.AdminProtocol; +import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.io.Reference; -import org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.ClientProtocol; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse; import org.apache.hadoop.hbase.regionserver.Store; @@ -93,12 +93,12 @@ public class TestCatalogJanitor { } catch (ServiceException se) { throw ProtobufUtil.getRemoteException(se); } - // Mock an HConnection and a HRegionInterface implementation. Have the + // Mock an HConnection and a AdminProtocol implementation. Have the // HConnection return the HRI. Have the HRI return a few mocked up responses // to make our test work. 
this.connection = HConnectionTestingUtility.getMockedConnectionAndDecorate(this.c, - Mockito.mock(HRegionInterface.class), ri, + Mockito.mock(AdminProtocol.class), ri, new ServerName("example.org,12345,6789"), HRegionInfo.FIRST_META_REGIONINFO); // Set hbase.rootdir into test dir. @@ -106,7 +106,7 @@ public class TestCatalogJanitor { Path rootdir = fs.makeQualified(new Path(this.c.get(HConstants.HBASE_DIR))); this.c.set(HConstants.HBASE_DIR, rootdir.toString()); this.ct = Mockito.mock(CatalogTracker.class); - HRegionInterface hri = Mockito.mock(HRegionInterface.class); + AdminProtocol hri = Mockito.mock(AdminProtocol.class); Mockito.when(this.ct.getConnection()).thenReturn(this.connection); Mockito.when(ct.waitForMetaServerConnectionDefault()).thenReturn(hri); } diff --git a/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java b/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java index 3122b159224..ed6a0933488 100644 --- a/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java +++ b/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java @@ -23,7 +23,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.catalog.MetaReader; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventHandler.EventHandlerListener; @@ -31,7 +30,6 @@ import org.apache.hadoop.hbase.executor.EventHandler.EventType; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; -import java.io.IOException; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index c0ac12ca5e3..e4e7cebb75d 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -68,7 +68,6 @@ import org.apache.hadoop.hbase.filter.FilterList; import org.apache.hadoop.hbase.filter.NullComparator; import org.apache.hadoop.hbase.filter.PrefixFilter; import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; -import org.apache.hadoop.hbase.io.hfile.Compression; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java index d0cad45a429..b5e43e06bc7 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread; import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext; +import org.apache.hadoop.hbase.client.AdminProtocol; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Result; @@ -39,8 +40,8 @@ import org.apache.hadoop.hbase.client.ServerCallable; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import 
org.apache.hadoop.hbase.io.hfile.Compression; import org.apache.hadoop.hbase.io.hfile.HFile; -import org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.protobuf.RequestConverter; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; @@ -164,9 +165,12 @@ public class TestHRegionServerBulkLoad { public Void call() throws Exception { LOG.debug("compacting " + location + " for row " + Bytes.toStringBinary(row)); - HRegionInterface server = connection.getHRegionConnection( + AdminProtocol server = connection.getAdmin( location.getHostname(), location.getPort()); - server.compactRegion(location.getRegionInfo(), true); + CompactRegionRequest request = + RequestConverter.buildCompactRegionRequest( + location.getRegionInfo().getRegionName(), true); + server.compactRegion(null, request); numCompactions.incrementAndGet(); return null; } diff --git a/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java index 6b64f10f033..4314572e16b 100644 --- a/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java +++ b/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java @@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.AdminProtocol; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HConnection; @@ -59,7 +60,7 @@ import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.executor.EventHandler.EventType; import org.apache.hadoop.hbase.executor.RegionTransitionData; -import org.apache.hadoop.hbase.ipc.HRegionInterface; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE; @@ -400,19 +401,19 @@ public class TestHBaseFsck { /** * Get region info from local cluster. */ - Map> getDeployedHRIs(HBaseAdmin admin) - throws IOException { + Map> getDeployedHRIs( + final HBaseAdmin admin) throws IOException { ClusterStatus status = admin.getMaster().getClusterStatus(); Collection regionServers = status.getServers(); Map> mm = new HashMap>(); HConnection connection = admin.getConnection(); for (ServerName hsi : regionServers) { - HRegionInterface server = - connection.getHRegionConnection(hsi.getHostname(), hsi.getPort()); + AdminProtocol server = + connection.getAdmin(hsi.getHostname(), hsi.getPort()); // list all online regions from this region server - List regions = server.getOnlineRegions(); + List regions = ProtobufUtil.getOnlineRegions(server); List regionNames = new ArrayList(); for (HRegionInfo hri : regions) { regionNames.add(hri.getRegionNameAsString());