diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/IpcProtocol.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/IpcProtocol.java deleted file mode 100644 index 3b0c535a34e..00000000000 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/IpcProtocol.java +++ /dev/null @@ -1,32 +0,0 @@ -package org.apache.hadoop.hbase; -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Marker Interface used by ipc. We need a means of referring to - * ipc "protocols" generically. For example, we need to tell an rpc - * server the "protocols" it implements and it helps if all protocols - * implement a common 'type'. That is what this Interface is used for. - */ -// This Interface replaces the old VersionedProtocol Interface. Rather -// than redo a bunch of code its removal, instead we put in place this -// Interface and change all VP references to Protocol references. - -// It is moved up here to top-level because it is ugly having members -// of super packages reach down into subpackages. -public interface IpcProtocol {} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java deleted file mode 100644 index 2e4b76c063d..00000000000 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterAdminProtocol.java +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService; -import org.apache.hadoop.hbase.security.KerberosInfo; -import org.apache.hadoop.hbase.security.TokenInfo; - -/** - * Protocol that a client uses to communicate with the Master (for admin purposes). 
- */ -@KerberosInfo( - serverPrincipal = "hbase.master.kerberos.principal") -@TokenInfo("HBASE_AUTH_TOKEN") -@InterfaceAudience.Private -@InterfaceStability.Evolving -public interface MasterAdminProtocol -extends MasterAdminService.BlockingInterface, MasterProtocol {} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterMonitorProtocol.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterMonitorProtocol.java deleted file mode 100644 index b8c3dff95a0..00000000000 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterMonitorProtocol.java +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.MasterMonitorService; -import org.apache.hadoop.hbase.security.KerberosInfo; -import org.apache.hadoop.hbase.security.TokenInfo; - -/** - * Protocol that a client uses to communicate with the Master (for monitoring purposes). 
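The three deletions above (IpcProtocol, MasterAdminProtocol, MasterMonitorProtocol) all follow one pattern: a hand-written marker interface glued onto a protobuf-generated BlockingInterface so the RPC layer could refer to any protocol through a single common type. Below is a minimal sketch of that pattern and of the shape the patch moves to; the interface names are stand-ins, not the real generated classes.

// Stand-alone sketch of the marker-interface pattern this patch removes.
// AdminServiceBlockingInterface stands in for a protobuf-generated stub;
// none of these names are the real HBase classes.
interface IpcProtocol {}  // marker only: lets the RPC layer refer to any protocol generically

interface AdminServiceBlockingInterface {
  String getRegionInfo(String regionName);
}

// Before this patch: one extra .java file per protocol, just to combine the two types.
interface AdminProtocol extends AdminServiceBlockingInterface, IpcProtocol {}

class MarkerDemo {
  // After this patch: callers accept the generated BlockingInterface directly.
  static String describe(AdminServiceBlockingInterface admin) {
    return admin.getRegionInfo("someRegion");
  }

  public static void main(String[] args) {
    System.out.println(describe(name -> "info for " + name));
  }
}

Dropping the marker means one less hand-maintained file per protocol; the trade-off is that the RPC layer can no longer constrain "any protocol" with one Java type.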
- */ -@KerberosInfo( - serverPrincipal = "hbase.master.kerberos.principal") -@TokenInfo("HBASE_AUTH_TOKEN") -@InterfaceAudience.Public -@InterfaceStability.Evolving -public interface MasterMonitorProtocol -extends MasterMonitorService.BlockingInterface, MasterProtocol {} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java index 288068ec305..d9027ed2a3d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RemoteExceptionHandler.java @@ -98,7 +98,6 @@ public class RemoteExceptionHandler { if (t instanceof IOException) { i = (IOException) t; - } else { i = new IOException("server error"); i.initCause(t); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java index 46b85f1085d..4b4856249f3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java @@ -24,7 +24,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.AdminProtocol; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HTable; @@ -32,6 +31,7 @@ import org.apache.hadoop.hbase.client.RetriesExhaustedException; import org.apache.hadoop.hbase.exceptions.NotAllMetaRegionsOnlineException; import org.apache.hadoop.hbase.exceptions.ServerNotRunningYetException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -265,7 +265,7 @@ public class CatalogTracker { * @throws IOException * @deprecated Use #getMetaServerConnection(long) */ - public AdminProtocol waitForMetaServerConnection(long timeout) + public AdminService.BlockingInterface waitForMetaServerConnection(long timeout) throws InterruptedException, NotAllMetaRegionsOnlineException, IOException { return getMetaServerConnection(timeout); } @@ -281,7 +281,7 @@ public class CatalogTracker { * @throws NotAllMetaRegionsOnlineException if timed out waiting * @throws IOException */ - AdminProtocol getMetaServerConnection(long timeout) + AdminService.BlockingInterface getMetaServerConnection(long timeout) throws InterruptedException, NotAllMetaRegionsOnlineException, IOException { return getCachedConnection(waitForMeta(timeout)); } @@ -313,14 +313,14 @@ public class CatalogTracker { * invocation, or may be null. 
* @throws IOException */ - private AdminProtocol getCachedConnection(ServerName sn) + private AdminService.BlockingInterface getCachedConnection(ServerName sn) throws IOException { if (sn == null) { return null; } - AdminProtocol protocol = null; + AdminService.BlockingInterface service = null; try { - protocol = connection.getAdmin(sn); + service = connection.getAdmin(sn); } catch (RetriesExhaustedException e) { if (e.getCause() != null && e.getCause() instanceof ConnectException) { // Catch this; presume it means the cached connection has gone bad. @@ -349,7 +349,7 @@ public class CatalogTracker { } } - return protocol; + return service; } /** @@ -367,7 +367,7 @@ public class CatalogTracker { // rather than have to pass it in. Its made awkward by the fact that the // HRI is likely a proxy against remote server so the getServerName needs // to be fixed to go to a local method or to a cache before we can do this. - private boolean verifyRegionLocation(AdminProtocol hostingServer, + private boolean verifyRegionLocation(AdminService.BlockingInterface hostingServer, final ServerName address, final byte [] regionName) throws IOException { if (hostingServer == null) { @@ -411,9 +411,9 @@ public class CatalogTracker { */ public boolean verifyMetaRegionLocation(final long timeout) throws InterruptedException, IOException { - AdminProtocol connection = null; + AdminService.BlockingInterface service = null; try { - connection = waitForMetaServerConnection(timeout); + service = waitForMetaServerConnection(timeout); } catch (NotAllMetaRegionsOnlineException e) { // Pass } catch (ServerNotRunningYetException e) { @@ -421,8 +421,8 @@ public class CatalogTracker { } catch (UnknownHostException e) { // Pass -- server name doesn't resolve so it can't be assigned anything. } - return (connection == null)? false: - verifyRegionLocation(connection, + return (service == null)? false: + verifyRegionLocation(service, this.metaRegionTracker.getMetaRegionLocation(), META_REGION_NAME); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminProtocol.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminProtocol.java deleted file mode 100644 index 7a43451791a..00000000000 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminProtocol.java +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.IpcProtocol; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; -import org.apache.hadoop.hbase.security.KerberosInfo; -import org.apache.hadoop.hbase.security.TokenInfo; - -/** - * Protocol that a HBase client uses to communicate with a region server. - */ -@KerberosInfo( - serverPrincipal = "hbase.regionserver.kerberos.principal") -@TokenInfo("HBASE_AUTH_TOKEN") -@InterfaceAudience.Private -public interface AdminProtocol -extends AdminService.BlockingInterface, IpcProtocol {} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientProtocol.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientProtocol.java deleted file mode 100644 index 16ae40ce4d8..00000000000 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientProtocol.java +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.IpcProtocol; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; -import org.apache.hadoop.hbase.security.KerberosInfo; -import org.apache.hadoop.hbase.security.TokenInfo; - -/** - * Protocol that a HBase client uses to communicate with a region server. 
- */ -@KerberosInfo( - serverPrincipal = "hbase.regionserver.kerberos.principal") -@TokenInfo("HBASE_AUTH_TOKEN") -@InterfaceAudience.Public -@InterfaceStability.Evolving -public interface ClientProtocol -extends ClientService.BlockingInterface, IpcProtocol {} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java index eb7f0c36230..160cba7add1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java @@ -318,8 +318,8 @@ public class ClientScanner extends AbstractClientScanner { if (retryAfterOutOfOrderException) { retryAfterOutOfOrderException = false; } else { - throw new DoNotRetryIOException("Failed after retry" - + ", it could be cause by rpc timeout", e); + throw new DoNotRetryIOException("Failed after retry of " + + "OutOfOrderScannerNextException: was there a rpc timeout?", e); } } // Clear region diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index f8c3ec63b01..1e19a454456 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -68,6 +68,7 @@ import org.apache.hadoop.hbase.ipc.MasterCoprocessorRpcChannel; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.protobuf.ResponseConverter; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest; @@ -78,6 +79,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRespo import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; @@ -570,7 +572,7 @@ public class HBaseAdmin implements Abortable, Closeable { firstMetaServer.getRegionInfo().getRegionName(), scan, 1, true); Result[] values = null; // Get a batch at a time. 
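The ClientScanner hunk above tolerates a single OutOfOrderScannerNextException (it resets the scanner and retries once) and converts a second occurrence into a DoNotRetryIOException. A self-contained sketch of that retry-once policy, with hypothetical exception types standing in for the HBase ones:

// Retry-once sketch: tolerate a transient error a single time, then give up
// with a non-retryable failure. TransientScanError and FatalScanError are
// illustrative stand-ins, not HBase classes.
import java.util.concurrent.atomic.AtomicInteger;

class TransientScanError extends RuntimeException {}
class FatalScanError extends RuntimeException {
  FatalScanError(String msg, Throwable cause) { super(msg, cause); }
}

class RetryOnceScanner {
  private boolean retryAfterTransientError = true;

  int next(AtomicInteger attempts) {
    while (true) {
      try {
        attempts.incrementAndGet();
        return fetchBatch(attempts.get());
      } catch (TransientScanError e) {
        if (retryAfterTransientError) {
          retryAfterTransientError = false;   // allow exactly one retry
        } else {
          throw new FatalScanError("Failed after retry: was there an rpc timeout?", e);
        }
      }
    }
  }

  // Simulated server call: fails on the first attempt, succeeds on the second.
  private int fetchBatch(int attempt) {
    if (attempt == 1) throw new TransientScanError();
    return 42;
  }

  public static void main(String[] args) {
    System.out.println(new RetryOnceScanner().next(new AtomicInteger()));
  }
}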
- ClientProtocol server = connection.getClient(firstMetaServer.getServerName()); + ClientService.BlockingInterface server = connection.getClient(firstMetaServer.getServerName()); try { ScanResponse response = server.scan(null, request); values = ResponseConverter.getResults(response); @@ -583,7 +585,7 @@ public class HBaseAdmin implements Abortable, Closeable { if (values == null || values.length == 0) { tableExists = false; GetTableDescriptorsResponse htds; - MasterMonitorKeepAliveConnection master = connection.getKeepAliveMasterMonitor(); + MasterMonitorKeepAliveConnection master = connection.getKeepAliveMasterMonitorService(); try { GetTableDescriptorsRequest req = RequestConverter.buildGetTableDescriptorsRequest(null); @@ -607,7 +609,7 @@ public class HBaseAdmin implements Abortable, Closeable { if(tries == numRetries - 1) { // no more tries left if (ex instanceof RemoteException) { throw ((RemoteException) ex).unwrapRemoteException(); - }else { + } else { throw ex; } } @@ -1221,7 +1223,7 @@ public class HBaseAdmin implements Abortable, Closeable { "The servername cannot be null or empty."); } ServerName sn = new ServerName(serverName); - AdminProtocol admin = this.connection.getAdmin(sn); + AdminService.BlockingInterface admin = this.connection.getAdmin(sn); // Close the region without updating zk state. CloseRegionRequest request = RequestConverter.buildCloseRegionRequest(encodedRegionName, false); @@ -1246,8 +1248,7 @@ public class HBaseAdmin implements Abortable, Closeable { */ public void closeRegion(final ServerName sn, final HRegionInfo hri) throws IOException { - AdminProtocol admin = - this.connection.getAdmin(sn); + AdminService.BlockingInterface admin = this.connection.getAdmin(sn); // Close the region without updating zk state. ProtobufUtil.closeRegion(admin, hri.getRegionName(), false); } @@ -1257,8 +1258,7 @@ public class HBaseAdmin implements Abortable, Closeable { */ public List getOnlineRegions( final ServerName sn) throws IOException { - AdminProtocol admin = - this.connection.getAdmin(sn); + AdminService.BlockingInterface admin = this.connection.getAdmin(sn); return ProtobufUtil.getOnlineRegions(admin); } @@ -1320,8 +1320,7 @@ public class HBaseAdmin implements Abortable, Closeable { private void flush(final ServerName sn, final HRegionInfo hri) throws IOException { - AdminProtocol admin = - this.connection.getAdmin(sn); + AdminService.BlockingInterface admin = this.connection.getAdmin(sn); FlushRegionRequest request = RequestConverter.buildFlushRegionRequest(hri.getRegionName()); try { @@ -1490,8 +1489,7 @@ public class HBaseAdmin implements Abortable, Closeable { private void compact(final ServerName sn, final HRegionInfo hri, final boolean major, final byte [] family) throws IOException { - AdminProtocol admin = - this.connection.getAdmin(sn); + AdminService.BlockingInterface admin = this.connection.getAdmin(sn); CompactRegionRequest request = RequestConverter.buildCompactRegionRequest(hri.getRegionName(), major, family); try { @@ -1518,10 +1516,11 @@ public class HBaseAdmin implements Abortable, Closeable { */ public void move(final byte [] encodedRegionName, final byte [] destServerName) throws HBaseIOException, MasterNotRunningException, ZooKeeperConnectionException { - MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdmin(); + MasterAdminKeepAliveConnection stub = connection.getKeepAliveMasterAdminService(); try { - MoveRegionRequest request = RequestConverter.buildMoveRegionRequest(encodedRegionName, destServerName); - 
master.moveRegion(null,request); + MoveRegionRequest request = + RequestConverter.buildMoveRegionRequest(encodedRegionName, destServerName); + stub.moveRegion(null,request); } catch (ServiceException se) { IOException ioe = ProtobufUtil.getRemoteException(se); if (ioe instanceof HBaseIOException) { @@ -1530,9 +1529,8 @@ public class HBaseAdmin implements Abortable, Closeable { LOG.error("Unexpected exception: " + se + " from calling HMaster.moveRegion"); } catch (DeserializationException de) { LOG.error("Could not parse destination server name: " + de); - } - finally { - master.close(); + } finally { + stub.close(); } } @@ -1587,7 +1585,7 @@ public class HBaseAdmin implements Abortable, Closeable { */ public void offline(final byte [] regionName) throws IOException { - MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdmin(); + MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdminService(); try { master.offlineRegion(null,RequestConverter.buildOfflineRegionRequest(regionName)); } catch (ServiceException se) { @@ -1605,11 +1603,11 @@ public class HBaseAdmin implements Abortable, Closeable { */ public boolean setBalancerRunning(final boolean on, final boolean synchronous) throws MasterNotRunningException, ZooKeeperConnectionException { - MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdmin(); + MasterAdminKeepAliveConnection stub = connection.getKeepAliveMasterAdminService(); try { SetBalancerRunningRequest req = RequestConverter.buildSetBalancerRunningRequest(on, synchronous); - return master.setBalancerRunning(null, req).getPrevBalanceValue(); + return stub.setBalancerRunning(null, req).getPrevBalanceValue(); } catch (ServiceException se) { IOException ioe = ProtobufUtil.getRemoteException(se); if (ioe instanceof MasterNotRunningException) { @@ -1623,7 +1621,7 @@ public class HBaseAdmin implements Abortable, Closeable { // break interface by adding additional exception type. 
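The surrounding catch block shows the error-handling shape shared by most admin calls in this patch: invoke the blocking stub, catch com.google.protobuf.ServiceException, and unwrap it back to the underlying IOException (in HBase via ProtobufUtil.getRemoteException). A hedged, self-contained sketch of that unwrap step; LocalServiceException and BalancerStub are stand-ins so it runs without protobuf on the classpath:

// Sketch of the unwrap idiom used throughout HBaseAdmin above: a blocking stub
// throws ServiceException, and the caller surfaces the original IOException.
import java.io.IOException;

class LocalServiceException extends Exception {
  LocalServiceException(Throwable cause) { super(cause); }
}

interface BalancerStub {
  boolean setBalancerRunning(Object controller, boolean on) throws LocalServiceException;
}

class UnwrapDemo {
  static boolean setBalancer(BalancerStub stub, boolean on) throws IOException {
    try {
      return stub.setBalancerRunning(null, on);  // controller passed as null, as in the patch
    } catch (LocalServiceException se) {
      Throwable cause = se.getCause();           // mirrors ProtobufUtil.getRemoteException
      if (cause instanceof IOException) {
        throw (IOException) cause;               // rethrow the server-side failure as-is
      }
      throw new IOException("Unexpected non-IO failure from RPC", se);
    }
  }

  public static void main(String[] args) throws IOException {
    System.out.println(setBalancer((c, on) -> on, true));
  }
}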
throw new MasterNotRunningException("Unexpected exception when calling balanceSwitch",se); } finally { - master.close(); + stub.close(); } } @@ -1635,11 +1633,11 @@ public class HBaseAdmin implements Abortable, Closeable { */ public boolean balancer() throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException { - MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdmin(); + MasterAdminKeepAliveConnection stub = connection.getKeepAliveMasterAdminService(); try { - return master.balance(null,RequestConverter.buildBalanceRequest()).getBalancerRan(); + return stub.balance(null,RequestConverter.buildBalanceRequest()).getBalancerRan(); } finally { - master.close(); + stub.close(); } } @@ -1652,12 +1650,12 @@ public class HBaseAdmin implements Abortable, Closeable { */ public boolean enableCatalogJanitor(boolean enable) throws ServiceException, MasterNotRunningException { - MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdmin(); + MasterAdminKeepAliveConnection stub = connection.getKeepAliveMasterAdminService(); try { - return master.enableCatalogJanitor(null, + return stub.enableCatalogJanitor(null, RequestConverter.buildEnableCatalogJanitorRequest(enable)).getPrevValue(); } finally { - master.close(); + stub.close(); } } @@ -1668,12 +1666,12 @@ public class HBaseAdmin implements Abortable, Closeable { * @throws MasterNotRunningException */ public int runCatalogScan() throws ServiceException, MasterNotRunningException { - MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdmin(); + MasterAdminKeepAliveConnection stub = connection.getKeepAliveMasterAdminService(); try { - return master.runCatalogScan(null, + return stub.runCatalogScan(null, RequestConverter.buildCatalogScanRequest()).getScanResult(); } finally { - master.close(); + stub.close(); } } @@ -1683,12 +1681,12 @@ public class HBaseAdmin implements Abortable, Closeable { * @throws org.apache.hadoop.hbase.exceptions.MasterNotRunningException */ public boolean isCatalogJanitorEnabled() throws ServiceException, MasterNotRunningException { - MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdmin(); + MasterAdminKeepAliveConnection stub = connection.getKeepAliveMasterAdminService(); try { - return master.isCatalogJanitorEnabled(null, + return stub.isCatalogJanitorEnabled(null, RequestConverter.buildIsCatalogJanitorEnabledRequest()).getValue(); } finally { - master.close(); + stub.close(); } } @@ -1704,7 +1702,7 @@ public class HBaseAdmin implements Abortable, Closeable { final byte[] encodedNameOfRegionB, final boolean forcible) throws IOException { MasterAdminKeepAliveConnection master = connection - .getKeepAliveMasterAdmin(); + .getKeepAliveMasterAdminService(); try { DispatchMergingRegionsRequest request = RequestConverter .buildDispatchMergingRegionsRequest(encodedNameOfRegionA, @@ -1800,8 +1798,7 @@ public class HBaseAdmin implements Abortable, Closeable { private void split(final ServerName sn, final HRegionInfo hri, byte[] splitPoint) throws IOException { - AdminProtocol admin = - this.connection.getAdmin(sn); + AdminService.BlockingInterface admin = this.connection.getAdmin(sn); ProtobufUtil.split(admin, hri, splitPoint); } @@ -1924,7 +1921,7 @@ public class HBaseAdmin implements Abortable, Closeable { throws IOException { String hostname = Addressing.parseHostname(hostnamePort); int port = Addressing.parsePort(hostnamePort); - AdminProtocol admin = + AdminService.BlockingInterface admin = this.connection.getAdmin(new 
ServerName(hostname, port, 0)); StopServerRequest request = RequestConverter.buildStopServerRequest( "Called by admin client " + this.connection.toString()); @@ -2067,7 +2064,7 @@ public class HBaseAdmin implements Abortable, Closeable { public synchronized byte[][] rollHLogWriter(String serverName) throws IOException, FailedLogCloseException { ServerName sn = new ServerName(serverName); - AdminProtocol admin = this.connection.getAdmin(sn); + AdminService.BlockingInterface admin = this.connection.getAdmin(sn); RollWALWriterRequest request = RequestConverter.buildRollWALWriterRequest(); try { RollWALWriterResponse response = admin.rollWALWriter(null, request); @@ -2127,8 +2124,7 @@ public class HBaseAdmin implements Abortable, Closeable { throw new NoServerForRegionException(Bytes.toStringBinary(tableNameOrRegionName)); } else { ServerName sn = regionServerPair.getSecond(); - AdminProtocol admin = - this.connection.getAdmin(sn); + AdminService.BlockingInterface admin = this.connection.getAdmin(sn); GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest( regionServerPair.getFirst().getRegionName(), true); GetRegionInfoResponse response = admin.getRegionInfo(null, request); @@ -2143,8 +2139,7 @@ public class HBaseAdmin implements Abortable, Closeable { if (pair.getSecond() == null) continue; try { ServerName sn = pair.getSecond(); - AdminProtocol admin = - this.connection.getAdmin(sn); + AdminService.BlockingInterface admin = this.connection.getAdmin(sn); GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest( pair.getFirst().getRegionName(), true); GetRegionInfoResponse response = admin.getRegionInfo(null, request); @@ -2607,7 +2602,7 @@ public class HBaseAdmin implements Abortable, Closeable { * Create a {@link MasterAdminCallable} to use it. */ private V execute(MasterAdminCallable function) throws IOException { - function.masterAdmin = connection.getKeepAliveMasterAdmin(); + function.masterAdmin = connection.getKeepAliveMasterAdminService(); try { return executeCallable(function); } finally { @@ -2621,7 +2616,7 @@ public class HBaseAdmin implements Abortable, Closeable { * Create a {@link MasterAdminCallable} to use it. */ private V execute(MasterMonitorCallable function) throws IOException { - function.masterMonitor = connection.getKeepAliveMasterMonitor(); + function.masterMonitor = connection.getKeepAliveMasterMonitorService(); try { return executeCallable(function); } finally { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectable.java new file mode 100644 index 00000000000..21485d3b528 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectable.java @@ -0,0 +1,48 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; + +/** + * This class makes it convenient for one to execute a command in the context + * of a {@link HConnection} instance based on the given {@link Configuration}. + * + *
<p>
+ * If you find yourself wanting to use a {@link HConnection} for a relatively + * short duration of time, and do not want to deal with the hassle of creating + * and cleaning up that resource, then you should consider using this + * convenience class. + * + * @param <T> + * the return type of the {@link HConnectable#connect(HConnection)} + * method. + */ +public abstract class HConnectable<T> { + public Configuration conf; + + protected HConnectable(Configuration conf) { + this.conf = conf; + } + + public abstract T connect(HConnection connection) throws IOException; +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java index 9c0a0f469b1..2ab9897ea68 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java @@ -18,40 +18,43 @@ */ package org.apache.hadoop.hbase.client; +import java.io.Closeable; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.ExecutorService; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.MasterAdminProtocol; -import org.apache.hadoop.hbase.MasterMonitorProtocol; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.exceptions.MasterNotRunningException; import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.MasterMonitorService; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -import java.io.Closeable; -import java.io.IOException; -import java.util.List; -import java.util.concurrent.ExecutorService; - /** - * Cluster connection. Hosts a connection to the ZooKeeper ensemble and - * thereafter into the HBase cluster. Knows how to locate regions out on the cluster, + * A cluster connection. Knows how to find the master, locate regions out on the cluster, * keeps a cache of locations and then knows how to recalibrate after they move. - * {@link HConnectionManager} manages instances of this class. + * {@link HConnectionManager} manages instances of this class. This is NOT a connection to a + * particular server but to all servers in the cluster. An implementation takes care of individual + * connections at a lower level. * *
<p>
HConnections are used by {@link HTable} mostly but also by * {@link HBaseAdmin}, {@link CatalogTracker}, * and {@link ZooKeeperWatcher}. HConnection instances can be shared. Sharing * is usually what you want because rather than each HConnection instance * having to do its own discovery of regions out on the cluster, instead, all - * clients get to share the one cache of locations. Sharing makes cleanup of - * HConnections awkward. See {@link HConnectionManager} for cleanup - * discussion. + * clients get to share the one cache of locations. {@link HConnectionManager} does the + * sharing for you if you go by it getting connections. Sharing makes cleanup of + * HConnections awkward. See {@link HConnectionManager} for cleanup discussion. * * @see HConnectionManager */ @@ -213,29 +216,14 @@ public interface HConnection extends Abortable, Closeable { final boolean offlined) throws IOException; /** - * Returns a {@link MasterAdminProtocol} to the active master + * Returns a {@link MasterAdminKeepAliveConnection} to the active master */ - public MasterAdminProtocol getMasterAdmin() throws IOException; + public MasterAdminService.BlockingInterface getMasterAdmin() throws IOException; /** - * Returns an {@link MasterMonitorProtocol} to the active master + * Returns an {@link MasterMonitorKeepAliveConnection} to the active master */ - public MasterMonitorProtocol getMasterMonitor() throws IOException; - - - /** - * Establishes a connection to the region server at the specified address. - * @param hostname RegionServer hostname - * @param port RegionServer port - * @return proxy for HRegionServer - * @throws IOException if a remote or network exception occurs - * @deprecated - use @link {#getAdmin(final ServerName serverName)} which takes into account - * the startCode - */ - @Deprecated - public AdminProtocol getAdmin(final String hostname, final int port) - throws IOException; - + public MasterMonitorService.BlockingInterface getMasterMonitor() throws IOException; /** * Establishes a connection to the region server at the specified address. @@ -243,27 +231,10 @@ public interface HConnection extends Abortable, Closeable { * @return proxy for HRegionServer * @throws IOException if a remote or network exception occurs */ - public AdminProtocol getAdmin(final ServerName serverName) - throws IOException; + public AdminService.BlockingInterface getAdmin(final ServerName serverName) throws IOException; /** - * Establishes a connection to the region server at the specified address, and return - * a region client protocol. - * - * @param hostname RegionServer hostname - * @param port RegionServer port - * @return ClientProtocol proxy for RegionServer - * @throws IOException if a remote or network exception occurs - * @deprecated - use @link {#getClient(final ServerName serverName)} which takes into account - * the startCode - */ - @Deprecated - public ClientProtocol getClient(final String hostname, final int port) - throws IOException; - - - /** - * Establishes a connection to the region server at the specified address, and return + * Establishes a connection to the region server at the specified address, and returns * a region client protocol. * * @param serverName @@ -271,30 +242,17 @@ public interface HConnection extends Abortable, Closeable { * @throws IOException if a remote or network exception occurs * */ - public ClientProtocol getClient(final ServerName serverName) throws IOException; - - /** - * Establishes a connection to the region server at the specified address. 
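The deprecated overloads deleted below identify a region server by hostname and port only; the surviving methods take a ServerName, which also carries the server start code, so a restarted process on the same host:port compares as a different server. An illustrative value class (not the real org.apache.hadoop.hbase.ServerName) showing why the start code matters:

// Why ServerName instead of (host, port): the start code distinguishes a
// restarted process listening on the same address. Illustrative class only.
import java.util.Objects;

final class ServerId {
  final String host;
  final int port;
  final long startCode;   // typically the server's startup timestamp

  ServerId(String host, int port, long startCode) {
    this.host = host; this.port = port; this.startCode = startCode;
  }

  @Override public boolean equals(Object o) {
    if (!(o instanceof ServerId)) return false;
    ServerId that = (ServerId) o;
    return port == that.port && startCode == that.startCode && host.equals(that.host);
  }

  @Override public int hashCode() { return Objects.hash(host, port, startCode); }

  public static void main(String[] args) {
    ServerId before = new ServerId("rs1.example.com", 60020, 1000L);
    ServerId after  = new ServerId("rs1.example.com", 60020, 2000L); // same address, new process
    System.out.println(before.equals(after));  // false: stale references don't match
  }
}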
- * @param hostname RegionServer hostname - * @param port RegionServer port - * @param getMaster - do we check if master is alive - * @return proxy for HRegionServer - * @throws IOException if a remote or network exception occurs - * @deprecated use @link {#getAdmin(final ServerName serverName, boolean getMaster)} - * which takes into account the startCode. - */ - @Deprecated - public AdminProtocol getAdmin(final String hostname, final int port, boolean getMaster) - throws IOException; + public ClientService.BlockingInterface getClient(final ServerName serverName) throws IOException; /** * Establishes a connection to the region server at the specified address. * @param serverName - * @param getMaster - do we check if master is alive + * @param getMaster do we check if master is alive * @return proxy for HRegionServer * @throws IOException if a remote or network exception occurs + * @deprecated You can pass master flag but nothing special is done. */ - public AdminProtocol getAdmin(final ServerName serverName, boolean getMaster) + public AdminService.BlockingInterface getAdmin(final ServerName serverName, boolean getMaster) throws IOException; /** @@ -417,13 +375,14 @@ public interface HConnection extends Abortable, Closeable { public void clearCaches(final ServerName sn); /** - * This function allows HBaseAdminProtocol and potentially others to get a shared MasterMonitor + * This function allows HBaseAdmin and potentially others to get a shared MasterMonitor * connection. * @return The shared instance. Never returns null. * @throws MasterNotRunningException */ - public MasterMonitorKeepAliveConnection getKeepAliveMasterMonitor() - throws MasterNotRunningException; + // TODO: Why is this in the public interface when the returned type is shutdown package access? + public MasterMonitorKeepAliveConnection getKeepAliveMasterMonitorService() + throws MasterNotRunningException; /** * This function allows HBaseAdmin and potentially others to get a shared MasterAdminProtocol @@ -431,7 +390,8 @@ public interface HConnection extends Abortable, Closeable { * @return The shared instance. Never returns null. * @throws MasterNotRunningException */ - public MasterAdminKeepAliveConnection getKeepAliveMasterAdmin() throws MasterNotRunningException; + // TODO: Why is this in the public interface when the returned type is shutdown package access? + public MasterAdminKeepAliveConnection getKeepAliveMasterAdminService() throws MasterNotRunningException; /** * @param serverName @@ -439,4 +399,3 @@ public interface HConnection extends Abortable, Closeable { */ public boolean isDeadServer(ServerName serverName); } - diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionKey.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionKey.java new file mode 100644 index 00000000000..ac6914e7ef4 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionKey.java @@ -0,0 +1,140 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.security.User; + +/** + * Denotes a unique key to an {@link HConnection} instance. + * + * In essence, this class captures the properties in {@link Configuration} + * that may be used in the process of establishing a connection. In light of + * that, if any new such properties are introduced into the mix, they must be + * added to the {@link HConnectionKey#properties} list. + * + */ +class HConnectionKey { + final static String[] CONNECTION_PROPERTIES = new String[] { + HConstants.ZOOKEEPER_QUORUM, HConstants.ZOOKEEPER_ZNODE_PARENT, + HConstants.ZOOKEEPER_CLIENT_PORT, + HConstants.ZOOKEEPER_RECOVERABLE_WAITTIME, + HConstants.HBASE_CLIENT_PAUSE, HConstants.HBASE_CLIENT_RETRIES_NUMBER, + HConstants.HBASE_RPC_TIMEOUT_KEY, + HConstants.HBASE_CLIENT_PREFETCH_LIMIT, + HConstants.HBASE_META_SCANNER_CACHING, + HConstants.HBASE_CLIENT_INSTANCE_ID }; + + private Map<String, String> properties; + private String username; + + HConnectionKey(Configuration conf) { + Map<String, String> m = new HashMap<String, String>(); + if (conf != null) { + for (String property : CONNECTION_PROPERTIES) { + String value = conf.get(property); + if (value != null) { + m.put(property, value); + } + } + } + this.properties = Collections.unmodifiableMap(m); + + try { + User currentUser = User.getCurrent(); + if (currentUser != null) { + username = currentUser.getName(); + } + } catch (IOException ioe) { + HConnectionManager.LOG.warn("Error obtaining current user, skipping username in HConnectionKey", ioe); + } + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + if (username != null) { + result = username.hashCode(); + } + for (String property : CONNECTION_PROPERTIES) { + String value = properties.get(property); + if (value != null) { + result = prime * result + value.hashCode(); + } + } + + return result; + } + + + @edu.umd.cs.findbugs.annotations.SuppressWarnings (value="ES_COMPARING_STRINGS_WITH_EQ", + justification="Optimization") + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + HConnectionKey that = (HConnectionKey) obj; + if (this.username != null && !this.username.equals(that.username)) { + return false; + } else if (this.username == null && that.username != null) { + return false; + } + if (this.properties == null) { + if (that.properties != null) { + return false; + } + } else { + if (that.properties == null) { + return false; + } + for (String property : CONNECTION_PROPERTIES) { + String thisValue = this.properties.get(property); + String thatValue = that.properties.get(property); + //noinspection StringEquality + if (thisValue == thatValue) { + continue; + } + if (thisValue == null || !thisValue.equals(thatValue)) { + return false; + } + } + } + return true; + } + + @Override + public String toString() { + return
"HConnectionKey{" + + "properties=" + properties + + ", username='" + username + '\'' + + '}'; + } +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java index 8bce9afc821..ebaaa9dd83d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java @@ -20,15 +20,9 @@ package org.apache.hadoop.hbase.client; import java.io.Closeable; import java.io.IOException; -import java.lang.reflect.InvocationHandler; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.lang.reflect.Proxy; import java.lang.reflect.UndeclaredThrowableException; -import java.net.InetSocketAddress; import java.net.SocketException; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -59,12 +53,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.IpcProtocol; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.MasterAdminProtocol; -import org.apache.hadoop.hbase.MasterMonitorProtocol; -import org.apache.hadoop.hbase.MasterProtocol; -import org.apache.hadoop.hbase.RemoteExceptionHandler; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; @@ -77,26 +66,95 @@ import org.apache.hadoop.hbase.exceptions.RegionOpeningException; import org.apache.hadoop.hbase.exceptions.RegionServerStoppedException; import org.apache.hadoop.hbase.exceptions.TableNotFoundException; import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.ipc.HBaseClientRPC; -import org.apache.hadoop.hbase.ipc.ProtobufRpcClientEngine; -import org.apache.hadoop.hbase.ipc.RpcClientEngine; +import org.apache.hadoop.hbase.ipc.RpcClient; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.RequestConverter; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest; 
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse; +import 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.MasterMonitorService; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse; import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.SoftValueSortedMap; import org.apache.hadoop.hbase.util.Triple; -import org.apache.hadoop.hbase.zookeeper.*; +import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; +import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker; +import org.apache.hadoop.hbase.zookeeper.ZKClusterId; +import org.apache.hadoop.hbase.zookeeper.ZKTableReadOnly; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.ipc.RemoteException; import org.apache.zookeeper.KeeperException; import com.google.common.annotations.VisibleForTesting; +import com.google.protobuf.BlockingRpcChannel; +import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; /** @@ -105,7 +163,8 @@ import com.google.protobuf.ServiceException; * {@link Configuration}; all invocations of {@link #getConnection(Configuration)} * that pass the same {@link Configuration} instance will be returned the same * {@link HConnection} instance (Adding properties to a Configuration - * instance does not change its object identity). Sharing {@link HConnection} + * instance does not change its object identity; for more on how this is done see + * {@link HConnectionKey}). Sharing {@link HConnection} * instances is usually what you want; all clients of the {@link HConnection} * instances share the HConnections' cache of Region locations rather than each * having to discover for itself the location of meta, etc. It makes @@ -116,11 +175,9 @@ import com.google.protobuf.ServiceException; * implemented atop Hadoop RPC and as of this writing, Hadoop RPC does a * connection per cluster-member, exclusively). * - *
<p>
But sharing connections - * makes clean up of {@link HConnection} instances a little awkward. Currently, - * clients cleanup by calling - * {@link #deleteConnection(Configuration)}. This will shutdown the - * zookeeper connection the HConnection was using and clean up all + *
<p>
But sharing connections makes clean up of {@link HConnection} instances a little awkward. + * Currently, clients cleanup by calling {@link #deleteConnection(Configuration)}. This will + * shutdown the zookeeper connection the HConnection was using and clean up all * HConnection resources as well as stopping proxies to servers out on the * cluster. Not running the cleanup will not end the world; it'll * just stall the closeup some and spew some zookeeper connection failed @@ -150,43 +207,30 @@ import com.google.protobuf.ServiceException; @InterfaceAudience.Public @InterfaceStability.Evolving public class HConnectionManager { + static final Log LOG = LogFactory.getLog(HConnectionManager.class); + + public static final String RETRIES_BY_SERVER_KEY = "hbase.client.retries.by.server"; + // An LRU Map of HConnectionKey -> HConnection (TableServer). All // access must be synchronized. This map is not private because tests // need to be able to tinker with it. - static final Map HBASE_INSTANCES; + static final Map CONNECTION_INSTANCES; - public static final int MAX_CACHED_HBASE_INSTANCES; - - /** Parameter name for what client protocol to use. */ - public static final String CLIENT_PROTOCOL_CLASS = "hbase.clientprotocol.class"; - - /** Default client protocol class name. */ - public static final String DEFAULT_CLIENT_PROTOCOL_CLASS = ClientProtocol.class.getName(); - - /** Parameter name for what admin protocol to use. */ - public static final String REGION_PROTOCOL_CLASS = "hbase.adminprotocol.class"; - - /** Default admin protocol class name. */ - public static final String DEFAULT_ADMIN_PROTOCOL_CLASS = AdminProtocol.class.getName(); - - public static final String RETRIES_BY_SERVER = "hbase.client.retries.by.server"; - - private static final Log LOG = LogFactory.getLog(HConnectionManager.class); + public static final int MAX_CACHED_CONNECTION_INSTANCES; static { // We set instances to one more than the value specified for {@link // HConstants#ZOOKEEPER_MAX_CLIENT_CNXNS}. By default, the zk default max // connections to the ensemble from the one client is 30, so in that case we // should run into zk issues before the LRU hit this value of 31. - MAX_CACHED_HBASE_INSTANCES = HBaseConfiguration.create().getInt( - HConstants.ZOOKEEPER_MAX_CLIENT_CNXNS, - HConstants.DEFAULT_ZOOKEPER_MAX_CLIENT_CNXNS) + 1; - HBASE_INSTANCES = new LinkedHashMap( - (int) (MAX_CACHED_HBASE_INSTANCES / 0.75F) + 1, 0.75F, true) { - @Override + MAX_CACHED_CONNECTION_INSTANCES = HBaseConfiguration.create().getInt( + HConstants.ZOOKEEPER_MAX_CLIENT_CNXNS, HConstants.DEFAULT_ZOOKEPER_MAX_CLIENT_CNXNS) + 1; + CONNECTION_INSTANCES = new LinkedHashMap( + (int) (MAX_CACHED_CONNECTION_INSTANCES / 0.75F) + 1, 0.75F, true) { + @Override protected boolean removeEldestEntry( Map.Entry eldest) { - return size() > MAX_CACHED_HBASE_INSTANCES; + return size() > MAX_CACHED_CONNECTION_INSTANCES; } }; } @@ -194,31 +238,31 @@ public class HConnectionManager { /* * Non-instantiable. */ - protected HConnectionManager() { + private HConnectionManager() { super(); } /** - * Get the connection that goes with the passed conf - * configuration instance. - * If no current connection exists, method creates a new connection for the - * passed conf instance. + * Get the connection that goes with the passed conf configuration instance. + * If no current connection exists, method creates a new connection and keys it using + * connection-specific properties from the passed {@link Configuration}; see + * {@link HConnectionKey}. 
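As the HConnectionKey class above shows, only a fixed list of connection-relevant properties (plus the current user) participates in the cache key. A condensed, hypothetical sketch of that configuration-keyed caching idea; the property names and classes here are illustrative, not HBase API:

// Condensed sketch of configuration-keyed connection caching: only properties
// that affect the connection participate in the key, so two configurations
// that agree on them share one cached connection.
import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

class ConnectionCacheSketch {
  static final String[] KEY_PROPERTIES = { "zookeeper.quorum", "zookeeper.port" };

  // The key is the sorted subset of relevant properties rendered to a string.
  static String keyFor(Map<String, String> conf) {
    Map<String, String> relevant = new TreeMap<>();
    for (String p : KEY_PROPERTIES) {
      if (conf.containsKey(p)) relevant.put(p, conf.get(p));
    }
    return relevant.toString();
  }

  private final Map<String, Object> cache = new HashMap<>();

  synchronized Object getConnection(Map<String, String> conf) {
    return cache.computeIfAbsent(keyFor(conf), k -> new Object() /* stand-in connection */);
  }

  public static void main(String[] args) {
    ConnectionCacheSketch mgr = new ConnectionCacheSketch();
    Map<String, String> a = new HashMap<>();
    a.put("zookeeper.quorum", "zk1");
    Map<String, String> b = new HashMap<>(a);
    b.put("unrelated.prop", "x");          // not in KEY_PROPERTIES, so same key
    System.out.println(mgr.getConnection(a) == mgr.getConnection(b));  // true
  }
}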
* @param conf configuration * @return HConnection object for conf * @throws ZooKeeperConnectionException */ - public static HConnection getConnection(Configuration conf) + public static HConnection getConnection(final Configuration conf) throws IOException { HConnectionKey connectionKey = new HConnectionKey(conf); - synchronized (HBASE_INSTANCES) { - HConnectionImplementation connection = HBASE_INSTANCES.get(connectionKey); + synchronized (CONNECTION_INSTANCES) { + HConnectionImplementation connection = CONNECTION_INSTANCES.get(connectionKey); if (connection == null) { connection = new HConnectionImplementation(conf, true); - HBASE_INSTANCES.put(connectionKey, connection); + CONNECTION_INSTANCES.put(connectionKey, connection); } else if (connection.isClosed()) { HConnectionManager.deleteConnection(connectionKey, true); connection = new HConnectionImplementation(conf, true); - HBASE_INSTANCES.put(connectionKey, connection); + CONNECTION_INSTANCES.put(connectionKey, connection); } connection.incCount(); return connection; @@ -226,11 +270,10 @@ public class HConnectionManager { } /** - * Create a new HConnection instance using the passed conf - * instance. - * Note: This bypasses the usual HConnection life cycle management! - * Use this with caution, the caller is responsible for closing the - * created connection. + * Create a new HConnection instance using the passed conf instance. + *

Note: This bypasses the usual HConnection life cycle management done by + * {@link #getConnection(Configuration)}. Use this with caution; the caller is responsible for + * calling {@link HConnection#close()} on the returned connection instance. * @param conf configuration * @return HConnection object for conf * @throws ZooKeeperConnectionException */ @@ -241,22 +284,19 @@ public class HConnectionManager { } /** - * Delete connection information for the instance specified by configuration. - * If there are no more references to it, this will then close connection to - * the zookeeper ensemble and let go of all resources. + * Delete connection information for the instance specified by the passed configuration. + * If there are no more references to the designated connection, this method will + * then close the connection to the zookeeper ensemble and let go of all associated resources. * - * @param conf - * configuration whose identity is used to find {@link HConnection} - * instance. + * @param conf configuration whose identity is used to find the {@link HConnection} instance. */ public static void deleteConnection(Configuration conf) { deleteConnection(new HConnectionKey(conf), false); } /** - * Delete stale connection information for the instance specified by configuration. - * This will then close connection to - * the zookeeper ensemble and let go of all resources. + * Clean up a known stale connection. + * This will then close the connection to the zookeeper ensemble and let go of all resources. * * @param connection */ @@ -268,22 +308,21 @@ public class HConnectionManager { * Delete information for all connections. */ public static void deleteAllConnections() { - synchronized (HBASE_INSTANCES) { + synchronized (CONNECTION_INSTANCES) { Set connectionKeys = new HashSet(); - connectionKeys.addAll(HBASE_INSTANCES.keySet()); + connectionKeys.addAll(CONNECTION_INSTANCES.keySet()); for (HConnectionKey connectionKey : connectionKeys) { deleteConnection(connectionKey, false); } - HBASE_INSTANCES.clear(); + CONNECTION_INSTANCES.clear(); } } private static void deleteConnection(HConnection connection, boolean staleConnection) { - synchronized (HBASE_INSTANCES) { - for (Entry connectionEntry : HBASE_INSTANCES - .entrySet()) { - if (connectionEntry.getValue() == connection) { - deleteConnection(connectionEntry.getKey(), staleConnection); + synchronized (CONNECTION_INSTANCES) { + for (Entry e: CONNECTION_INSTANCES.entrySet()) { + if (e.getValue() == connection) { + deleteConnection(e.getKey(), staleConnection); break; } } @@ -291,18 +330,17 @@ public class HConnectionManager { private static void deleteConnection(HConnectionKey connectionKey, boolean staleConnection) { - synchronized (HBASE_INSTANCES) { - HConnectionImplementation connection = HBASE_INSTANCES - .get(connectionKey); + synchronized (CONNECTION_INSTANCES) { + HConnectionImplementation connection = CONNECTION_INSTANCES.get(connectionKey); if (connection != null) { connection.decCount(); if (connection.isZeroReference() || staleConnection) { - HBASE_INSTANCES.remove(connectionKey); + CONNECTION_INSTANCES.remove(connectionKey); connection.internalClose(); } } else { LOG.error("Connection not found in the list, can't delete it "+ - "(connection key="+connectionKey+"). May be the key was modified?"); + "(connection key=" + connectionKey + "). Maybe the key was modified?"); } } } @@ -313,14 +351,12 @@ public class HConnectionManager { * @return Number of cached regions for the table.
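For contrast, a sketch of the unmanaged path just described: the caller owns the instance outright, nothing is cached or reference counted, so the close is mandatory and on the caller:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;

public class UnmanagedConnectionSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Bypasses the HConnectionKey cache and its reference counting entirely.
    HConnection connection = HConnectionManager.createConnection(conf);
    try {
      // ... short-lived work against the cluster ...
    } finally {
      connection.close(); // mandatory: nobody else will clean this one up
    }
  }
}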
* @throws ZooKeeperConnectionException */ - static int getCachedRegionCount(Configuration conf, - final byte[] tableName) + static int getCachedRegionCount(Configuration conf, final byte[] tableName) throws IOException { return execute(new HConnectable(conf) { @Override public Integer connect(HConnection connection) { - return ((HConnectionImplementation) connection) - .getNumberOfCachedRegionLocations(tableName); + return ((HConnectionImplementation)connection).getNumberOfCachedRegionLocations(tableName); } }); } @@ -331,8 +367,8 @@ public class HConnectionManager { * @return true if the region where the table and row reside is cached. * @throws ZooKeeperConnectionException */ - static boolean isRegionCached(Configuration conf, - final byte[] tableName, final byte[] row) throws IOException { + static boolean isRegionCached(Configuration conf, final byte[] tableName, final byte[] row) + throws IOException { return execute(new HConnectable(conf) { @Override public Boolean connect(HConnection connection) { @@ -341,34 +377,10 @@ public class HConnectionManager { }); } - /** - * This class makes it convenient for one to execute a command in the context - * of a {@link HConnection} instance based on the given {@link Configuration}. - * - *

- * If you find yourself wanting to use a {@link HConnection} for a relatively - * short duration of time, and do not want to deal with the hassle of creating - * and cleaning up that resource, then you should consider using this - * convenience class. - * - * @param - * the return type of the {@link HConnectable#connect(HConnection)} - * method. - */ - public static abstract class HConnectable { - public Configuration conf; - - protected HConnectable(Configuration conf) { - this.conf = conf; - } - - public abstract T connect(HConnection connection) throws IOException; - } - /** * This convenience method invokes the given {@link HConnectable#connect} * implementation using a {@link HConnection} instance that lasts just for the - * duration of that invocation. + * duration of the invocation. * * @param the return type of the connect method * @param connectable the {@link HConnectable} instance @@ -398,127 +410,14 @@ public class HConnectionManager { } } - /** - * Denotes a unique key to a {@link HConnection} instance. - * - * In essence, this class captures the properties in {@link Configuration} - * that may be used in the process of establishing a connection. In light of - * that, if any new such properties are introduced into the mix, they must be - * added to the {@link HConnectionKey#properties} list. - * - */ - public static class HConnectionKey { - final static String[] CONNECTION_PROPERTIES = new String[] { - HConstants.ZOOKEEPER_QUORUM, HConstants.ZOOKEEPER_ZNODE_PARENT, - HConstants.ZOOKEEPER_CLIENT_PORT, - HConstants.ZOOKEEPER_RECOVERABLE_WAITTIME, - HConstants.HBASE_CLIENT_PAUSE, HConstants.HBASE_CLIENT_RETRIES_NUMBER, - HConstants.HBASE_CLIENT_RPC_MAXATTEMPTS, - HConstants.HBASE_RPC_TIMEOUT_KEY, - HConstants.HBASE_CLIENT_PREFETCH_LIMIT, - HConstants.HBASE_META_SCANNER_CACHING, - HConstants.HBASE_CLIENT_INSTANCE_ID }; - - private Map properties; - private String username; - - public HConnectionKey(Configuration conf) { - Map m = new HashMap(); - if (conf != null) { - for (String property : CONNECTION_PROPERTIES) { - String value = conf.get(property); - if (value != null) { - m.put(property, value); - } - } - } - this.properties = Collections.unmodifiableMap(m); - - try { - User currentUser = User.getCurrent(); - if (currentUser != null) { - username = currentUser.getName(); - } - } catch (IOException ioe) { - LOG.warn("Error obtaining current user, skipping username in HConnectionKey", - ioe); - } - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - if (username != null) { - result = username.hashCode(); - } - for (String property : CONNECTION_PROPERTIES) { - String value = properties.get(property); - if (value != null) { - result = prime * result + value.hashCode(); - } - } - - return result; - } - - - @edu.umd.cs.findbugs.annotations.SuppressWarnings (value="ES_COMPARING_STRINGS_WITH_EQ", - justification="Optimization") - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; - HConnectionKey that = (HConnectionKey) obj; - if (this.username != null && !this.username.equals(that.username)) { - return false; - } else if (this.username == null && that.username != null) { - return false; - } - if (this.properties == null) { - if (that.properties != null) { - return false; - } - } else { - if (that.properties == null) { - return false; - } - for (String property : CONNECTION_PROPERTIES) { - String thisValue = 
this.properties.get(property); - String thatValue = that.properties.get(property); - //noinspection StringEquality - if (thisValue == thatValue) { - continue; - } - if (thisValue == null || !thisValue.equals(thatValue)) { - return false; - } - } - } - return true; - } - - @Override - public String toString() { - return "HConnectionKey{" + - "properties=" + properties + - ", username='" + username + '\'' + - '}'; - } - } - /** Encapsulates connection to zookeeper and regionservers.*/ + @edu.umd.cs.findbugs.annotations.SuppressWarnings( + value="AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION", + justification="Access to the concurrent hash map is under a lock so should be fine.") static class HConnectionImplementation implements HConnection, Closeable { static final Log LOG = LogFactory.getLog(HConnectionImplementation.class); - private final Class adminClass; - private final Class clientClass; private final long pause; private final int numTries; - private final int maxRPCAttempts; private final int rpcTimeout; private final int prefetchRegionLimit; private final boolean useServerTrackerForRetries; @@ -546,22 +445,15 @@ public class HConnectionManager { private final Configuration conf; - // client RPC - private RpcClientEngine rpcEngine; - - // Known region ServerName.toString() -> RegionClient/Admin - private final ConcurrentHashMap> servers = - new ConcurrentHashMap>(); - private final ConcurrentHashMap connectionLock = - new ConcurrentHashMap(); + // Client rpc instance. + private RpcClient rpcClient; /** * Map of table to table {@link HRegionLocation}s. The table key is made * by doing a {@link Bytes#mapKey(byte[])} of the table's name. */ - private final Map> - cachedRegionLocations = - new HashMap>(); + private final Map> cachedRegionLocations = + new HashMap>(); // The presence of a server in the map implies it's likely that there is an // entry in cachedRegionLocations that map to this server; but the absence @@ -579,47 +471,33 @@ public class HConnectionManager { // indicates whether this connection's life cycle is managed (by us) private final boolean managed; + /** * constructor * @param conf Configuration object + * @param managed If true, does not do full shutdown on close; i.e. cleanup of connection + * to zk and shutdown of all services; we just close down the resources this connection was + * responsible for and decrement usage counters. It is up to the caller to do the full + * cleanup. It is set when we want connection sharing going on -- reuse of zk connection, + * and cached region locations, established regionserver connections, etc. When connections + * are shared, we have reference counting going on and will only do full cleanup when there + * are no more users of an HConnectionImplementation instance.
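Condensed to the methods this patch names (incCount/decCount/isZeroReference/internalClose), the reference counting behind the managed mode behaves like the following sketch:

// Schematic of the shared-connection accounting: getConnection() increments,
// deleteConnection() decrements, and the real shutdown only happens at zero
// references (or when the connection is known stale).
class RefCountSketch {
  private int refCount; // guarded by the CONNECTION_INSTANCES lock in the real class

  void incCount() { refCount++; }
  void decCount() { if (refCount > 0) refCount--; }
  boolean isZeroReference() { return refCount == 0; }

  void release(boolean stale) {
    decCount();
    if (isZeroReference() || stale) {
      internalClose(); // full shutdown: zookeeper, rpc stubs, region caches
    }
  }

  void internalClose() { /* tear everything down */ }
}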
*/ - @SuppressWarnings("unchecked") - public HConnectionImplementation(Configuration conf, boolean managed) throws IOException { + HConnectionImplementation(Configuration conf, boolean managed) throws IOException { this.conf = conf; this.managed = managed; - String adminClassName = conf.get(REGION_PROTOCOL_CLASS, - DEFAULT_ADMIN_PROTOCOL_CLASS); this.closed = false; - try { - this.adminClass = - (Class) Class.forName(adminClassName); - } catch (ClassNotFoundException e) { - throw new UnsupportedOperationException( - "Unable to find region server interface " + adminClassName, e); - } - String clientClassName = conf.get(CLIENT_PROTOCOL_CLASS, - DEFAULT_CLIENT_PROTOCOL_CLASS); - try { - this.clientClass = - (Class) Class.forName(clientClassName); - } catch (ClassNotFoundException e) { - throw new UnsupportedOperationException( - "Unable to find client protocol " + clientClassName, e); - } this.pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, - HConstants.DEFAULT_HBASE_CLIENT_PAUSE); + HConstants.DEFAULT_HBASE_CLIENT_PAUSE); this.numTries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); - this.maxRPCAttempts = conf.getInt( - HConstants.HBASE_CLIENT_RPC_MAXATTEMPTS, - HConstants.DEFAULT_HBASE_CLIENT_RPC_MAXATTEMPTS); + HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); this.rpcTimeout = conf.getInt( - HConstants.HBASE_RPC_TIMEOUT_KEY, - HConstants.DEFAULT_HBASE_RPC_TIMEOUT); + HConstants.HBASE_RPC_TIMEOUT_KEY, + HConstants.DEFAULT_HBASE_RPC_TIMEOUT); this.prefetchRegionLimit = conf.getInt( - HConstants.HBASE_CLIENT_PREFETCH_LIMIT, - HConstants.DEFAULT_HBASE_CLIENT_PREFETCH_LIMIT); - this.useServerTrackerForRetries = conf.getBoolean(RETRIES_BY_SERVER, true); + HConstants.HBASE_CLIENT_PREFETCH_LIMIT, + HConstants.DEFAULT_HBASE_CLIENT_PREFETCH_LIMIT); + this.useServerTrackerForRetries = conf.getBoolean(RETRIES_BY_SERVER_KEY, true); long serverTrackerTimeout = 0; if (this.useServerTrackerForRetries) { // Server tracker allows us to do faster, and yet useful (hopefully), retries. @@ -636,11 +514,7 @@ public class HConnectionManager { this.serverTrackerTimeout = serverTrackerTimeout; retrieveClusterId(); - // ProtobufRpcClientEngine is the main RpcClientEngine implementation, - // but we maintain access through an interface to allow overriding for tests - // RPC engine setup must follow obtaining the cluster ID for token authentication to work - this.rpcEngine = new ProtobufRpcClientEngine(this.conf, this.clusterId); - + this.rpcClient = new RpcClient(this.conf, this.clusterId); // Do we publish the status? Class listenerClass = @@ -654,13 +528,24 @@ public class HConnectionManager { @Override public void newDead(ServerName sn) { clearCaches(sn); - rpcEngine.getClient().cancelConnections(sn.getHostname(), sn.getPort(), - new SocketException(sn.getServerName() + " is dead: closing its connection.")); + rpcClient.cancelConnections(sn.getHostname(), sn.getPort(), + new SocketException(sn.getServerName() + " is dead: closing its connection.")); } }, conf, listenerClass); } } + /** + * For tests only. + * @param rpcClient Client we should use instead. + * @return Previous rpcClient + */ + RpcClient setRpcClient(final RpcClient rpcClient) { + RpcClient oldRpcClient = this.rpcClient; + this.rpcClient = rpcClient; + return oldRpcClient; + } + /** * An identifier that will remain the same for a given connection. 
* @return @@ -706,125 +591,6 @@ public class HConnectionManager { return this.conf; } - private static class MasterProtocolState { - public MasterProtocol protocol; - public int userCount; - public long keepAliveUntil = Long.MAX_VALUE; - public final Class protocolClass; - - public MasterProtocolState ( - final Class protocolClass) { - this.protocolClass = protocolClass; - } - } - - /** - * Create a new Master proxy. Try once only. - */ - private MasterProtocol createMasterInterface( - MasterProtocolState masterProtocolState) - throws IOException, KeeperException, ServiceException { - - ZooKeeperKeepAliveConnection zkw; - try { - zkw = getKeepAliveZooKeeperWatcher(); - } catch (IOException e) { - throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e); - } - - try { - - checkIfBaseNodeAvailable(zkw); - ServerName sn = MasterAddressTracker.getMasterAddress(zkw); - if (sn == null) { - String msg = - "ZooKeeper available but no active master location found"; - LOG.info(msg); - throw new MasterNotRunningException(msg); - } - - - InetSocketAddress isa = - new InetSocketAddress(sn.getHostname(), sn.getPort()); - MasterProtocol tryMaster = rpcEngine.getProxy( - masterProtocolState.protocolClass, - isa, this.conf, this.rpcTimeout); - - if (tryMaster.isMasterRunning( - null, RequestConverter.buildIsMasterRunningRequest()).getIsMasterRunning()) { - return tryMaster; - } else { - String msg = "Can create a proxy to master, but it is not running"; - LOG.info(msg); - throw new MasterNotRunningException(msg); - } - } finally { - zkw.close(); - } - } - - /** - * Create a master, retries if necessary. - */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings (value="SWL_SLEEP_WITH_LOCK_HELD") - private MasterProtocol createMasterWithRetries( - MasterProtocolState masterProtocolState) throws MasterNotRunningException { - - // The lock must be at the beginning to prevent multiple master creation - // (and leaks) in a multithread context - - synchronized (this.masterAndZKLock) { - Exception exceptionCaught = null; - MasterProtocol master = null; - int tries = 0; - while ( - !this.closed && master == null - ) { - tries++; - try { - master = createMasterInterface(masterProtocolState); - } catch (IOException e) { - exceptionCaught = e; - } catch (KeeperException e) { - exceptionCaught = e; - } catch (ServiceException e) { - exceptionCaught = e; - } - - if (exceptionCaught != null) - // It failed. If it's not the last try, we're going to wait a little - if (tries < numTries) { - // tries at this point is 1 or more; decrement to start from 0. 
- long pauseTime = ConnectionUtils.getPauseTime(this.pause, tries - 1); - LOG.info("getMaster attempt " + tries + " of " + numTries + - " failed; retrying after sleep of " +pauseTime + ", exception=" + exceptionCaught); - - try { - Thread.sleep(pauseTime); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new RuntimeException( - "Thread was interrupted while trying to connect to master.", e); - } - - } else { - // Enough tries, we stop now - LOG.info("getMaster attempt " + tries + " of " + numTries + - " failed; no more retrying.", exceptionCaught); - throw new MasterNotRunningException(exceptionCaught); - } - } - - if (master == null) { - // implies this.closed true - throw new MasterNotRunningException( - "Connection was closed while trying to get master"); - } - - return master; - } - } - private void checkIfBaseNodeAvailable(ZooKeeperWatcher zkw) throws MasterNotRunningException { String errorMsg; @@ -851,11 +617,16 @@ public class HConnectionManager { */ @Override public boolean isMasterRunning() - throws MasterNotRunningException, ZooKeeperConnectionException { - // When getting the master proxy connection, we check it's running, + throws MasterNotRunningException, ZooKeeperConnectionException { + // When getting the master connection, we check it's running, // so if there is no exception, it means we've been able to get a // connection on a running master - getKeepAliveMasterMonitor().close(); + MasterMonitorKeepAliveConnection m = getKeepAliveMasterMonitorService(); + try { + m.close(); + } catch (IOException e) { + throw new MasterNotRunningException("Failed close", e); + } return true; } @@ -1142,7 +913,7 @@ public class HConnectionManager { metaLocation = locateRegion(parentTable, metaKey, true, false); // If null still, go around again. if (metaLocation == null) continue; - ClientProtocol server = getClient(metaLocation.getServerName()); + ClientService.BlockingInterface service = getClient(metaLocation.getServerName()); Result regionInfoRow; // This block guards against two threads trying to load the meta @@ -1172,7 +943,7 @@ public class HConnectionManager { forceDeleteCachedLocation(tableName, row); } // Query the meta region for the location of the meta region - regionInfoRow = ProtobufUtil.getRowOrBefore(server, + regionInfoRow = ProtobufUtil.getRowOrBefore(service, metaLocation.getRegionInfo().getRegionName(), metaKey, HConstants.CATALOG_FAMILY); } @@ -1231,7 +1002,7 @@ public class HConnectionManager { throw e; } catch (IOException e) { if (e instanceof RemoteException) { - e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e); + e = ((RemoteException)e).unwrapRemoteException(); } if (tries < numTries - 1) { if (LOG.isDebugEnabled()) { @@ -1445,103 +1216,334 @@ public class HConnectionManager { } } - @Override - @Deprecated - public AdminProtocol getAdmin(final String hostname, final int port) throws IOException { - return getAdmin(new ServerName(hostname, port, 0L)); + // Map keyed by service name + regionserver to service stub implementation + private final ConcurrentHashMap stubs = + new ConcurrentHashMap(); + // Map of locks used creating service stubs per regionserver. + private final ConcurrentHashMap connectionLock = + new ConcurrentHashMap(); + + /** + * Maintains current state of MasterService instance. 
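The stubs/connectionLock pair above implements a build-once cache per service+server key. The idiom, extracted from the getAdmin()/getClient() implementations that follow (buildStub is a hypothetical stand-in for the RpcClient channel setup):

import java.util.concurrent.ConcurrentHashMap;

// Per-key double-checked caching as used by getAdmin()/getClient(): one lock
// object per service+server key, so concurrent callers build a given stub at
// most once without serializing unrelated lookups.
class StubCacheSketch {
  private final ConcurrentHashMap<String, Object> stubs =
      new ConcurrentHashMap<String, Object>();
  private final ConcurrentHashMap<String, String> locks =
      new ConcurrentHashMap<String, String>();

  Object getStub(String serviceName, String hostAndPort) {
    String key = serviceName + "@" + hostAndPort; // same shape as getStubKey()
    locks.putIfAbsent(key, key);
    synchronized (locks.get(key)) {
      Object stub = stubs.get(key);
      if (stub == null) {
        stub = buildStub(key); // stand-in for channel + newBlockingStub work
        stubs.put(key, stub);
      }
      return stub;
    }
  }

  private Object buildStub(String key) { return new Object(); }
}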
+ */ + static abstract class MasterServiceState { + HConnection connection; + int userCount; + long keepAliveUntil = Long.MAX_VALUE; + + MasterServiceState (final HConnection connection) { + super(); + this.connection = connection; + } + + abstract Object getStub(); + abstract void clearStub(); + abstract boolean isMasterRunning() throws ServiceException; } + /** + * State of the MasterAdminService connection/setup. + */ + static class MasterAdminServiceState extends MasterServiceState { + MasterAdminService.BlockingInterface stub; + MasterAdminServiceState(final HConnection connection) { + super(connection); + } + + @Override + public String toString() { + return "MasterAdminService"; + } + + @Override + Object getStub() { + return this.stub; + } + + @Override + void clearStub() { + this.stub = null; + } + + @Override + boolean isMasterRunning() throws ServiceException { + MasterProtos.IsMasterRunningResponse response = + this.stub.isMasterRunning(null, RequestConverter.buildIsMasterRunningRequest()); + return response != null? response.getIsMasterRunning(): false; + } + } + + /** + * State of the MasterMonitorService connection/setup. + */ + static class MasterMonitorServiceState extends MasterServiceState { + MasterMonitorService.BlockingInterface stub; + MasterMonitorServiceState(final HConnection connection) { + super(connection); + } + + @Override + public String toString() { + return "MasterMonitorService"; + } + + @Override + Object getStub() { + return this.stub; + } + + @Override + void clearStub() { + this.stub = null; + } + + @Override + boolean isMasterRunning() throws ServiceException { + MasterProtos.IsMasterRunningResponse response = + this.stub.isMasterRunning(null, RequestConverter.buildIsMasterRunningRequest()); + return response != null? response.getIsMasterRunning(): false; + } + } + + /** + * Makes a client-side stub for master services. Subclass to specialize. + * Depends on the hosting class, so it is not static. Exists so we avoid duplicating a bunch + * of code when setting up the MasterMonitorService and MasterAdminService. + */ + abstract class StubMaker { + /** + * Returns the name of the service stub being created. + */ + protected abstract String getServiceName(); + + /** + * Make the stub and cache it internally so it can be used later for the isMasterRunning call. + * @param channel + */ + protected abstract Object makeStub(final BlockingRpcChannel channel); + + /** + * Once set up, check it works by doing an isMasterRunning check. + * @throws ServiceException + */ + protected abstract void isMasterRunning() throws ServiceException; + + /** + * Create a stub. Try once only. It is not typed because there is no common type shared by + * protobuf services or their interfaces. Let the caller do appropriate casting. + * @return A stub for master services.
+ * @throws IOException + * @throws KeeperException + * @throws ServiceException + */ + private Object makeStubNoRetries() throws IOException, KeeperException, ServiceException { + ZooKeeperKeepAliveConnection zkw; + try { + zkw = getKeepAliveZooKeeperWatcher(); + } catch (IOException e) { + throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e); + } + try { + checkIfBaseNodeAvailable(zkw); + ServerName sn = MasterAddressTracker.getMasterAddress(zkw); + if (sn == null) { + String msg = "ZooKeeper available but no active master location found"; + LOG.info(msg); + throw new MasterNotRunningException(msg); + } + if (isDeadServer(sn)) { + throw new MasterNotRunningException(sn + " is dead."); + } + // Use the security info interface name as our stub key + String key = getStubKey(getServiceName(), sn.getHostAndPort()); + connectionLock.putIfAbsent(key, key); + Object stub = null; + synchronized (connectionLock.get(key)) { + stub = stubs.get(key); + if (stub == null) { + BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn, + User.getCurrent(), rpcTimeout); + stub = makeStub(channel); + isMasterRunning(); + stubs.put(key, stub); + } + } + return stub; + } finally { + zkw.close(); + } + } + + /** + * Create a stub against the master. Retry if necessary. + * @return A stub to do intf against the master + * @throws MasterNotRunningException + */ + @edu.umd.cs.findbugs.annotations.SuppressWarnings (value="SWL_SLEEP_WITH_LOCK_HELD") + Object makeStub() throws MasterNotRunningException { + // The lock must be at the beginning to prevent multiple master creations + // (and leaks) in a multithread context + synchronized (masterAndZKLock) { + Exception exceptionCaught = null; + Object stub = null; + int tries = 0; + while (!closed && stub == null) { + tries++; + try { + stub = makeStubNoRetries(); + } catch (IOException e) { + exceptionCaught = e; + } catch (KeeperException e) { + exceptionCaught = e; + } catch (ServiceException e) { + exceptionCaught = e; + } + + if (exceptionCaught != null) + // It failed. If it's not the last try, we're going to wait a little + if (tries < numTries) { + // tries at this point is 1 or more; decrement to start from 0. + long pauseTime = ConnectionUtils.getPauseTime(pause, tries - 1); + LOG.info("getMaster attempt " + tries + " of " + numTries + + " failed; retrying after sleep of " +pauseTime + ", exception=" + + exceptionCaught); + + try { + Thread.sleep(pauseTime); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException( + "Thread was interrupted while trying to connect to master.", e); + } + } else { + // Enough tries, we stop now + LOG.info("getMaster attempt " + tries + " of " + numTries + + " failed; no more retrying.", exceptionCaught); + throw new MasterNotRunningException(exceptionCaught); + } + } + + if (stub == null) { + // implies this.closed true + throw new MasterNotRunningException("Connection was closed while trying to get master"); + } + return stub; + } + } + } + + /** + * Class to make a MasterMonitorService stub. 
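Stripped of HBase types, the retry loop in makeStub() above has the following shape; the doubling backoff here is an illustrative assumption, since the real schedule comes from ConnectionUtils.getPauseTime():

import java.io.IOException;

abstract class MakeStubRetrySketch {
  volatile boolean closed;
  final int numTries = 3;        // stands in for hbase.client.retries.number
  final long basePauseMs = 100L; // stands in for hbase.client.pause

  abstract Object makeStubNoRetries() throws IOException;

  Object makeStub() throws IOException {
    Object stub = null;
    int tries = 0;
    while (!closed && stub == null) {
      tries++;
      try {
        stub = makeStubNoRetries(); // one zk lookup plus one stub construction
      } catch (IOException e) {
        if (tries >= numTries) throw e; // enough tries, we stop now
        try {
          Thread.sleep(basePauseMs << (tries - 1)); // grow the pause per attempt
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          throw new IOException("Interrupted while connecting to master", ie);
        }
      }
    }
    if (stub == null) throw new IOException("Connection closed while getting master");
    return stub;
  }
}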
+ */ + class MasterMonitorServiceStubMaker extends StubMaker { + private MasterMonitorService.BlockingInterface stub; + @Override + protected String getServiceName() { + return MasterMonitorService.getDescriptor().getName(); + } + + @Override + @edu.umd.cs.findbugs.annotations.SuppressWarnings("SWL_SLEEP_WITH_LOCK_HELD") + MasterMonitorService.BlockingInterface makeStub() throws MasterNotRunningException { + return (MasterMonitorService.BlockingInterface)super.makeStub(); + } + + @Override + protected Object makeStub(BlockingRpcChannel channel) { + this.stub = MasterMonitorService.newBlockingStub(channel); + return this.stub; + } + + @Override + protected void isMasterRunning() throws ServiceException { + this.stub.isMasterRunning(null, RequestConverter.buildIsMasterRunningRequest()); + } + } + + /** + * Class to make a MasterAdminService stub. + */ + class MasterAdminServiceStubMaker extends StubMaker { + private MasterAdminService.BlockingInterface stub; + + @Override + protected String getServiceName() { + return MasterAdminService.getDescriptor().getName(); + } + + @Override + @edu.umd.cs.findbugs.annotations.SuppressWarnings("SWL_SLEEP_WITH_LOCK_HELD") + MasterAdminService.BlockingInterface makeStub() throws MasterNotRunningException { + return (MasterAdminService.BlockingInterface)super.makeStub(); + } + + @Override + protected Object makeStub(BlockingRpcChannel channel) { + this.stub = MasterAdminService.newBlockingStub(channel); + return this.stub; + } + + @Override + protected void isMasterRunning() throws ServiceException { + this.stub.isMasterRunning(null, RequestConverter.buildIsMasterRunningRequest()); + } + }; + @Override - public AdminProtocol getAdmin(final ServerName serverName) + public AdminService.BlockingInterface getAdmin(final ServerName serverName) throws IOException { return getAdmin(serverName, false); } @Override - @Deprecated - public ClientProtocol getClient(final String hostname, final int port) + // Nothing is done w/ the 'master' parameter. It is ignored. + public AdminService.BlockingInterface getAdmin(final ServerName serverName, + final boolean master) throws IOException { - return (ClientProtocol)getProtocol(hostname, port, clientClass); - } - - @Override - public ClientProtocol getClient(final ServerName serverName) - throws IOException { - if (isDeadServer(serverName)){ - throw new RegionServerStoppedException("The server " + serverName + " is dead."); + if (isDeadServer(serverName)) { + throw new RegionServerStoppedException(serverName + " is dead."); } - return (ClientProtocol) - getProtocol(serverName.getHostname(), serverName.getPort(), clientClass); - } - - @Override - @Deprecated - public AdminProtocol getAdmin(final String hostname, final int port, - final boolean master) - throws IOException { - return (AdminProtocol)getProtocol(hostname, port, adminClass); - } - - @Override - public AdminProtocol getAdmin(final ServerName serverName, final boolean master) - throws IOException { - if (isDeadServer(serverName)){ - throw new RegionServerStoppedException("The server " + serverName + " is dead."); - } - return (AdminProtocol)getProtocol( - serverName.getHostname(), serverName.getPort(), adminClass); - } - - /** - * Either the passed isa is null or hostname - * can be but not both. - * @param hostname - * @param port - * @param protocolClass - * @return Proxy. 
- * @throws IOException - */ - IpcProtocol getProtocol(final String hostname, - final int port, final Class protocolClass) - throws IOException { - String rsName = Addressing.createHostAndPortStr(hostname, port); - // See if we already have a connection (common case) - Map protocols = this.servers.get(rsName); - if (protocols == null) { - protocols = new HashMap(); - Map existingProtocols = - this.servers.putIfAbsent(rsName, protocols); - if (existingProtocols != null) { - protocols = existingProtocols; + String key = getStubKey(AdminService.BlockingInterface.class.getName(), + serverName.getHostAndPort()); + this.connectionLock.putIfAbsent(key, key); + AdminService.BlockingInterface stub = null; + synchronized (this.connectionLock.get(key)) { + stub = (AdminService.BlockingInterface)this.stubs.get(key); + if (stub == null) { + BlockingRpcChannel channel = this.rpcClient.createBlockingRpcChannel(serverName, + User.getCurrent(), this.rpcTimeout); + stub = AdminService.newBlockingStub(channel); + this.stubs.put(key, stub); } } - String protocol = protocolClass.getName(); - IpcProtocol server = protocols.get(protocol); - if (server == null) { - // create a unique lock for this RS + protocol (if necessary) - String lockKey = protocol + "@" + rsName; - this.connectionLock.putIfAbsent(lockKey, lockKey); - // get the RS lock - synchronized (this.connectionLock.get(lockKey)) { - // do one more lookup in case we were stalled above - server = protocols.get(protocol); - if (server == null) { - try { - // Only create isa when we need to. - InetSocketAddress address = new InetSocketAddress(hostname, port); - // definitely a cache miss. establish an RPC for this RS - server = HBaseClientRPC.waitForProxy(rpcEngine, protocolClass, address, this.conf, - this.maxRPCAttempts, this.rpcTimeout, this.rpcTimeout); - protocols.put(protocol, server); - } catch (RemoteException e) { - LOG.warn("RemoteException connecting to RS", e); - // Throw what the RemoteException was carrying. - throw e.unwrapRemoteException(); - } - } + return stub; + } + + @Override + public ClientService.BlockingInterface getClient(final ServerName sn) + throws IOException { + if (isDeadServer(sn)) { + throw new RegionServerStoppedException(sn + " is dead."); + } + String key = getStubKey(ClientService.BlockingInterface.class.getName(), sn.getHostAndPort()); + this.connectionLock.putIfAbsent(key, key); + ClientService.BlockingInterface stub = null; + synchronized (this.connectionLock.get(key)) { + stub = (ClientService.BlockingInterface)this.stubs.get(key); + if (stub == null) { + BlockingRpcChannel channel = this.rpcClient.createBlockingRpcChannel(sn, + User.getCurrent(), this.rpcTimeout); + stub = ClientService.newBlockingStub(channel); + // In the old days, after getting stub/proxy, we'd make a call. We are not doing that here. + // Just fail on first actual call rather than in here on setup. + this.stubs.put(key, stub); } } - return server; + return stub; + } + + static String getStubKey(final String serviceName, final String rsHostnamePort) { + return serviceName + "@" + rsHostnamePort; } @Override @@ -1570,14 +1572,12 @@ public class HConnectionManager { private static final long keepAlive = 5 * 60 * 1000; /** - * Retrieve a shared ZooKeeperWatcher. You must close it it once you've have - * finished with it. + * Retrieve a shared ZooKeeperWatcher. You must close it once you have finished with it. * @return The shared instance. Never returns null.
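Callers follow the same acquire/close shape seen in makeStubNoRetries() above. A hypothetical helper, written as if it lived inside HConnectionImplementation (lookupActiveMaster is not a method in this patch; the body is assembled from calls the patch does show):

// Hypothetical helper, as it might look inside HConnectionImplementation;
// close() only releases this caller's hold on the shared watcher, it does
// not tear the watcher down while others are using it.
private ServerName lookupActiveMaster() throws IOException, KeeperException {
  ZooKeeperKeepAliveConnection zkw = getKeepAliveZooKeeperWatcher();
  try {
    return MasterAddressTracker.getMasterAddress(zkw);
  } finally {
    zkw.close();
  }
}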
*/ public ZooKeeperKeepAliveConnection getKeepAliveZooKeeperWatcher() throws IOException { synchronized (masterAndZKLock) { - if (keepAliveZookeeper == null) { // We don't check that our link to ZooKeeper is still valid // But there is a retry mechanism in the ZooKeeperWatcher itself @@ -1586,7 +1586,6 @@ public class HConnectionManager { } keepAliveZookeeperUserCount++; keepZooKeeperWatcherAliveUntil = Long.MAX_VALUE; - return keepAliveZookeeper; } } @@ -1608,7 +1607,7 @@ public class HConnectionManager { /** * Creates a Chore thread to check the connections to master & zookeeper * and close them when they reach their closing time ( - * {@link MasterProtocolState#keepAliveUntil} and + * {@link MasterServiceState#keepAliveUntil} and * {@link #keepZooKeeperWatcherAliveUntil}). Keep alive time is * managed by the release functions and the variable {@link #keepAlive} */ @@ -1636,9 +1635,9 @@ public class HConnectionManager { return new DelayedClosing(hci, stoppable); } - protected void closeMasterProtocol(MasterProtocolState protocolState) { + protected void closeMasterProtocol(MasterServiceState protocolState) { if (System.currentTimeMillis() > protocolState.keepAliveUntil) { - hci.closeMasterProtocol(protocolState); + hci.closeMasterService(protocolState); protocolState.keepAliveUntil = Long.MAX_VALUE; } } @@ -1654,8 +1653,8 @@ public class HConnectionManager { hci.keepZooKeeperWatcherAliveUntil = Long.MAX_VALUE; } } - closeMasterProtocol(hci.masterAdminProtocol); - closeMasterProtocol(hci.masterMonitorProtocol); + closeMasterProtocol(hci.adminMasterServiceState); + closeMasterProtocol(hci.monitorMasterServiceState); } } @@ -1683,117 +1682,289 @@ public class HConnectionManager { } } - private static class MasterProtocolHandler implements InvocationHandler { - private HConnectionImplementation connection; - private MasterProtocolState protocolStateTracker; + final MasterAdminServiceState adminMasterServiceState = new MasterAdminServiceState(this); + final MasterMonitorServiceState monitorMasterServiceState = + new MasterMonitorServiceState(this); - protected MasterProtocolHandler(HConnectionImplementation connection, - MasterProtocolState protocolStateTracker) { - this.connection = connection; - this.protocolStateTracker = protocolStateTracker; - } - - @Override - public Object invoke(Object proxy, Method method, Object[] args) - throws Throwable { - if (method.getName().equals("close") && - method.getParameterTypes().length == 0) { - release(connection, protocolStateTracker); - return null; - } else { - try { - return method.invoke(protocolStateTracker.protocol, args); - }catch (InvocationTargetException e){ - // We will have this for all the exception, checked on not, sent - // by any layer, including the functional exception - Throwable cause = e.getCause(); - if (cause == null){ - throw new RuntimeException( - "Proxy invocation failed and getCause is null", e); - } - if (cause instanceof UndeclaredThrowableException) { - cause = cause.getCause(); - } - throw cause; - } - } - } - - private void release( - HConnectionImplementation connection, - MasterProtocolState target) { - connection.releaseMaster(target); - } + @Override + public MasterAdminService.BlockingInterface getMasterAdmin() throws MasterNotRunningException { + return getKeepAliveMasterAdminService(); } - MasterProtocolState masterAdminProtocol = - new MasterProtocolState(MasterAdminProtocol.class); - MasterProtocolState masterMonitorProtocol = - new MasterProtocolState(MasterMonitorProtocol.class); + @Override + public 
MasterMonitorService.BlockingInterface getMasterMonitor() + throws MasterNotRunningException { + return getKeepAliveMasterMonitorService(); + } - /** - * This function allows HBaseAdmin and potentially others - * to get a shared master connection. - * - * @return The shared instance. Never returns null. - * @throws MasterNotRunningException - */ - private Object getKeepAliveMasterProtocol( - MasterProtocolState protocolState, Class connectionClass) - throws MasterNotRunningException { + private void resetMasterServiceState(final MasterServiceState mss) { + mss.userCount++; + mss.keepAliveUntil = Long.MAX_VALUE; + } + + @Override + public MasterAdminKeepAliveConnection getKeepAliveMasterAdminService() + throws MasterNotRunningException { synchronized (masterAndZKLock) { - if (!isKeepAliveMasterConnectedAndRunning(protocolState)) { - protocolState.protocol = null; - protocolState.protocol = createMasterWithRetries(protocolState); + if (!isKeepAliveMasterConnectedAndRunning(this.adminMasterServiceState)) { + MasterAdminServiceStubMaker stubMaker = new MasterAdminServiceStubMaker(); + this.adminMasterServiceState.stub = stubMaker.makeStub(); + } + resetMasterServiceState(this.adminMasterServiceState); + } + // Ugly delegation just so we can add in a Close method. + final MasterAdminService.BlockingInterface stub = this.adminMasterServiceState.stub; + return new MasterAdminKeepAliveConnection() { + MasterAdminServiceState mss = adminMasterServiceState; + @Override + public AddColumnResponse addColumn(RpcController controller, + AddColumnRequest request) throws ServiceException { + return stub.addColumn(controller, request); } - protocolState.userCount++; - protocolState.keepAliveUntil = Long.MAX_VALUE; - return Proxy.newProxyInstance( - connectionClass.getClassLoader(), - new Class[]{connectionClass}, - new MasterProtocolHandler(this, protocolState) - ); + @Override + public DeleteColumnResponse deleteColumn(RpcController controller, + DeleteColumnRequest request) throws ServiceException { + return stub.deleteColumn(controller, request); + } + + @Override + public ModifyColumnResponse modifyColumn(RpcController controller, + ModifyColumnRequest request) throws ServiceException { + return stub.modifyColumn(controller, request); + } + + @Override + public MoveRegionResponse moveRegion(RpcController controller, + MoveRegionRequest request) throws ServiceException { + return stub.moveRegion(controller, request); + } + + @Override + public DispatchMergingRegionsResponse dispatchMergingRegions( + RpcController controller, DispatchMergingRegionsRequest request) + throws ServiceException { + return stub.dispatchMergingRegions(controller, request); + } + + @Override + public AssignRegionResponse assignRegion(RpcController controller, + AssignRegionRequest request) throws ServiceException { + return stub.assignRegion(controller, request); + } + + @Override + public UnassignRegionResponse unassignRegion(RpcController controller, + UnassignRegionRequest request) throws ServiceException { + return stub.unassignRegion(controller, request); + } + + @Override + public OfflineRegionResponse offlineRegion(RpcController controller, + OfflineRegionRequest request) throws ServiceException { + return stub.offlineRegion(controller, request); + } + + @Override + public DeleteTableResponse deleteTable(RpcController controller, + DeleteTableRequest request) throws ServiceException { + return stub.deleteTable(controller, request); + } + + @Override + public EnableTableResponse enableTable(RpcController controller, + 
EnableTableRequest request) throws ServiceException { + return stub.enableTable(controller, request); + } + + @Override + public DisableTableResponse disableTable(RpcController controller, + DisableTableRequest request) throws ServiceException { + return stub.disableTable(controller, request); + } + + @Override + public ModifyTableResponse modifyTable(RpcController controller, + ModifyTableRequest request) throws ServiceException { + return stub.modifyTable(controller, request); + } + + @Override + public CreateTableResponse createTable(RpcController controller, + CreateTableRequest request) throws ServiceException { + return stub.createTable(controller, request); + } + + @Override + public ShutdownResponse shutdown(RpcController controller, + ShutdownRequest request) throws ServiceException { + return stub.shutdown(controller, request); + } + + @Override + public StopMasterResponse stopMaster(RpcController controller, + StopMasterRequest request) throws ServiceException { + return stub.stopMaster(controller, request); + } + + @Override + public BalanceResponse balance(RpcController controller, + BalanceRequest request) throws ServiceException { + return stub.balance(controller, request); + } + + @Override + public SetBalancerRunningResponse setBalancerRunning( + RpcController controller, SetBalancerRunningRequest request) + throws ServiceException { + return stub.setBalancerRunning(controller, request); + } + + @Override + public CatalogScanResponse runCatalogScan(RpcController controller, + CatalogScanRequest request) throws ServiceException { + return stub.runCatalogScan(controller, request); + } + + @Override + public EnableCatalogJanitorResponse enableCatalogJanitor( + RpcController controller, EnableCatalogJanitorRequest request) + throws ServiceException { + return stub.enableCatalogJanitor(controller, request); + } + + @Override + public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled( + RpcController controller, IsCatalogJanitorEnabledRequest request) + throws ServiceException { + return stub.isCatalogJanitorEnabled(controller, request); + } + + @Override + public CoprocessorServiceResponse execMasterService( + RpcController controller, CoprocessorServiceRequest request) + throws ServiceException { + return stub.execMasterService(controller, request); + } + + @Override + public TakeSnapshotResponse snapshot(RpcController controller, + TakeSnapshotRequest request) throws ServiceException { + return stub.snapshot(controller, request); + } + + @Override + public ListSnapshotResponse getCompletedSnapshots( + RpcController controller, ListSnapshotRequest request) + throws ServiceException { + return stub.getCompletedSnapshots(controller, request); + } + + @Override + public DeleteSnapshotResponse deleteSnapshot(RpcController controller, + DeleteSnapshotRequest request) throws ServiceException { + return stub.deleteSnapshot(controller, request); + } + + @Override + public IsSnapshotDoneResponse isSnapshotDone(RpcController controller, + IsSnapshotDoneRequest request) throws ServiceException { + return stub.isSnapshotDone(controller, request); + } + + @Override + public RestoreSnapshotResponse restoreSnapshot( + RpcController controller, RestoreSnapshotRequest request) + throws ServiceException { + return stub.restoreSnapshot(controller, request); + } + + @Override + public IsRestoreSnapshotDoneResponse isRestoreSnapshotDone( + RpcController controller, IsRestoreSnapshotDoneRequest request) + throws ServiceException { + return stub.isRestoreSnapshotDone(controller, request); + } 
+ + @Override + public IsMasterRunningResponse isMasterRunning( + RpcController controller, IsMasterRunningRequest request) + throws ServiceException { + return stub.isMasterRunning(controller, request); + } + + @Override + public void close() { + release(this.mss); + } + }; + } + + private static void release(MasterServiceState mss) { + if (mss != null && mss.connection != null) { + ((HConnectionImplementation)mss.connection).releaseMaster(mss); } } @Override - public MasterAdminProtocol getMasterAdmin() throws MasterNotRunningException { - return getKeepAliveMasterAdmin(); + public MasterMonitorKeepAliveConnection getKeepAliveMasterMonitorService() + throws MasterNotRunningException { + synchronized (masterAndZKLock) { + if (!isKeepAliveMasterConnectedAndRunning(this.monitorMasterServiceState)) { + MasterMonitorServiceStubMaker stubMaker = new MasterMonitorServiceStubMaker(); + this.monitorMasterServiceState.stub = stubMaker.makeStub(); + } + resetMasterServiceState(this.monitorMasterServiceState); + } + // Ugly delegation just so can implement close + final MasterMonitorService.BlockingInterface stub = this.monitorMasterServiceState.stub; + return new MasterMonitorKeepAliveConnection() { + final MasterMonitorServiceState mss = monitorMasterServiceState; + @Override + public GetSchemaAlterStatusResponse getSchemaAlterStatus( + RpcController controller, GetSchemaAlterStatusRequest request) + throws ServiceException { + return stub.getSchemaAlterStatus(controller, request); + } + + @Override + public GetTableDescriptorsResponse getTableDescriptors( + RpcController controller, GetTableDescriptorsRequest request) + throws ServiceException { + return stub.getTableDescriptors(controller, request); + } + + @Override + public GetClusterStatusResponse getClusterStatus( + RpcController controller, GetClusterStatusRequest request) + throws ServiceException { + return stub.getClusterStatus(controller, request); + } + + @Override + public IsMasterRunningResponse isMasterRunning( + RpcController controller, IsMasterRunningRequest request) + throws ServiceException { + return stub.isMasterRunning(controller, request); + } + + @Override + public void close() throws IOException { + release(this.mss); + } + }; } - @Override - public MasterMonitorProtocol getMasterMonitor() throws MasterNotRunningException { - return getKeepAliveMasterMonitor(); - } - - @Override - public MasterAdminKeepAliveConnection getKeepAliveMasterAdmin() - throws MasterNotRunningException { - return (MasterAdminKeepAliveConnection) - getKeepAliveMasterProtocol(masterAdminProtocol, MasterAdminKeepAliveConnection.class); - } - - @Override - public MasterMonitorKeepAliveConnection getKeepAliveMasterMonitor() - throws MasterNotRunningException { - return (MasterMonitorKeepAliveConnection) - getKeepAliveMasterProtocol(masterMonitorProtocol, MasterMonitorKeepAliveConnection.class); - } - - private boolean isKeepAliveMasterConnectedAndRunning(MasterProtocolState protocolState){ - if (protocolState.protocol == null){ + private boolean isKeepAliveMasterConnectedAndRunning(MasterServiceState mss) { + if (mss.getStub() == null){ return false; } try { - return protocolState.protocol.isMasterRunning( - null, RequestConverter.buildIsMasterRunningRequest()).getIsMasterRunning(); - }catch (UndeclaredThrowableException e){ + return mss.isMasterRunning(); + } catch (UndeclaredThrowableException e) { // It's somehow messy, but we can receive exceptions such as - // java.net.ConnectException but they're not declared. So we catch - // it... 
- LOG.info("Master connection is not running anymore", - e.getUndeclaredThrowable()); + LOG.info("Master connection is not running anymore", e.getUndeclaredThrowable()); return false; } catch (ServiceException se) { LOG.warn("Checking master connection", se); @@ -1801,35 +1972,32 @@ public class HConnectionManager { } } - private void releaseMaster(MasterProtocolState protocolState) { - if (protocolState.protocol == null){ - return; - } + void releaseMaster(MasterServiceState mss) { + if (mss.getStub() == null) return; synchronized (masterAndZKLock) { - --protocolState.userCount; - if (protocolState.userCount <= 0) { - protocolState.keepAliveUntil = - System.currentTimeMillis() + keepAlive; + --mss.userCount; + if (mss.userCount <= 0) { + mss.keepAliveUntil = System.currentTimeMillis() + keepAlive; } } } - private void closeMasterProtocol(MasterProtocolState protocolState) { - if (protocolState.protocol != null){ - LOG.info("Closing master protocol: " + protocolState.protocolClass.getName()); - protocolState.protocol = null; + private void closeMasterService(MasterServiceState mss) { + if (mss.getStub() != null) { + LOG.info("Closing master protocol: " + mss); + mss.clearStub(); } - protocolState.userCount = 0; + mss.userCount = 0; } /** - * Immediate close of the shared master. Can be by the delayed close or - * when closing the connection itself. + * Immediate close of the shared master. Can be triggered by the delayed close or when closing + * the connection itself. */ private void closeMaster() { synchronized (masterAndZKLock) { - closeMasterProtocol(masterAdminProtocol); - closeMasterProtocol(masterMonitorProtocol); + closeMasterService(adminMasterServiceState); + closeMasterService(monitorMasterServiceState); } } @@ -2473,8 +2641,7 @@ public class HConnectionManager { delayedClosing.stop("Closing connection"); closeMaster(); closeZooKeeperWatcher(); - this.servers.clear(); - this.rpcEngine.close(); + this.stubs.clear(); if (clusterStatusListener != null) { clusterStatusListener.close(); } @@ -2515,7 +2682,7 @@ public class HConnectionManager { @Override public HTableDescriptor[] listTables() throws IOException { - MasterMonitorKeepAliveConnection master = getKeepAliveMasterMonitor(); + MasterMonitorKeepAliveConnection master = getKeepAliveMasterMonitorService(); try { GetTableDescriptorsRequest req = RequestConverter.buildGetTableDescriptorsRequest(null); @@ -2530,7 +2697,7 @@ public class HConnectionManager { @Override public HTableDescriptor[] getHTableDescriptors(List tableNames) throws IOException { if (tableNames == null || tableNames.isEmpty()) return new HTableDescriptor[0]; - MasterMonitorKeepAliveConnection master = getKeepAliveMasterMonitor(); + MasterMonitorKeepAliveConnection master = getKeepAliveMasterMonitorService(); try { GetTableDescriptorsRequest req = RequestConverter.buildGetTableDescriptorsRequest(tableNames); @@ -2556,7 +2723,7 @@ public class HConnectionManager { if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) { return HTableDescriptor.META_TABLEDESC; } - MasterMonitorKeepAliveConnection master = getKeepAliveMasterMonitor(); + MasterMonitorKeepAliveConnection master = getKeepAliveMasterMonitorService(); GetTableDescriptorsResponse htds; try { GetTableDescriptorsRequest req = @@ -2575,14 +2742,6 @@ public class HConnectionManager { throw new TableNotFoundException(Bytes.toString(tableName)); } - /** - * Override the RpcClientEngine implementation used by this connection.
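Condensed, the keep-alive accounting in releaseMaster() and the DelayedClosing chore works like this: releasing the last user arms a deadline instead of closing, and the chore only closes the service once the deadline passes unused. A schematic:

class KeepAliveAccountingSketch {
  static final long KEEP_ALIVE_MS = 5 * 60 * 1000; // mirrors the keepAlive field

  int userCount;
  long keepAliveUntil = Long.MAX_VALUE;

  synchronized void release() {                    // cf. releaseMaster()
    if (--userCount <= 0) {
      keepAliveUntil = System.currentTimeMillis() + KEEP_ALIVE_MS;
    }
  }

  synchronized void choreTick() {                  // cf. DelayedClosing
    if (System.currentTimeMillis() > keepAliveUntil) {
      clearStub();                                 // the actual close happens here
      keepAliveUntil = Long.MAX_VALUE;
    }
  }

  void clearStub() { /* drop the cached master stub */ }
}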
- * FOR TESTING PURPOSES ONLY! - */ - void setRpcEngine(RpcClientEngine engine) { - this.rpcEngine = engine; - } - /** * The record of errors for servers. Visible for testing. */ @@ -2685,17 +2844,15 @@ public class HConnectionManager { * @param c The Configuration instance to set the retries into. * @param log Used to log what we set in here. */ - public static void setServerSideHConnectionRetries(final Configuration c, + public static void setServerSideHConnectionRetries(final Configuration c, final String sn, final Log log) { int hcRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); // Go big. Multiply by 10. If we can't get to meta after this many retries // then something seriously wrong. - int serversideMultiplier = - c.getInt("hbase.client.serverside.retries.multiplier", 10); + int serversideMultiplier = c.getInt("hbase.client.serverside.retries.multiplier", 10); int retries = hcRetries * serversideMultiplier; c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries); - log.debug("HConnection retries=" + retries); + log.debug(sn + " HConnection server-to-server retries=" + retries); } } - diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java index 3948d166c9a..bc1a68646e9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable; import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.filter.BinaryComparator; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; @@ -534,7 +533,7 @@ public class HTable implements HTableInterface { throws IOException { return new ServerCallable(connection, tableName, row, operationTimeout) { public Result call() throws IOException { - return ProtobufUtil.getRowOrBefore(server, + return ProtobufUtil.getRowOrBefore(stub, location.getRegionInfo().getRegionName(), row, family); } }.withRetries(); @@ -580,7 +579,7 @@ public class HTable implements HTableInterface { public Result get(final Get get) throws IOException { return new ServerCallable(connection, tableName, get.getRow(), operationTimeout) { public Result call() throws IOException { - return ProtobufUtil.get(server, + return ProtobufUtil.get(stub, location.getRegionInfo().getRegionName(), get); } }.withRetries(); @@ -649,7 +648,7 @@ public class HTable implements HTableInterface { try { MutateRequest request = RequestConverter.buildMutateRequest( location.getRegionInfo().getRegionName(), delete); - MutateResponse response = server.mutate(null, request); + MutateResponse response = stub.mutate(null, request); return Boolean.valueOf(response.getProcessed()); } catch (ServiceException se) { throw ProtobufUtil.getRemoteException(se); @@ -726,7 +725,7 @@ public class HTable implements HTableInterface { try { MultiRequest request = RequestConverter.buildMultiRequest( location.getRegionInfo().getRegionName(), rm); - server.multi(null, request); + stub.multi(null, request); } catch (ServiceException se) { throw ProtobufUtil.getRemoteException(se); } @@ -751,7 +750,7 @@ public class HTable implements HTableInterface { 
location.getRegionInfo().getRegionName(), append); PayloadCarryingRpcController rpcController = new PayloadCarryingRpcController(); - MutateResponse response = server.mutate(rpcController, request); + MutateResponse response = stub.mutate(rpcController, request); if (!response.hasResult()) return null; return ProtobufUtil.toResult(response.getResult(), rpcController.cellScanner()); } catch (ServiceException se) { @@ -776,7 +775,7 @@ public class HTable implements HTableInterface { MutateRequest request = RequestConverter.buildMutateRequest( location.getRegionInfo().getRegionName(), increment); PayloadCarryingRpcController rpcContoller = new PayloadCarryingRpcController(); - MutateResponse response = server.mutate(rpcContoller, request); + MutateResponse response = stub.mutate(rpcContoller, request); return ProtobufUtil.toResult(response.getResult(), rpcContoller.cellScanner()); } catch (ServiceException se) { throw ProtobufUtil.getRemoteException(se); @@ -821,7 +820,7 @@ public class HTable implements HTableInterface { location.getRegionInfo().getRegionName(), row, family, qualifier, amount, durability); PayloadCarryingRpcController rpcController = new PayloadCarryingRpcController(); - MutateResponse response = server.mutate(rpcController, request); + MutateResponse response = stub.mutate(rpcController, request); Result result = ProtobufUtil.toResult(response.getResult(), rpcController.cellScanner()); return Long.valueOf(Bytes.toLong(result.getValue(family, qualifier))); @@ -846,7 +845,7 @@ public class HTable implements HTableInterface { MutateRequest request = RequestConverter.buildMutateRequest( location.getRegionInfo().getRegionName(), row, family, qualifier, new BinaryComparator(value), CompareType.EQUAL, put); - MutateResponse response = server.mutate(null, request); + MutateResponse response = stub.mutate(null, request); return Boolean.valueOf(response.getProcessed()); } catch (ServiceException se) { throw ProtobufUtil.getRemoteException(se); @@ -870,7 +869,7 @@ public class HTable implements HTableInterface { MutateRequest request = RequestConverter.buildMutateRequest( location.getRegionInfo().getRegionName(), row, family, qualifier, new BinaryComparator(value), CompareType.EQUAL, delete); - MutateResponse response = server.mutate(null, request); + MutateResponse response = stub.mutate(null, request); return Boolean.valueOf(response.getProcessed()); } catch (ServiceException se) { throw ProtobufUtil.getRemoteException(se); @@ -889,7 +888,7 @@ public class HTable implements HTableInterface { try { GetRequest request = RequestConverter.buildGetRequest( location.getRegionInfo().getRegionName(), get, true); - GetResponse response = server.get(null, request); + GetResponse response = stub.get(null, request); return response.getExists(); } catch (ServiceException se) { throw ProtobufUtil.getRemoteException(se); @@ -992,7 +991,7 @@ public class HTable implements HTableInterface { try { MultiGetRequest requests = RequestConverter.buildMultiGetRequest(location .getRegionInfo().getRegionName(), getsByRegionEntry.getValue(), true, false); - MultiGetResponse responses = server.multiGet(null, requests); + MultiGetResponse responses = stub.multiGet(null, requests); return responses.getExistsList(); } catch (ServiceException se) { throw ProtobufUtil.getRemoteException(se); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterAdminKeepAliveConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterAdminKeepAliveConnection.java index 
7126073f9f7..7c361197ac1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterAdminKeepAliveConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterAdminKeepAliveConnection.java @@ -20,25 +20,25 @@ package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.hbase.MasterAdminProtocol; - -import java.io.Closeable; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos; /** * A KeepAlive connection is not physically closed immediately after the close, - * but rather kept alive for a few minutes. It makes sense only if it's shared. + * but rather kept alive for a few minutes. It makes sense only if it is shared. * - * This interface is used by a dynamic proxy. It allows to have a #close - * function in a master client. + *
<p>
This interface is implemented on a stub. It allows to have a #close function in a master + * client. * - * This class is intended to be used internally by HBase classes that need to - * speak the MasterAdminProtocol; but not by * final user code. Hence it's - * package protected. + *
<p>
This class is intended to be used internally by HBase classes that need to make invocations + * against the master on the MasterAdminProtos.MasterAdminService.BlockingInterface; but not by + * final user code. Hence it's package protected. */ -interface MasterAdminKeepAliveConnection extends MasterAdminProtocol, Closeable { - - @Override +interface MasterAdminKeepAliveConnection +extends MasterAdminProtos.MasterAdminService.BlockingInterface { + /** + * Close down all resources. + */ + // The Closeable Interface wants to throw an IOE out of a close. + // Thats a PITA. Do this below instead of Closeable. public void close(); -} - +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterMonitorKeepAliveConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterMonitorKeepAliveConnection.java index a4c7650e1dc..4f032b3204e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterMonitorKeepAliveConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterMonitorKeepAliveConnection.java @@ -20,11 +20,10 @@ package org.apache.hadoop.hbase.client; - -import org.apache.hadoop.hbase.MasterMonitorProtocol; - import java.io.Closeable; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos; + /** * A KeepAlive connection is not physically closed immediately after the close, * but rather kept alive for a few minutes. It makes sense only if it's shared. @@ -36,9 +35,5 @@ import java.io.Closeable; * speak the MasterMonitorProtocol; but not by final user code. Hence it's * package protected. */ -interface MasterMonitorKeepAliveConnection extends MasterMonitorProtocol, Closeable { - - @Override - public void close(); -} - +interface MasterMonitorKeepAliveConnection +extends MasterMonitorProtos.MasterMonitorService.BlockingInterface, Closeable {} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java index 99ba7b75c8e..578959dea5f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java @@ -26,10 +26,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable; import org.apache.hadoop.hbase.exceptions.TableNotFoundException; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.PairOfSameType; import java.io.Closeable; import java.io.IOException; @@ -275,7 +273,7 @@ public class MetaScanner { public static List listAllRegions(Configuration conf, final boolean offlined) throws IOException { final List regions = new ArrayList(); - MetaScannerVisitor visitor = new DefaultMetaScannerVisitor(conf) { + MetaScannerVisitor visitor = new DefaultMetaScannerVisitor() { @Override public boolean processRowInternal(Result result) throws IOException { if (result == null || result.isEmpty()) { @@ -310,7 +308,7 @@ public class MetaScanner { final byte [] tablename, final boolean offlined) throws IOException { final NavigableMap regions = new TreeMap(); - MetaScannerVisitor visitor = new TableMetaScannerVisitor(conf, tablename) { + MetaScannerVisitor visitor = new TableMetaScannerVisitor(tablename) { @Override public boolean 
processRowInternal(Result rowResult) throws IOException { HRegionInfo info = getHRegionInfo(rowResult); @@ -354,10 +352,8 @@ public class MetaScanner { public static abstract class DefaultMetaScannerVisitor extends MetaScannerVisitorBase { - protected Configuration conf; - - public DefaultMetaScannerVisitor(Configuration conf) { - this.conf = conf; + public DefaultMetaScannerVisitor() { + super(); } public abstract boolean processRowInternal(Result rowResult) throws IOException; @@ -386,8 +382,8 @@ public class MetaScanner { public static abstract class TableMetaScannerVisitor extends DefaultMetaScannerVisitor { private byte[] tableName; - public TableMetaScannerVisitor(Configuration conf, byte[] tableName) { - super(conf); + public TableMetaScannerVisitor(byte[] tableName) { + super(); this.tableName = tableName; } @@ -402,6 +398,5 @@ public class MetaScanner { } return super.processRow(rowResult); } - } -} +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java index 2a3d2c573ba..45157ccfbe8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java @@ -74,7 +74,7 @@ class MultiServerCallable extends ServerCallable { RequestConverter.buildNoDataMultiRequest(regionName, rms, cells); // Carry the cells over the proxy/pb Service interface using the payload carrying // rpc controller. - server.multi(new PayloadCarryingRpcController(cells), multiRequest); + stub.multi(new PayloadCarryingRpcController(cells), multiRequest); // This multi call does not return results. response.add(regionName, action.getOriginalIndex(), Result.EMPTY_RESULT); } catch (ServiceException se) { @@ -99,7 +99,7 @@ class MultiServerCallable extends ServerCallable { // Controller optionally carries cell data over the proxy/service boundary and also // optionally ferries cell response data back out again. 
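// Illustrative sketch, not part of this patch (names below are assumptions): the same
// controller instance carries cell data out with the request and back with the response:
//   PayloadCarryingRpcController pcrc = new PayloadCarryingRpcController(cellsToSend);
//   ClientProtos.MultiResponse proto = stub.multi(pcrc, multiRequest); // cells travel beside the pb message
//   CellScanner cellsBack = pcrc.cellScanner(); // response cell data ferried back out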
PayloadCarryingRpcController controller = new PayloadCarryingRpcController(cells); - ClientProtos.MultiResponse responseProto = server.multi(controller, multiRequest); + ClientProtos.MultiResponse responseProto = stub.multi(controller, multiRequest); results = ResponseConverter.getResults(responseProto, controller.cellScanner()); } catch (ServiceException se) { ex = ProtobufUtil.getRemoteException(se); @@ -114,7 +114,7 @@ class MultiServerCallable extends ServerCallable { } @Override - public void connect(boolean reload) throws IOException { - server = connection.getClient(loc.getServerName()); + public void prepare(boolean reload) throws IOException { + stub = connection.getClient(loc.getServerName()); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java index 3ede7618e02..c1d40fb0eb0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java @@ -92,9 +92,9 @@ public class ScannerCallable extends ServerCallable { * @throws IOException */ @Override - public void connect(boolean reload) throws IOException { + public void prepare(boolean reload) throws IOException { if (!instantiated || reload) { - super.connect(reload); + super.prepare(reload); checkIfRegionServerIsRemote(); instantiated = true; } @@ -144,7 +144,7 @@ public class ScannerCallable extends ServerCallable { RequestConverter.buildScanRequest(scannerId, caching, false, nextCallSeq); ScanResponse response = null; try { - response = server.scan(null, request); + response = stub.scan(null, request); // Client and RS maintain a nextCallSeq number during the scan. Every next() call // from client to server will increment this number in both sides. 
Client passes this // number along with the request and at RS side both the incoming nextCallSeq and its @@ -248,7 +248,7 @@ public class ScannerCallable extends ServerCallable { ScanRequest request = RequestConverter.buildScanRequest(this.scannerId, 0, true); try { - server.scan(null, request); + stub.scan(null, request); } catch (ServiceException se) { throw ProtobufUtil.getRemoteException(se); } @@ -265,7 +265,7 @@ public class ScannerCallable extends ServerCallable { this.location.getRegionInfo().getRegionName(), this.scan, 0, false); try { - ScanResponse response = server.scan(null, request); + ScanResponse response = stub.scan(null, request); long id = response.getScannerId(); if (logScannerActivity) { LOG.info("Open scanner=" + id + " for scan=" + scan.toString() diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java index ae099dd7c6d..1b2e54a170f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java @@ -29,7 +29,8 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException; import org.apache.hadoop.hbase.exceptions.NotServingRegionException; -import org.apache.hadoop.hbase.ipc.HBaseClientRPC; +import org.apache.hadoop.hbase.ipc.RpcClient; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.ipc.RemoteException; @@ -47,7 +48,7 @@ import java.util.concurrent.Callable; * return type and method we actually invoke on remote Server. Usually * used inside a try/catch that fields usual connection failures all wrapped * up in a retry loop. - *
<p>
Call {@link #connect(boolean)} to connect to server hosting region + *
<p>
Call {@link #prepare(boolean)} to connect to server hosting region * that contains the passed row in the passed table before invoking * {@link #call()}. * @see HConnection#getRegionServerWithoutRetries(ServerCallable) @@ -62,7 +63,7 @@ public abstract class ServerCallable implements Callable { protected final byte [] tableName; protected final byte [] row; protected HRegionLocation location; - protected ClientProtocol server; + protected ClientService.BlockingInterface stub; protected int callTimeout; protected long globalStartTime; protected long startTime, endTime; @@ -86,13 +87,14 @@ public abstract class ServerCallable implements Callable { } /** - * Connect to the server hosting region with row from tablename. + * Prepare for connection to the server hosting region with row from tablename. Does lookup + * to find region location and hosting server. * @param reload Set this to true if connection should re-find the region * @throws IOException e */ - public void connect(final boolean reload) throws IOException { + public void prepare(final boolean reload) throws IOException { this.location = connection.getRegionLocation(tableName, row, reload); - this.server = connection.getClient(location.getServerName()); + this.stub = connection.getClient(location.getServerName()); } /** @return the server name @@ -127,11 +129,11 @@ public abstract class ServerCallable implements Callable { // resetting to the minimum. remaining = MIN_RPC_TIMEOUT; } - HBaseClientRPC.setRpcTimeout(remaining); + RpcClient.setRpcTimeout(remaining); } public void afterCall() { - HBaseClientRPC.resetRpcTimeout(); + RpcClient.resetRpcTimeout(); this.endTime = EnvironmentEdgeManager.currentTimeMillis(); } @@ -164,11 +166,11 @@ public abstract class ServerCallable implements Callable { long expectedSleep = 0; try { beforeCall(); - connect(tries != 0); // if called with false, check table status on ZK + prepare(tries != 0); // if called with false, check table status on ZK return call(); } catch (Throwable t) { - LOG.warn("Received exception, tries=" + tries + ", numRetries=" + numRetries + - " message=" + t.getMessage()); + LOG.warn("Received exception, tries=" + tries + ", numRetries=" + numRetries + ":" + + t.getMessage()); t = translateException(t); // translateException throws an exception when we should not retry, i.e. when it's the @@ -237,7 +239,7 @@ public abstract class ServerCallable implements Callable { this.globalStartTime = EnvironmentEdgeManager.currentTimeMillis(); try { beforeCall(); - connect(false); + prepare(false); return call(); } catch (Throwable t) { Throwable t2 = translateException(t); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerEngine.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BadAuthException.java similarity index 61% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerEngine.java rename to hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BadAuthException.java index e6600538614..66bd13342bb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerEngine.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BadAuthException.java @@ -1,5 +1,4 @@ -/* - * +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,17 +17,16 @@ */ package org.apache.hadoop.hbase.ipc; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; +public class BadAuthException extends FatalConnectionException { + public BadAuthException() { + super(); + } -import java.io.IOException; + public BadAuthException(String msg) { + super(msg); + } -/** An RPC implementation for the server. */ -@InterfaceAudience.Private -interface RpcServerEngine { - /** Construct a server for a protocol implementation instance. */ - RpcServer getServer(Object instance, Class[] protocols, - String bindAddress, int port, int numHandlers, int metaHandlerCount, - boolean verbose, Configuration conf, int highPriorityLevel) - throws IOException; + public BadAuthException(String msg, Throwable t) { + super(msg, t); + } } \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClientRPC.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClientRPC.java deleted file mode 100644 index 8d29a49ae3d..00000000000 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClientRPC.java +++ /dev/null @@ -1,152 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.ipc; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.IpcProtocol; -import org.apache.hadoop.hbase.client.RetriesExhaustedException; - -import java.io.IOException; -import java.io.InterruptedIOException; -import java.net.ConnectException; -import java.net.InetSocketAddress; -import java.net.SocketTimeoutException; - -/** - * An RPC implementation. This class provides the client side. 
- */ -@InterfaceAudience.Private -public class HBaseClientRPC { - protected static final Log LOG = - LogFactory.getLog("org.apache.hadoop.ipc.HBaseClientRPC"); - - // thread-specific RPC timeout, which may override that of RpcEngine - private static ThreadLocal rpcTimeout = new ThreadLocal() { - @Override - protected Integer initialValue() { - return HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT; - } - }; - - /** - * @param protocol protocol interface - * @param addr address of remote service - * @param conf configuration - * @param maxAttempts max attempts - * @param rpcTimeout timeout for each RPC - * @param timeout timeout in milliseconds - * @return proxy - * @throws java.io.IOException e - */ - public static T waitForProxy(RpcClientEngine engine, - Class protocol, - InetSocketAddress addr, - Configuration conf, - int maxAttempts, - int rpcTimeout, - long timeout) - throws IOException { - // HBase does limited number of reconnects which is different from hadoop. - long startTime = System.currentTimeMillis(); - IOException ioe; - int reconnectAttempts = 0; - while (true) { - try { - return engine.getProxy(protocol, addr, conf, rpcTimeout); - } catch (SocketTimeoutException te) { - LOG.info("Problem connecting to server: " + addr); - ioe = te; - } catch (IOException ioex) { - // We only handle the ConnectException. - ConnectException ce = null; - if (ioex instanceof ConnectException) { - ce = (ConnectException) ioex; - ioe = ce; - } else if (ioex.getCause() != null - && ioex.getCause() instanceof ConnectException) { - ce = (ConnectException) ioex.getCause(); - ioe = ce; - } else if (ioex.getMessage().toLowerCase() - .contains("connection refused")) { - ce = new ConnectException(ioex.getMessage()); - ioe = ce; - } else { - // This is the exception we can't handle. - ioe = ioex; - } - if (ce != null) { - handleConnectionException(++reconnectAttempts, maxAttempts, protocol, - addr, ce); - } - } - // check if timed out - if (System.currentTimeMillis() - timeout >= startTime) { - throw ioe; - } - - // wait for retry - try { - Thread.sleep(1000); - } catch (InterruptedException ie) { - Thread.interrupted(); - throw new InterruptedIOException(); - } - } - } - - /** - * @param retries current retried times. 
- * @param maxAttmpts max attempts - * @param protocol protocol interface - * @param addr address of remote service - * @param ce ConnectException - * @throws org.apache.hadoop.hbase.client.RetriesExhaustedException - * - */ - private static void handleConnectionException(int retries, - int maxAttmpts, - Class protocol, - InetSocketAddress addr, - ConnectException ce) - throws RetriesExhaustedException { - if (maxAttmpts >= 0 && retries >= maxAttmpts) { - LOG.info("Server at " + addr + " could not be reached after " - + maxAttmpts + " tries, giving up."); - throw new RetriesExhaustedException("Failed setting up proxy " + protocol - + " to " + addr.toString() + " after attempts=" + maxAttmpts, ce); - } - } - - public static void setRpcTimeout(int t) { - rpcTimeout.set(t); - } - - public static int getRpcTimeout() { - return rpcTimeout.get(); - } - - public static void resetRpcTimeout() { - rpcTimeout.remove(); - } -} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java index 19c786d6b00..3b43bfdc952 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java @@ -33,6 +33,12 @@ import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.io.ByteBufferOutputStream; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.compress.CodecPool; import org.apache.hadoop.io.compress.CompressionCodec; @@ -44,6 +50,7 @@ import com.google.common.base.Preconditions; import com.google.protobuf.CodedInputStream; import com.google.protobuf.CodedOutputStream; import com.google.protobuf.Message; +import com.google.protobuf.TextFormat; /** * Utility to help ipc'ing. @@ -263,4 +270,24 @@ class IPCUtil { Preconditions.checkArgument(totalSize < Integer.MAX_VALUE); return totalSize; } -} + + /** + * Return short version of Param Message toString'd, shorter than TextFormat#regionServerStartup + * @param methodName + * @param request + * @return toString of passed param + */ + static String getRequestShortTextFormat(Message request) { + if (request instanceof ScanRequest) { + return TextFormat.shortDebugString(request); + } else if (request instanceof RegionServerReportRequest) { + // Print a short message only, just the servername and the requests, not the full load. 
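+ // Illustrative output with made-up values: where a full TextFormat dump would include
+ // every region's load, the short form printed here is roughly:
+ //   server host_name: "rs1.example.org" port: 60020 start_code: 1363200000000 load { numberOfRequests: 42 }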
+ RegionServerReportRequest r = (RegionServerReportRequest)request; + return "server " + TextFormat.shortDebugString(r.getServer()) + + " load { numberOfRequests: " + r.getLoad().getNumberOfRequests() + " }"; + } else if (request instanceof RegionServerStartupRequest) { + return TextFormat.shortDebugString(request); + } + return "TODO " + TextFormat.shortDebugString(request); + } +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcClientEngine.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcClientEngine.java deleted file mode 100644 index f28b61b2c3f..00000000000 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcClientEngine.java +++ /dev/null @@ -1,166 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.ipc; - -import com.google.protobuf.Message; -import com.google.protobuf.ServiceException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CellScanner; -import org.apache.hadoop.hbase.IpcProtocol; -import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.ipc.RemoteException; - -import java.io.IOException; -import java.lang.reflect.InvocationHandler; -import java.lang.reflect.Method; -import java.lang.reflect.Proxy; -import java.net.InetSocketAddress; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -public class ProtobufRpcClientEngine implements RpcClientEngine { - - private static final Log LOG = - LogFactory.getLog("org.apache.hadoop.hbase.ipc.ProtobufRpcClientEngine"); - - public HBaseClient getClient() { - return client; - } - - protected HBaseClient client; - - public ProtobufRpcClientEngine(Configuration conf, String clusterId) { - this.client = new HBaseClient(conf, clusterId); - } - - - @Override - public T getProxy( - Class protocol, InetSocketAddress addr, - Configuration conf, int rpcTimeout) throws IOException { - final Invoker invoker = new Invoker(protocol, addr, User.getCurrent(), rpcTimeout, client); - return (T) Proxy.newProxyInstance( - protocol.getClassLoader(), new Class[]{protocol}, invoker); - } - - @Override - public void close() { - this.client.stop(); - } - - static class Invoker implements InvocationHandler { - private static final Map returnTypes = - new ConcurrentHashMap(); - private Class protocol; - private InetSocketAddress address; - private User ticket; - private HBaseClient client; - final private int rpcTimeout; - - public Invoker(Class protocol, InetSocketAddress addr, User ticket, - int rpcTimeout, HBaseClient client) - throws IOException { - this.protocol = 
protocol; - this.address = addr; - this.ticket = ticket; - this.client = client; - this.rpcTimeout = rpcTimeout; - } - - /** - * This is the client side invoker of RPC method. It only throws - * ServiceException, since the invocation proxy expects only - * ServiceException to be thrown by the method in case protobuf service. - * - * ServiceException has the following causes: - *
<ol> - * <li>Exceptions encountered on the client side in this method are - * set as cause in ServiceException as is.</li> - * <li>Exceptions from the server are wrapped in RemoteException and are - * set as cause in ServiceException</li> - * </ol> - * - * <p>
Note that the client calling protobuf RPC methods, must handle - * ServiceException by getting the cause from the ServiceException. If the - * cause is RemoteException, then unwrap it to get the exception thrown by - * the server. - */ - @Override - public Object invoke(Object proxy, Method method, Object[] args) - throws ServiceException { - long startTime = 0; - if (LOG.isTraceEnabled()) { - startTime = System.currentTimeMillis(); - } - if (args.length != 2) { - throw new ServiceException(method.getName() + " didn't get two args: " + args.length); - } - // Get the controller. Often null. Presume payload carrying controller. Payload is optional. - // It is cells/data that we do not want to protobuf. - PayloadCarryingRpcController controller = (PayloadCarryingRpcController)args[0]; - CellScanner cells = null; - if (controller != null) { - cells = controller.cellScanner(); - // Clear it here so we don't by mistake try and these cells processing results. - controller.setCellScanner(null); - } - // The request parameter - Message param = (Message)args[1]; - Pair val = null; - try { - val = client.call(method, param, cells, address, protocol, ticket, rpcTimeout); - if (controller != null) { - // Shove the results into controller so can be carried across the proxy/pb service void. - if (val.getSecond() != null) controller.setCellScanner(val.getSecond()); - } else if (val.getSecond() != null) { - throw new ServiceException("Client dropping data on the floor!"); - } - - if (LOG.isTraceEnabled()) { - long callTime = System.currentTimeMillis() - startTime; - if (LOG.isTraceEnabled()) LOG.trace("Call: " + method.getName() + " " + callTime); - } - return val.getFirst(); - } catch (Throwable e) { - if (e instanceof RemoteException) { - Throwable cause = ((RemoteException)e).unwrapRemoteException(); - throw new ServiceException("methodName=" + method.getName(), cause); - } - throw new ServiceException(e); - } - } - - static Message getReturnProtoType(Method method) throws Exception { - if (returnTypes.containsKey(method.getName())) { - return returnTypes.get(method.getName()); - } - Class returnType = method.getReturnType(); - if (returnType.getName().equals("void")) return null; - Method newInstMethod = returnType.getMethod("getDefaultInstance"); - newInstMethod.setAccessible(true); - Message protoType = (Message) newInstMethod.invoke(null, (Object[]) null); - returnTypes.put(method.getName(), protoType); - return protoType; - } - } -} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ReflectionCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ReflectionCache.java deleted file mode 100644 index 18564c07ca1..00000000000 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ReflectionCache.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.ipc; - -import java.lang.reflect.Method; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -import org.apache.hadoop.hbase.IpcProtocol; - - -import com.google.protobuf.Message; - -/** - * Save on relection by keeping around method, method argument, and constructor instances - */ -class ReflectionCache { - private final Map methodArgCache = new ConcurrentHashMap(); - private final Map methodInstanceCache = new ConcurrentHashMap(); - - public ReflectionCache() { - super(); - } - - Method getMethod(Class protocol, String methodName) { - Method method = this.methodInstanceCache.get(methodName); - if (method != null) return method; - Method [] methods = protocol.getMethods(); - for (Method m : methods) { - if (m.getName().equals(methodName)) { - m.setAccessible(true); - this.methodInstanceCache.put(methodName, m); - return m; - } - } - return null; - } - - Message getMethodArgType(Method method) throws Exception { - Message protoType = this.methodArgCache.get(method.getName()); - if (protoType != null) return protoType; - Class[] args = method.getParameterTypes(); - Class arg; - if (args.length == 2) { - // RpcController + Message in the method args - // (generated code from RPC bits in .proto files have RpcController) - arg = args[1]; - } else if (args.length == 1) { - arg = args[0]; - } else { - //unexpected - return null; - } - //in the protobuf methods, args[1] is the only significant argument - Method newInstMethod = arg.getMethod("getDefaultInstance"); - newInstMethod.setAccessible(true); - protoType = (Message) newInstMethod.invoke(null, (Object[]) null); - this.methodArgCache.put(method.getName(), protoType); - return protoType; - } -} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java index 7131a261637..82974508050 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java @@ -79,7 +79,7 @@ public class RegionCoprocessorRpcChannel extends CoprocessorRpcChannel{ new ServerCallable(connection, table, row) { public CoprocessorServiceResponse call() throws Exception { byte[] regionName = location.getRegionInfo().getRegionName(); - return ProtobufUtil.execService(server, call, regionName); + return ProtobufUtil.execService(stub, call, regionName); } }; CoprocessorServiceResponse result = callable.withRetries(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java index d405590aac4..79d1daa0fac 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java @@ -22,8 +22,9 @@ import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException; import org.apache.hadoop.ipc.RemoteException; /** - * An {@link RemoteException} with some extra information. If source exception + * A {@link RemoteException} with some extra information. If source exception * was a {@link DoNotRetryIOException}, {@link #isDoNotRetry()} will return true. + *
<p>
A {@link RemoteException} hosts exceptions we got from the server. */ @SuppressWarnings("serial") @InterfaceAudience.Private diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java similarity index 79% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java rename to hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java index be60417f26d..6ff45d7c552 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java @@ -27,7 +27,6 @@ import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.lang.reflect.Method; import java.net.ConnectException; import java.net.InetSocketAddress; import java.net.Socket; @@ -55,9 +54,10 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.IpcProtocol; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.codec.KeyValueCodec; +import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos; import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.CellBlockMeta; import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader; import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ExceptionResponse; @@ -67,8 +67,7 @@ import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation; import org.apache.hadoop.hbase.protobuf.generated.Tracing.RPCTInfo; import org.apache.hadoop.hbase.security.AuthMethod; import org.apache.hadoop.hbase.security.HBaseSaslRpcClient; -import org.apache.hadoop.hbase.security.KerberosInfo; -import org.apache.hadoop.hbase.security.TokenInfo; +import org.apache.hadoop.hbase.security.SecurityInfo; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier; import org.apache.hadoop.hbase.security.token.AuthenticationTokenSelector; @@ -89,23 +88,25 @@ import org.apache.hadoop.security.token.TokenSelector; import org.cloudera.htrace.Span; import org.cloudera.htrace.Trace; +import com.google.protobuf.BlockingRpcChannel; +import com.google.protobuf.Descriptors.MethodDescriptor; import com.google.protobuf.Message; import com.google.protobuf.Message.Builder; +import com.google.protobuf.RpcController; +import com.google.protobuf.ServiceException; import com.google.protobuf.TextFormat; /** - * A client for an IPC service. IPC calls take a single Protobuf message as a - * request and returns a single Protobuf message as result. A service runs on - * a port and is defined by a parameter class and a value class. - * + * Does RPC against a cluster. Manages connections per regionserver in the cluster. *
<p>
See HBaseServer */ @InterfaceAudience.Private -public class HBaseClient { - public static final Log LOG = LogFactory.getLog("org.apache.hadoop.ipc.HBaseClient"); +public class RpcClient { + // The LOG key is intentionally not from this package to avoid ipc logging at DEBUG (all under + // o.a.h.hbase is set to DEBUG as default). + public static final Log LOG = LogFactory.getLog("org.apache.hadoop.ipc.RpcClient"); protected final PoolMap connections; - private ReflectionCache reflectionCache = new ReflectionCache(); protected int counter; // counter for call ids protected final AtomicBoolean running = new AtomicBoolean(true); // if client runs @@ -117,7 +118,6 @@ public class HBaseClient { protected final boolean tcpNoDelay; // if T then disable Nagle's Algorithm protected final boolean tcpKeepAlive; // if T then use keepalives protected int pingInterval; // how often sends ping to the server in msecs - protected int socketTimeout; // socket timeout protected FailedServers failedServers; private final Codec codec; private final CompressionCodec compressor; @@ -135,6 +135,15 @@ public class HBaseClient { public final static String FAILED_SERVER_EXPIRY_KEY = "hbase.ipc.client.failed.servers.expiry"; public final static int FAILED_SERVER_EXPIRY_DEFAULT = 2000; + // thread-specific RPC timeout, which may override that of what was passed in. + // TODO: Verify still being used. + private static ThreadLocal rpcTimeout = new ThreadLocal() { + @Override + protected Integer initialValue() { + return HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT; + } + }; + /** * A class to manage a list of servers that failed recently. */ @@ -242,24 +251,28 @@ public class HBaseClient { */ CellScanner cells; Message response; // value, null if error + // The return type. Used to create shell into which we deserialize the response if any. + Message responseDefaultType; IOException error; // exception, null if value boolean done; // true when call is done long startTime; - final Method method; + final MethodDescriptor md; - protected Call(final Method method, Message param, final CellScanner cells) { + protected Call(final MethodDescriptor md, Message param, final CellScanner cells, + final Message responseDefaultType) { this.param = param; - this.method = method; + this.md = md; this.cells = cells; this.startTime = System.currentTimeMillis(); - synchronized (HBaseClient.this) { + this.responseDefaultType = responseDefaultType; + synchronized (RpcClient.this) { this.id = counter++; } } @Override public String toString() { - return "callId: " + this.id + " methodName: " + this.method.getName() + " param {" + + return "callId: " + this.id + " methodName: " + this.md.getName() + " param {" + (this.param != null? TextFormat.shortDebugString(this.param): "") + "}"; } @@ -275,18 +288,19 @@ public class HBaseClient { * * @param error exception thrown by the call; either local or remote */ - public synchronized void setException(IOException error) { + public void setException(IOException error) { this.error = error; callComplete(); } - /** Set the return value when there is no error. + /** + * Set the return value when there is no error. * Notify the caller the call is done. * * @param response return value of the call. 
* @param cells Can be null */ - public synchronized void setResponse(Message response, final CellScanner cells) { + public void setResponse(Message response, final CellScanner cells) { this.response = response; this.cells = cells; callComplete(); @@ -297,10 +311,11 @@ public class HBaseClient { } } - protected final static Map> tokenHandlers = - new HashMap>(); + protected final static Map> tokenHandlers = + new HashMap>(); static { - tokenHandlers.put(AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE.toString(), + tokenHandlers.put(AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN, new AuthenticationTokenSelector()); } @@ -352,33 +367,30 @@ public class HBaseClient { this.compressor = compressor; UserGroupInformation ticket = remoteId.getTicket().getUGI(); - Class protocol = remoteId.getProtocol(); + SecurityInfo securityInfo = SecurityInfo.getInfo(remoteId.getServiceName()); this.useSasl = User.isHBaseSecurityEnabled(conf); - if (useSasl && protocol != null) { - TokenInfo tokenInfo = protocol.getAnnotation(TokenInfo.class); - if (tokenInfo != null) { + if (useSasl && securityInfo != null) { + AuthenticationProtos.TokenIdentifier.Kind tokenKind = securityInfo.getTokenKind(); + if (tokenKind != null) { TokenSelector tokenSelector = - tokenHandlers.get(tokenInfo.value()); + tokenHandlers.get(tokenKind); if (tokenSelector != null) { token = tokenSelector.selectToken(new Text(clusterId), ticket.getTokens()); } else if (LOG.isDebugEnabled()) { - LOG.debug("No token selector found for type "+tokenInfo.value()); + LOG.debug("No token selector found for type "+tokenKind); } } - KerberosInfo krbInfo = protocol.getAnnotation(KerberosInfo.class); - if (krbInfo != null) { - String serverKey = krbInfo.serverPrincipal(); - if (serverKey == null) { - throw new IOException( - "Can't obtain server Kerberos config key from KerberosInfo"); - } - serverPrincipal = SecurityUtil.getServerPrincipal( - conf.get(serverKey), server.getAddress().getCanonicalHostName().toLowerCase()); - if (LOG.isDebugEnabled()) { - LOG.debug("RPC Server Kerberos principal name for protocol=" - + protocol.getCanonicalName() + " is " + serverPrincipal); - } + String serverKey = securityInfo.getServerPrincipal(); + if (serverKey == null) { + throw new IOException( + "Can't obtain server Kerberos config key from SecurityInfo"); + } + serverPrincipal = SecurityUtil.getServerPrincipal( + conf.get(serverKey), server.getAddress().getCanonicalHostName().toLowerCase()); + if (LOG.isDebugEnabled()) { + LOG.debug("RPC Server Kerberos principal name for service=" + + remoteId.getServiceName() + " is " + serverPrincipal); } } @@ -391,14 +403,14 @@ public class HBaseClient { } if (LOG.isDebugEnabled()) { - LOG.debug("Use " + authMethod + " authentication for protocol " - + (protocol == null ? "null" : protocol.getSimpleName())); + LOG.debug("Use " + authMethod + " authentication for service " + remoteId.serviceName + + ", sasl=" + useSasl); } reloginMaxBackoff = conf.getInt("hbase.security.relogin.maxbackoff", 5000); this.remoteId = remoteId; ConnectionHeader.Builder builder = ConnectionHeader.newBuilder(); - builder.setProtocol(protocol == null ? 
"" : protocol.getName()); + builder.setServiceName(remoteId.getServiceName()); UserInformation userInfoPB; if ((userInfoPB = getUserInfo(ticket)) != null) { builder.setUserInfo(userInfoPB); @@ -464,7 +476,9 @@ public class HBaseClient { } } else { calls.put(call.id, call); - notify(); + synchronized (call) { + notify(); + } } } @@ -483,8 +497,7 @@ public class HBaseClient { * otherwise, throw the timeout exception. */ private void handleTimeout(SocketTimeoutException e) throws IOException { - if (shouldCloseConnection.get() || !running.get() || - remoteId.rpcTimeout > 0) { + if (shouldCloseConnection.get() || !running.get() || remoteId.rpcTimeout > 0) { throw e; } sendPing(); @@ -580,8 +593,8 @@ public class HBaseClient { * @param ioe failure reason * @throws IOException if max number of retries is reached */ - private void handleConnectionFailure( - int curRetries, int maxRetries, IOException ioe) throws IOException { + private void handleConnectionFailure(int curRetries, int maxRetries, IOException ioe) + throws IOException { closeConnection(); @@ -657,7 +670,7 @@ public class HBaseClient { } try { - while (waitForWork()) {//wait here for work - read or close connection + while (waitForWork()) { // Wait here for work - read or close connection readResponse(); } } catch (Throwable t) { @@ -772,7 +785,7 @@ public class HBaseClient { } protected synchronized void setupIOstreams() - throws IOException, InterruptedException { + throws IOException, InterruptedException { if (socket != null || shouldCloseConnection.get()) { return; } @@ -811,19 +824,15 @@ public class HBaseClient { ticket = ticket.getRealUser(); } } - boolean continueSasl; + boolean continueSasl = false; + if (ticket == null) throw new FatalConnectionException("ticket/user is null"); try { - if (ticket == null) { - throw new NullPointerException("ticket is null"); - } else { - continueSasl = - ticket.doAs(new PrivilegedExceptionAction() { - @Override - public Boolean run() throws IOException { - return setupSaslConnection(in2, out2); - } - }); - } + continueSasl = ticket.doAs(new PrivilegedExceptionAction() { + @Override + public Boolean run() throws IOException { + return setupSaslConnection(in2, out2); + } + }); } catch (Exception ex) { if (rand == null) { rand = new Random(); @@ -855,7 +864,7 @@ public class HBaseClient { } } catch (Throwable t) { failedServers.addToFailedServers(remoteId.address); - IOException e; + IOException e = null; if (t instanceof IOException) { e = (IOException)t; markClosed(e); @@ -891,9 +900,11 @@ public class HBaseClient { * Out is not synchronized because only the first thread does this. */ private void writeConnectionHeader() throws IOException { - this.out.writeInt(this.header.getSerializedSize()); - this.header.writeTo(this.out); - this.out.flush(); + synchronized (this.out) { + this.out.writeInt(this.header.getSerializedSize()); + this.header.writeTo(this.out); + this.out.flush(); + } } /** Close the connection. */ @@ -913,7 +924,9 @@ public class HBaseClient { // close the streams and therefore the socket IOUtils.closeStream(out); + this.out = null; IOUtils.closeStream(in); + this.in = null; disposeSasl(); // clean up all calls @@ -930,7 +943,7 @@ public class HBaseClient { // log the info if (LOG.isDebugEnabled()) { LOG.debug(getName() + ": closing ipc connection to " + server + ": " + - closeException.getMessage(),closeException); + closeException.getMessage(), closeException); } // cleanup calls @@ -957,7 +970,7 @@ public class HBaseClient { builder.setTraceInfo(RPCTInfo.newBuilder(). 
setParentId(s.getSpanId()).setTraceId(s.getTraceId())); } - builder.setMethodName(call.method.getName()); + builder.setMethodName(call.md.getName()); builder.setRequestParam(call.param != null); ByteBuffer cellBlock = ipcUtil.buildCellBlock(this.codec, this.compressor, call.cells); if (cellBlock != null) { @@ -970,8 +983,8 @@ public class HBaseClient { synchronized (this.out) { // FindBugs IS2_INCONSISTENT_SYNC IPCUtil.write(this.out, header, call.param, cellBlock); } - if (LOG.isTraceEnabled()) { - LOG.trace(getName() + ": wrote request header " + TextFormat.shortDebugString(header)); + if (LOG.isDebugEnabled()) { + LOG.debug(getName() + ": wrote request header " + TextFormat.shortDebugString(header)); } } catch(IOException e) { markClosed(e); @@ -984,20 +997,32 @@ public class HBaseClient { protected void readResponse() { if (shouldCloseConnection.get()) return; touch(); + int totalSize = -1; try { // See HBaseServer.Call.setResponse for where we write out the response. - // Total size of the response. Unused. But have to read it in anyways. - /*int totalSize =*/ in.readInt(); + totalSize = in.readInt(); // Read the header ResponseHeader responseHeader = ResponseHeader.parseDelimitedFrom(in); int id = responseHeader.getCallId(); if (LOG.isDebugEnabled()) { LOG.debug(getName() + ": got response header " + - TextFormat.shortDebugString(responseHeader)); + TextFormat.shortDebugString(responseHeader) + ", totalSize: " + totalSize + " bytes"); } Call call = calls.get(id); + if (call == null) { + // So we got a response for which we have no corresponding 'call' here on the client-side. + // We probably timed out waiting, cleaned up all references, and now the server decides + // to return a response. There is nothing we can do w/ the response at this stage. Clean + // out the wire of the response so its out of the way and we can get other responses on + // this connection. + int readSoFar = IPCUtil.getTotalSizeWhenWrittenDelimited(responseHeader); + int whatIsLeftToRead = totalSize - readSoFar; + LOG.debug("Unknown callId: " + id + ", skipping over this response of " + + whatIsLeftToRead + " bytes"); + IOUtils.skipFully(in, whatIsLeftToRead); + } if (responseHeader.hasException()) { ExceptionResponse exceptionResponse = responseHeader.getException(); RemoteException re = createRemoteException(exceptionResponse); @@ -1007,20 +1032,10 @@ public class HBaseClient { if (call != null) call.setException(re); } } else { - Message rpcResponseType = null; - if (call != null){ - try { - // TODO: Why pb engine pollution in here in this class? FIX. 
- rpcResponseType = - ProtobufRpcClientEngine.Invoker.getReturnProtoType( - reflectionCache.getMethod(remoteId.getProtocol(), call.method.getName())); - } catch (Exception e) { - throw new RuntimeException(e); //local exception - } - } Message value = null; - if (rpcResponseType != null) { - Builder builder = rpcResponseType.newBuilderForType(); + // Call may be null because it may have timedout and been cleaned up on this side already + if (call != null && call.responseDefaultType != null) { + Builder builder = call.responseDefaultType.newBuilderForType(); builder.mergeDelimitedFrom(in); value = builder.build(); } @@ -1028,7 +1043,7 @@ public class HBaseClient { if (responseHeader.hasCellBlockMeta()) { int size = responseHeader.getCellBlockMeta().getLength(); byte [] cellBlock = new byte[size]; - IPCUtil.readChunked(this.in, cellBlock, 0, size); + IOUtils.readFully(this.in, cellBlock, 0, cellBlock.length); cellBlockScanner = ipcUtil.createCellScanner(this.codec, this.compressor, cellBlock); } // it's possible that this call may have been cleaned up due to a RPC @@ -1043,8 +1058,7 @@ public class HBaseClient { // {@link ConnectionId#rpcTimeout}. closeException = e; } else { - // Since the server did not respond within the default ping interval - // time, treat this as a fatal condition and close this connection + // Treat this as a fatal condition and close this connection markClosed(e); } } finally { @@ -1146,31 +1160,42 @@ public class HBaseClient { } /** - * Construct an IPC client whose values are of the {@link Message} - * class. + * Construct an IPC cluster client whose values are of the {@link Message} class. * @param conf configuration * @param factory socket factory */ - public HBaseClient(Configuration conf, String clusterId, SocketFactory factory) { - this.maxIdleTime = - conf.getInt("hbase.ipc.client.connection.maxidletime", 10000); //10s + RpcClient(Configuration conf, String clusterId, SocketFactory factory) { + this.maxIdleTime = conf.getInt("hbase.ipc.client.connection.maxidletime", 10000); //10s this.maxRetries = conf.getInt("hbase.ipc.client.connect.max.retries", 0); this.failureSleep = conf.getInt("hbase.client.pause", 1000); this.tcpNoDelay = conf.getBoolean("hbase.ipc.client.tcpnodelay", true); this.tcpKeepAlive = conf.getBoolean("hbase.ipc.client.tcpkeepalive", true); this.pingInterval = getPingInterval(conf); - if (LOG.isDebugEnabled()) { - LOG.debug("Ping interval: " + this.pingInterval + "ms."); - } this.ipcUtil = new IPCUtil(conf); this.conf = conf; this.codec = getCodec(conf); this.compressor = getCompressor(conf); this.socketFactory = factory; this.clusterId = clusterId != null ? 
clusterId : HConstants.CLUSTER_ID_DEFAULT; - this.connections = new PoolMap( - getPoolType(conf), getPoolSize(conf)); + this.connections = new PoolMap(getPoolType(conf), getPoolSize(conf)); this.failedServers = new FailedServers(conf); + if (LOG.isDebugEnabled()) { + LOG.debug("Codec=" + this.codec + ", compressor=" + this.compressor + + ", tcpKeepAlive=" + this.tcpKeepAlive + + ", tcpNoDelay=" + this.tcpNoDelay + + ", maxIdleTime=" + this.maxIdleTime + + ", maxRetries=" + this.maxRetries + + ", ping interval=" + this.pingInterval + "ms."); + } + } + + /** + * Construct an IPC client for the cluster clusterId with the default SocketFactory + * @param conf configuration + * @param clusterId + */ + public RpcClient(Configuration conf, String clusterId) { + this(conf, clusterId, NetUtils.getDefaultSocketFactory(conf)); } /** @@ -1202,14 +1227,6 @@ public class HBaseClient { } } - /** - * Construct an IPC client with the default SocketFactory - * @param conf configuration - */ - public HBaseClient(Configuration conf, String clusterId) { - this(conf, clusterId, NetUtils.getDefaultSocketFactory(conf)); - } - /** * Return the pool type specified in the configuration, which must be set to * either {@link PoolType#RoundRobin} or {@link PoolType#ThreadLocal}, @@ -1217,7 +1234,7 @@ public class HBaseClient { * * For applications with many user threads, use a small round-robin pool. For * applications with few user threads, you may want to try using a - * thread-local pool. In any case, the number of {@link HBaseClient} instances + * thread-local pool. In any case, the number of {@link RpcClient} instances * should not exceed the operating system's hard limit on the number of * connections. * @@ -1252,13 +1269,8 @@ public class HBaseClient { /** Stop all threads related to this client. No further calls may be made * using this client. */ public void stop() { - if (LOG.isDebugEnabled()) { - LOG.debug("Stopping client"); - } - - if (!running.compareAndSet(true, false)) { - return; - } + if (LOG.isDebugEnabled()) LOG.debug("Stopping rpc client"); + if (!running.compareAndSet(true, false)) return; // wake up all connections synchronized (connections) { @@ -1281,11 +1293,11 @@ public class HBaseClient { * with the ticket credentials, returning the value. * Throws exceptions if there are network problems or if the remote code * threw an exception. - * @param method + * @param md * @param param * @param cells * @param addr - * @param protocol + * @param returnType * @param ticket Be careful which ticket you pass. A new user will mean a new Connection. * {@link User#getCurrent()} makes a new instance of User each time so will be a new Connection * each time. 
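// Caller-side sketch (assumed usage, not from this patch): obtain the User once and reuse
// it, so that every call keys the same ConnectionId and shares one connection:
//   User ticket = User.getCurrent(); // create once
//   rpcClient.call(md, param, cells, returnType, ticket, isa, rpcTimeout); // reuses the connection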
@@ -1294,12 +1306,13 @@ public class HBaseClient { * @throws InterruptedException * @throws IOException */ - public Pair call(Method method, Message param, CellScanner cells, - InetSocketAddress addr, Class protocol, User ticket, int rpcTimeout) - throws InterruptedException, IOException { - Call call = new Call(method, param, cells); + Pair call(MethodDescriptor md, Message param, CellScanner cells, + Message returnType, User ticket, InetSocketAddress addr, + int rpcTimeout) + throws InterruptedException, IOException { + Call call = new Call(md, param, cells, returnType); Connection connection = - getConnection(addr, protocol, ticket, rpcTimeout, call, this.codec, this.compressor); + getConnection(ticket, call, addr, rpcTimeout, this.codec, this.compressor); connection.writeRequest(call); // send the parameter boolean interrupted = false; //noinspection SynchronizationOnLocalVariableOrMethodParameter @@ -1347,11 +1360,10 @@ public class HBaseClient { if (exception instanceof ConnectException) { //connection refused; include the host:port in the error return (ConnectException)new ConnectException( - "Call to " + addr + " failed on connection exception: " + exception) - .initCause(exception); + "Call to " + addr + " failed on connection exception: " + exception).initCause(exception); } else if (exception instanceof SocketTimeoutException) { return (SocketTimeoutException)new SocketTimeoutException("Call to " + addr + - " failed on socket timeout exception: " + exception).initCause(exception); + " failed because " + exception).initCause(exception); } else { return (IOException)new IOException("Call to " + addr + " failed on local exception: " + exception).initCause(exception); @@ -1372,14 +1384,11 @@ public class HBaseClient { if (connection.isAlive() && connection.getRemoteAddress().getPort() == port && connection.getRemoteAddress().getHostName().equals(hostname)) { - if (connection.shouldCloseConnection.compareAndSet(false, true)) { - LOG.info("The server on " + hostname + ":" + port + - " is dead - closing the connection " + connection.remoteId); - connection.closeException = ioe; - connection.close(); - // We could do a connection.interrupt(), but it's safer not to do it, as the - // interrupted exception behavior is not defined nor enforced enough. - } + LOG.info("The server on " + hostname + ":" + port + + " is dead - stopping the connection " + connection.remoteId); + connection.closeConnection(); + // We could do a connection.interrupt(), but it's safer not to do it, as the + // interrupted exception behavior is not defined nor enforced enough. } } } @@ -1387,19 +1396,13 @@ public class HBaseClient { /* Get a connection from the pool, or create a new one and add it to the * pool. Connections to a given host/port are reused. */ - protected Connection getConnection(InetSocketAddress addr, Class protocol, - User ticket, int rpcTimeout, Call call, final Codec codec, final CompressionCodec compressor) + protected Connection getConnection(User ticket, Call call, InetSocketAddress addr, + int rpcTimeout, final Codec codec, final CompressionCodec compressor) throws IOException, InterruptedException { - if (!running.get()) { - // the client is stopped - throw new IOException("The client is stopped"); - } + if (!running.get()) throw new StoppedRpcClientException(); Connection connection; - /* we could avoid this allocation for each RPC by having a - * connectionsId object and with set() method. We need to manage the - * refs for keys in HashMap properly. For now its ok. 
- */ - ConnectionId remoteId = new ConnectionId(addr, protocol, ticket, rpcTimeout); + ConnectionId remoteId = + new ConnectionId(ticket, call.md.getService().getName(), addr, rpcTimeout); synchronized (connections) { connection = connections.get(remoteId); if (connection == null) { @@ -1421,40 +1424,41 @@ public class HBaseClient { } /** - * This class holds the address and the user ticket. The client connections - * to servers are uniquely identified by <remoteAddress, protocol, ticket, rpcTimeout> + * This class holds the address and the user ticket, etc. The client connections + * to servers are uniquely identified by <remoteAddress, ticket, serviceName, rpcTimeout> */ protected static class ConnectionId { final InetSocketAddress address; final User ticket; final int rpcTimeout; - Class<? extends IpcProtocol> protocol; private static final int PRIME = 16777619; + final String serviceName; - ConnectionId(InetSocketAddress address, Class<? extends IpcProtocol> protocol, - User ticket, + ConnectionId(User ticket, + String serviceName, + InetSocketAddress address, int rpcTimeout) { - this.protocol = protocol; this.address = address; this.ticket = ticket; this.rpcTimeout = rpcTimeout; + this.serviceName = serviceName; + } + + String getServiceName() { + return this.serviceName; } InetSocketAddress getAddress() { return address; } - Class<? extends IpcProtocol> getProtocol() { - return protocol; - } - User getTicket() { return ticket; } @Override public String toString() { - return this.address.toString() + "/" + this.protocol + "/" + this.ticket + "/" + + return this.address.toString() + "/" + this.serviceName + "/" + this.ticket + "/" + this.rpcTimeout; } @@ -1462,18 +1466,126 @@ public class HBaseClient { public boolean equals(Object obj) { if (obj instanceof ConnectionId) { ConnectionId id = (ConnectionId) obj; - return address.equals(id.address) && protocol == id.protocol && + return address.equals(id.address) && ((ticket != null && ticket.equals(id.ticket)) || - (ticket == id.ticket)) && rpcTimeout == id.rpcTimeout; + (ticket == id.ticket)) && rpcTimeout == id.rpcTimeout && + this.serviceName.equals(id.serviceName); } return false; } @Override // simply use the default Object#hashcode() ? public int hashCode() { - int hashcode = (address.hashCode() + PRIME * (PRIME * System.identityHashCode(protocol) ^ - (ticket == null ? 0 : ticket.hashCode()) )) ^ rpcTimeout; + int hashcode = (address.hashCode() + + PRIME * (PRIME * this.serviceName.hashCode() ^ + (ticket == null ? 0 : ticket.hashCode()) )) ^ + rpcTimeout; return hashcode; } } -} + + public static void setRpcTimeout(int t) { + rpcTimeout.set(t); + } + + public static int getRpcTimeout() { + return rpcTimeout.get(); + } + + public static void resetRpcTimeout() { + rpcTimeout.remove(); + } + + /** Make a blocking call. + * Throws exceptions if there are network problems or if the remote code + * threw an exception. + * @param md + * @param controller + * @param param + * @param returnType + * @param isa + * @param ticket Be careful which ticket you pass. A new user will mean a new Connection. + * {@link User#getCurrent()} makes a new instance of User each time so will be a new Connection + * each time. + * @param rpcTimeout + * @return A pair with the Message response and the Cell data (if any).
+ * @throws InterruptedException + * @throws IOException + */ + Message callBlockingMethod(MethodDescriptor md, RpcController controller, + Message param, Message returnType, final User ticket, final InetSocketAddress isa, + final int rpcTimeout) + throws ServiceException { + long startTime = 0; + if (LOG.isTraceEnabled()) { + startTime = System.currentTimeMillis(); + } + PayloadCarryingRpcController pcrc = (PayloadCarryingRpcController)controller; + CellScanner cells = null; + if (pcrc != null) { + cells = pcrc.cellScanner(); + // Clear it here so we don't by mistake try and use these cells when processing results. + pcrc.setCellScanner(null); + } + Pair<Message, CellScanner> val = null; + try { + val = call(md, param, cells, returnType, ticket, isa, rpcTimeout); + if (pcrc != null) { + // Shove the results into controller so they can be carried across the proxy/pb service void. + if (val.getSecond() != null) pcrc.setCellScanner(val.getSecond()); + } else if (val.getSecond() != null) { + throw new ServiceException("Client dropping data on the floor!"); + } + + if (LOG.isTraceEnabled()) { + long callTime = System.currentTimeMillis() - startTime; + LOG.trace("Call: " + md.getName() + ", callTime: " + callTime + "ms"); + } + return val.getFirst(); + } catch (Throwable e) { + throw new ServiceException(e); + } + } + + /** + * Creates a "channel" that can be used by a blocking protobuf service. Useful for setting up + * protobuf blocking stubs. + * @param sn + * @param ticket + * @param rpcTimeout + * @return A blocking rpc channel that goes via this rpc client instance. + */ + public BlockingRpcChannel createBlockingRpcChannel(final ServerName sn, + final User ticket, final int rpcTimeout) { + return new BlockingRpcChannelImplementation(this, sn, ticket, rpcTimeout); + } + + /** + * Blocking rpc channel that goes via hbase rpc. + */ + // Public so can be subclassed for tests. + public static class BlockingRpcChannelImplementation implements BlockingRpcChannel { + private final InetSocketAddress isa; + private volatile RpcClient rpcClient; + private final int rpcTimeout; + private final User ticket; + + protected BlockingRpcChannelImplementation(final RpcClient rpcClient, final ServerName sn, + final User ticket, final int rpcTimeout) { + this.isa = new InetSocketAddress(sn.getHostname(), sn.getPort()); + this.rpcClient = rpcClient; + this.rpcTimeout = rpcTimeout; + this.ticket = ticket; + } + + @Override + public Message callBlockingMethod(MethodDescriptor md, RpcController controller, + Message param, Message returnType) + throws ServiceException { + return this.rpcClient.callBlockingMethod(md, controller, param, returnType, this.ticket, + this.isa, this.rpcTimeout); + } + } +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientEngine.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java similarity index 58% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientEngine.java rename to hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java index 45d58cbf047..c240fd6e4e5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientEngine.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java @@ -1,5 +1,4 @@ /** - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -16,25 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.IpcProtocol; +import org.apache.hadoop.hbase.exceptions.HBaseIOException; -import java.io.IOException; -import java.net.InetSocketAddress; +public class StoppedRpcClientException extends HBaseIOException { + public StoppedRpcClientException() { + super(); + } -/** An RPC implementation for the client */ -@InterfaceAudience.Private -public interface RpcClientEngine { - /** Construct a client-side proxy object. */ - T getProxy(Class protocol, InetSocketAddress addr, - Configuration conf, int rpcTimeout) throws IOException; - - /** Shutdown this instance */ - void close(); - - public HBaseClient getClient(); + public StoppedRpcClientException(String msg) { + super(msg); + } } \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/TokenInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCellCodecException.java similarity index 58% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/security/TokenInfo.java rename to hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCellCodecException.java index 44b2e024663..476514fc481 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/TokenInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCellCodecException.java @@ -1,4 +1,4 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,24 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +package org.apache.hadoop.hbase.ipc; -package org.apache.hadoop.hbase.security; +public class UnsupportedCellCodecException extends FatalConnectionException { + public UnsupportedCellCodecException() { + super(); + } -import org.apache.hadoop.classification.InterfaceAudience; + public UnsupportedCellCodecException(String msg) { + super(msg); + } -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Indicates Token related information to be used in authorizing connections - * over a given RPC protocol interface. 
- */ -@Retention(RetentionPolicy.RUNTIME) -@Target(ElementType.TYPE) -@InterfaceAudience.Private -public @interface TokenInfo { - /** The type of Token.getKind() to be handled */ - String value(); -} + public UnsupportedCellCodecException(String msg, Throwable t) { + super(msg, t); + } +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterProtocol.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCompressionCodecException.java similarity index 58% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/MasterProtocol.java rename to hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCompressionCodecException.java index 17632bd21d6..bee5e7d25a5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterProtocol.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/UnsupportedCompressionCodecException.java @@ -15,15 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +package org.apache.hadoop.hbase.ipc; -package org.apache.hadoop.hbase; +public class UnsupportedCompressionCodecException extends FatalConnectionException { + public UnsupportedCompressionCodecException() { + super(); + } -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService; + public UnsupportedCompressionCodecException(String msg) { + super(msg); + } -/** - * Functions implemented by all the master protocols: e.g. {@link MasterAdminProtocol} - * and {@link MasterMonitorProtocol}. Currently, the only shared method - * {@link #isMasterRunning(com.google.protobuf.RpcController, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)} - * which is used on connection setup to check if the master has been stopped. - */ -public interface MasterProtocol extends IpcProtocol, MasterService.BlockingInterface {} \ No newline at end of file + public UnsupportedCompressionCodecException(String msg, Throwable t) { + super(msg, t); + } +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/WrongVersionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/WrongVersionException.java new file mode 100644 index 00000000000..a1b92f5811b --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/WrongVersionException.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.ipc; + +public class WrongVersionException extends FatalConnectionException { + public WrongVersionException() { + super(); + } + + public WrongVersionException(String msg) { + super(msg); + } + + public WrongVersionException(String msg, Throwable t) { + super(msg, t); + } +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 3b826b0fffa..5746cfa0c1c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -46,11 +46,8 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.hadoop.hbase.MasterAdminProtocol; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.AdminProtocol; import org.apache.hadoop.hbase.client.Append; -import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; @@ -67,6 +64,7 @@ import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest; @@ -85,6 +83,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest; @@ -105,6 +104,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService; import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor; import org.apache.hadoop.hbase.security.access.Permission; @@ -116,6 +116,7 @@ import org.apache.hadoop.hbase.util.DynamicClassLoader; import org.apache.hadoop.hbase.util.Methods; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.io.Text; +import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.token.Token; import 
com.google.common.collect.ArrayListMultimap; @@ -222,6 +223,9 @@ public final class ProtobufUtil { if (e == null) { return new IOException(se); } + if (e instanceof RemoteException) { + e = ((RemoteException)e).unwrapRemoteException(); + } return e instanceof IOException ? (IOException) e : new IOException(se); } @@ -1206,7 +1210,7 @@ public final class ProtobufUtil { * @return the result of the Get * @throws IOException */ - public static Result get(final ClientProtocol client, + public static Result get(final ClientService.BlockingInterface client, final byte[] regionName, final Get get) throws IOException { GetRequest request = RequestConverter.buildGetRequest(regionName, get); @@ -1229,7 +1233,7 @@ public final class ProtobufUtil { * @return the row or the closestRowBefore if it doesn't exist * @throws IOException */ - public static Result getRowOrBefore(final ClientProtocol client, + public static Result getRowOrBefore(final ClientService.BlockingInterface client, final byte[] regionName, final byte[] row, final byte[] family) throws IOException { GetRequest request = @@ -1254,7 +1258,7 @@ public final class ProtobufUtil { * @return true if all are loaded * @throws IOException */ - public static boolean bulkLoadHFile(final ClientProtocol client, + public static boolean bulkLoadHFile(final ClientService.BlockingInterface client, final List> familyPaths, final byte[] regionName, boolean assignSeqNum) throws IOException { BulkLoadHFileRequest request = @@ -1268,7 +1272,7 @@ public final class ProtobufUtil { } } - public static CoprocessorServiceResponse execService(final ClientProtocol client, + public static CoprocessorServiceResponse execService(final ClientService.BlockingInterface client, final CoprocessorServiceCall call, final byte[] regionName) throws IOException { CoprocessorServiceRequest request = CoprocessorServiceRequest.newBuilder() .setCall(call).setRegion( @@ -1282,8 +1286,9 @@ public final class ProtobufUtil { } } - public static CoprocessorServiceResponse execService(final MasterAdminProtocol client, - final CoprocessorServiceCall call) throws IOException { + public static CoprocessorServiceResponse execService( + final MasterAdminService.BlockingInterface client, final CoprocessorServiceCall call) + throws IOException { CoprocessorServiceRequest request = CoprocessorServiceRequest.newBuilder() .setCall(call).setRegion( RequestConverter.buildRegionSpecifier(REGION_NAME, HConstants.EMPTY_BYTE_ARRAY)).build(); @@ -1315,7 +1320,7 @@ public final class ProtobufUtil { * @return the retrieved region info * @throws IOException */ - public static HRegionInfo getRegionInfo(final AdminProtocol admin, + public static HRegionInfo getRegionInfo(final AdminService.BlockingInterface admin, final byte[] regionName) throws IOException { try { GetRegionInfoRequest request = @@ -1337,7 +1342,7 @@ public final class ProtobufUtil { * @param transitionInZK * @throws IOException */ - public static void closeRegion(final AdminProtocol admin, + public static void closeRegion(final AdminService.BlockingInterface admin, final byte[] regionName, final boolean transitionInZK) throws IOException { CloseRegionRequest closeRegionRequest = RequestConverter.buildCloseRegionRequest(regionName, transitionInZK); @@ -1358,7 +1363,8 @@ public final class ProtobufUtil { * @return true if the region is closed * @throws IOException */ - public static boolean closeRegion(final AdminProtocol admin, final byte[] regionName, + public static boolean closeRegion(final AdminService.BlockingInterface admin, + final 
byte[] regionName, final int versionOfClosingNode, final ServerName destinationServer, final boolean transitionInZK) throws IOException { CloseRegionRequest closeRegionRequest = @@ -1379,7 +1385,7 @@ public final class ProtobufUtil { * @param region * @throws IOException */ - public static void openRegion(final AdminProtocol admin, + public static void openRegion(final AdminService.BlockingInterface admin, final HRegionInfo region) throws IOException { OpenRegionRequest request = RequestConverter.buildOpenRegionRequest(region, -1); @@ -1398,7 +1404,8 @@ public final class ProtobufUtil { * @return a list of online region info * @throws IOException */ - public static List getOnlineRegions(final AdminProtocol admin) throws IOException { + public static List getOnlineRegions(final AdminService.BlockingInterface admin) + throws IOException { GetOnlineRegionRequest request = RequestConverter.buildGetOnlineRegionRequest(); GetOnlineRegionResponse response = null; try { @@ -1431,8 +1438,8 @@ public final class ProtobufUtil { * @return the server name * @throws IOException */ - public static ServerInfo getServerInfo( - final AdminProtocol admin) throws IOException { + public static ServerInfo getServerInfo(final AdminService.BlockingInterface admin) + throws IOException { GetServerInfoRequest request = RequestConverter.buildGetServerInfoRequest(); try { GetServerInfoResponse response = admin.getServerInfo(null, request); @@ -1452,8 +1459,9 @@ public final class ProtobufUtil { * @return the list of store files * @throws IOException */ - public static List getStoreFiles(final AdminProtocol admin, - final byte[] regionName, final byte[] family) throws IOException { + public static List getStoreFiles(final AdminService.BlockingInterface admin, + final byte[] regionName, final byte[] family) + throws IOException { GetStoreFileRequest request = RequestConverter.buildGetStoreFileRequest(regionName, family); try { @@ -1472,7 +1480,7 @@ public final class ProtobufUtil { * @param splitPoint * @throws IOException */ - public static void split(final AdminProtocol admin, + public static void split(final AdminService.BlockingInterface admin, final HRegionInfo hri, byte[] splitPoint) throws IOException { SplitRegionRequest request = RequestConverter.buildSplitRegionRequest(hri.getRegionName(), splitPoint); @@ -1493,7 +1501,7 @@ public final class ProtobufUtil { * two adjacent regions * @throws IOException */ - public static void mergeRegions(final AdminProtocol admin, + public static void mergeRegions(final AdminService.BlockingInterface admin, final HRegionInfo region_a, final HRegionInfo region_b, final boolean forcible) throws IOException { MergeRegionsRequest request = RequestConverter.buildMergeRegionsRequest( diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/KerberosInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/KerberosInfo.java deleted file mode 100644 index 5a15e7c3717..00000000000 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/KerberosInfo.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.security; - -import org.apache.hadoop.classification.InterfaceAudience; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Indicates Kerberos related information to be used for authorizing connections - * over a given RPC protocol interface. - */ -@Retention(RetentionPolicy.RUNTIME) -@Target(ElementType.TYPE) -@InterfaceAudience.Private -public @interface KerberosInfo { - /** Key for getting server's Kerberos principal name from Configuration */ - String serverPrincipal(); - String clientPrincipal() default ""; -} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java new file mode 100644 index 00000000000..8b61181de31 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.security; + +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; +import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.TokenIdentifier.Kind; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +/** + * Maps RPC protocol interfaces to required configuration + */ +public class SecurityInfo { + /** Maps RPC service names to authentication information */ + private static ConcurrentMap infos = new ConcurrentHashMap(); + // populate info for known services + static { + infos.put(AdminProtos.AdminService.getDescriptor().getName(), + new SecurityInfo("hbase.regionserver.kerberos.principal", Kind.HBASE_AUTH_TOKEN)); + infos.put(ClientProtos.ClientService.getDescriptor().getName(), + new SecurityInfo("hbase.regionserver.kerberos.principal", Kind.HBASE_AUTH_TOKEN)); + infos.put(MasterAdminProtos.MasterAdminService.getDescriptor().getName(), + new SecurityInfo("hbase.master.kerberos.principal", Kind.HBASE_AUTH_TOKEN)); + infos.put(MasterMonitorProtos.MasterMonitorService.getDescriptor().getName(), + new SecurityInfo("hbase.master.kerberos.principal", Kind.HBASE_AUTH_TOKEN)); + infos.put(RegionServerStatusProtos.RegionServerStatusService.getDescriptor().getName(), + new SecurityInfo("hbase.master.kerberos.principal", Kind.HBASE_AUTH_TOKEN)); + } + + /** + * Adds a security configuration for a new service name. Note that this will have no effect if + * the service name was already registered. + */ + public static void addInfo(String serviceName, SecurityInfo securityInfo) { + infos.putIfAbsent(serviceName, securityInfo); + } + + /** + * Returns the security configuration associated with the given service name. 
+ */ + public static SecurityInfo getInfo(String serviceName) { + return infos.get(serviceName); + } + + private final String serverPrincipal; + private final Kind tokenKind; + + public SecurityInfo(String serverPrincipal, Kind tokenKind) { + this.serverPrincipal = serverPrincipal; + this.tokenKind = tokenKind; + } + + public String getServerPrincipal() { + return serverPrincipal; + } + + public Kind getTokenKind() { + return tokenKind; + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java index 013bcf259a9..89da357725f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java @@ -116,7 +116,8 @@ public class RecoverableZooKeeper { // the identifier = processID@hostName identifier = ManagementFactory.getRuntimeMXBean().getName(); } - LOG.info("The identifier of this process is " + identifier); + LOG.info("Process identifier=" + identifier + + " connecting to ZooKeeper ensemble=" + quorumServers); this.identifier = identifier; this.id = Bytes.toBytes(identifier); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java index 6de3b6919e3..871e667fed5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java @@ -56,8 +56,7 @@ public class ZKConfig { * @return Properties holding mappings representing ZooKeeper config file. */ public static Properties makeZKProps(Configuration conf) { - if (conf.getBoolean(HConstants.HBASE_CONFIG_READ_ZOOKEEPER_CONFIG, - false)) { + if (conf.getBoolean(HConstants.HBASE_CONFIG_READ_ZOOKEEPER_CONFIG, false)) { LOG.warn( "Parsing ZooKeeper's " + HConstants.ZOOKEEPER_CONFIG_NAME + " file for ZK properties " + @@ -80,12 +79,9 @@ public class ZKConfig { } } } else { - if (LOG.isDebugEnabled()) { - LOG.debug( - "Skipped reading ZK properties file '" + - HConstants.ZOOKEEPER_CONFIG_NAME + - "' since '" + HConstants.HBASE_CONFIG_READ_ZOOKEEPER_CONFIG + - "' was not set to true"); + if (LOG.isTraceEnabled()) { + LOG.trace("Skipped reading ZK properties file '" + HConstants.ZOOKEEPER_CONFIG_NAME + + "' since '" + HConstants.HBASE_CONFIG_READ_ZOOKEEPER_CONFIG + "' was not set to true"); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java index 4cecb2f8480..d2f0d043a46 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java @@ -116,8 +116,9 @@ public class ZKUtil { } int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT); - LOG.debug(identifier + " opening connection to ZooKeeper with ensemble (" + - ensemble + ")"); + if (LOG.isTraceEnabled()) { + LOG.trace(identifier + " opening connection to ZooKeeper ensemble=" + ensemble); + } int retry = conf.getInt("zookeeper.recovery.retry", 3); int retryIntervalMillis = conf.getInt("zookeeper.recovery.retry.intervalmill", 1000); @@ -419,9 +420,9 @@ public class ZKUtil { Stat s = zkw.getRecoverableZooKeeper().exists(znode, zkw); boolean exists = s != null ?
true : false; if (exists) { - LOG.debug(zkw.prefix("Set watcher on existing znode " + znode)); + LOG.debug(zkw.prefix("Set watcher on existing znode=" + znode)); } else { - LOG.debug(zkw.prefix(znode+" does not exist. Watcher is set.")); + LOG.debug(zkw.prefix("Set watcher on znode that does not yet exist, " + znode)); } return exists; } catch (KeeperException e) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java index 29913551879..07b6001c057 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java @@ -78,7 +78,7 @@ public class TestSnapshotFromAdmin { // mock the master admin to our mock MasterAdminKeepAliveConnection mockMaster = Mockito.mock(MasterAdminKeepAliveConnection.class); Mockito.when(mockConnection.getConfiguration()).thenReturn(conf); - Mockito.when(mockConnection.getKeepAliveMasterAdmin()).thenReturn(mockMaster); + Mockito.when(mockConnection.getKeepAliveMasterAdminService()).thenReturn(mockMaster); // set the max wait time for the snapshot to complete TakeSnapshotResponse response = TakeSnapshotResponse.newBuilder() .setExpectedTimeout(maxWaitTime) @@ -135,7 +135,7 @@ public class TestSnapshotFromAdmin { // mock the master connection MasterAdminKeepAliveConnection master = Mockito.mock(MasterAdminKeepAliveConnection.class); - Mockito.when(mockConnection.getKeepAliveMasterAdmin()).thenReturn(master); + Mockito.when(mockConnection.getKeepAliveMasterAdminService()).thenReturn(master); TakeSnapshotResponse response = TakeSnapshotResponse.newBuilder().setExpectedTimeout(0).build(); Mockito.when( master.snapshot((RpcController) Mockito.isNull(), Mockito.any(TakeSnapshotRequest.class))) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index faf12e4fede..f2762d0995c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -572,17 +572,6 @@ public final class HConstants { */ public static int DEFAULT_HBASE_CLIENT_RETRIES_NUMBER = 10; - /** - * Parameter name for maximum attempts, used to limit the number of times the - * client will try to obtain the proxy for a given region server. - */ - public static String HBASE_CLIENT_RPC_MAXATTEMPTS = "hbase.client.rpc.maxattempts"; - - /** - * Default value of {@link #HBASE_CLIENT_RPC_MAXATTEMPTS}. - */ - public static int DEFAULT_HBASE_CLIENT_RPC_MAXATTEMPTS = 1; - /** * Parameter name for client prefetch limit, used as the maximum number of regions * info that will be prefetched. 
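The SecurityInfo registry introduced above replaces the deleted KerberosInfo and TokenInfo annotations: Kerberos principal keys and token kinds are now looked up by protobuf service name instead of being read off a protocol interface. Below is a minimal sketch of the new API; "MyCustomService" is a made-up name used purely for illustration, not a service from this patch.

    // Register auth metadata for a hypothetical custom service; the principal
    // config key shown is the same one the built-in region server services use.
    SecurityInfo.addInfo("MyCustomService",
        new SecurityInfo("hbase.regionserver.kerberos.principal", Kind.HBASE_AUTH_TOKEN));
    // Lookup is keyed by service name; getInfo returns null for unknown services.
    SecurityInfo info = SecurityInfo.getInfo("MyCustomService");

Since addInfo delegates to putIfAbsent, a name already registered in the static block above cannot be overridden by later callers.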
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java index 88cd1695a83..84d7f374812 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java @@ -22,15 +22,14 @@ import java.io.File; import java.io.FileFilter; import java.io.FileInputStream; import java.io.IOException; -import java.lang.reflect.Method; -import java.lang.reflect.Modifier; import java.net.URL; import java.util.ArrayList; import java.util.Enumeration; import java.util.HashSet; import java.util.List; import java.util.Set; -import java.util.jar.*; +import java.util.jar.JarEntry; +import java.util.jar.JarInputStream; import java.util.regex.Matcher; import java.util.regex.Pattern; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java index 639b12618c1..02aa504adfe 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java @@ -23,15 +23,17 @@ import java.util.HashMap; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterManager.ServiceType; -import org.apache.hadoop.hbase.client.AdminProtocol; -import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.exceptions.MasterNotRunningException; import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; @@ -86,12 +88,14 @@ public class DistributedHBaseCluster extends HBaseCluster { } @Override - public AdminProtocol getAdminProtocol(ServerName serverName) throws IOException { + public AdminProtos.AdminService.BlockingInterface getAdminProtocol(ServerName serverName) + throws IOException { return admin.getConnection().getAdmin(serverName); } @Override - public ClientProtocol getClientProtocol(ServerName serverName) throws IOException { + public ClientProtos.ClientService.BlockingInterface getClientProtocol(ServerName serverName) + throws IOException { return admin.getConnection().getClient(serverName); } @@ -133,13 +137,15 @@ public class DistributedHBaseCluster extends HBaseCluster { } @Override - public MasterAdminProtocol getMasterAdmin() throws IOException { + public MasterAdminProtos.MasterAdminService.BlockingInterface getMasterAdmin() + throws IOException { HConnection conn = HConnectionManager.getConnection(conf); return conn.getMasterAdmin(); } @Override - public MasterMonitorProtocol getMasterMonitor() throws IOException { + public MasterMonitorProtos.MasterMonitorService.BlockingInterface getMasterMonitor() + throws IOException { HConnection conn = HConnectionManager.getConnection(conf); return 
conn.getMasterMonitor(); } @@ -195,7 +201,8 @@ public class DistributedHBaseCluster extends HBaseCluster { return null; } - AdminProtocol client = connection.getAdmin(regionLoc.getServerName()); + AdminProtos.AdminService.BlockingInterface client = + connection.getAdmin(regionLoc.getServerName()); ServerInfo info = ProtobufUtil.getServerInfo(client); return ProtobufUtil.toServerName(info.getServerName()); } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRebalanceAndKillServersTargeted.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRebalanceAndKillServersTargeted.java index 6cc209b3e51..1f4c2793b86 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRebalanceAndKillServersTargeted.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRebalanceAndKillServersTargeted.java @@ -103,7 +103,7 @@ public class IntegrationTestRebalanceAndKillServersTargeted extends IngestIntegr @SuppressWarnings("unchecked") public void setUp() throws Exception { Configuration conf = HBaseConfiguration.create(); - conf.set(HConnectionManager.RETRIES_BY_SERVER, "true"); + conf.set(HConnectionManager.RETRIES_BY_SERVER_KEY, "true"); super.setUp(NUM_SLAVES_BASE, conf); ChaosMonkey.Policy chaosPolicy = new ChaosMonkey.PeriodicRandomActionPolicy( diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestsDriver.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestsDriver.java index 65650ea4bfd..c851b20a9d9 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestsDriver.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestsDriver.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase; import java.io.IOException; -import java.util.List; import java.util.Set; import java.util.regex.Pattern; @@ -113,5 +112,4 @@ public class IntegrationTestsDriver extends AbstractHBaseTool { return result.wasSuccessful() ? 
0 : 1; } - } diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java index 388c12c71e5..f2c6ed3c489 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java @@ -20646,6 +20646,11 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest request, com.google.protobuf.RpcCallback done); + public abstract void isMasterRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -20867,6 +20872,14 @@ public final class MasterAdminProtos { impl.isRestoreSnapshotDone(controller, request, done); } + @java.lang.Override + public void isMasterRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request, + com.google.protobuf.RpcCallback done) { + impl.isMasterRunning(controller, request, done); + } + }; } @@ -20943,6 +20956,8 @@ public final class MasterAdminProtos { return impl.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotRequest)request); case 26: return impl.isRestoreSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest)request); + case 27: + return impl.isMasterRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -21011,6 +21026,8 @@ public final class MasterAdminProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotRequest.getDefaultInstance(); case 26: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest.getDefaultInstance(); + case 27: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -21079,6 +21096,8 @@ public final class MasterAdminProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotResponse.getDefaultInstance(); case 26: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance(); + case 27: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -21222,6 +21241,11 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest request, com.google.protobuf.RpcCallback done); + public abstract void isMasterRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -21379,6 +21403,11 @@ public final class MasterAdminProtos { com.google.protobuf.RpcUtil.specializeCallback( done)); return; + 
case 27: + this.isMasterRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -21447,6 +21476,8 @@ public final class MasterAdminProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotRequest.getDefaultInstance(); case 26: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest.getDefaultInstance(); + case 27: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -21515,6 +21546,8 @@ public final class MasterAdminProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotResponse.getDefaultInstance(); case 26: return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance(); + case 27: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -21940,6 +21973,21 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance())); } + + public void isMasterRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(27), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -22082,6 +22130,11 @@ public final class MasterAdminProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse isMasterRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -22414,6 +22467,18 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse isMasterRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(27), + controller, + request, + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance()); + } + } } @@ -22686,114 +22751,117 @@ public final class MasterAdminProtos { descriptor; static { java.lang.String[] descriptorData = { - "\n\021MasterAdmin.proto\032\013hbase.proto\032\014Client" + - ".proto\"R\n\020AddColumnRequest\022\021\n\ttableName\030" + - "\001 \002(\014\022+\n\016columnFamilies\030\002 \002(\0132\023.ColumnFa" + - "milySchema\"\023\n\021AddColumnResponse\"<\n\023Delet" + - "eColumnRequest\022\021\n\ttableName\030\001 \002(\014\022\022\n\ncol" + - "umnName\030\002 \002(\014\"\026\n\024DeleteColumnResponse\"U\n" + - "\023ModifyColumnRequest\022\021\n\ttableName\030\001 \002(\014\022" + - "+\n\016columnFamilies\030\002 \002(\0132\023.ColumnFamilySc" + - "hema\"\026\n\024ModifyColumnResponse\"Z\n\021MoveRegi" + - "onRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpecif", - "ier\022#\n\016destServerName\030\002 \001(\0132\013.ServerName" + - "\"\024\n\022MoveRegionResponse\"~\n\035DispatchMergin" + - "gRegionsRequest\022!\n\007regionA\030\001 \002(\0132\020.Regio" + - "nSpecifier\022!\n\007regionB\030\002 \002(\0132\020.RegionSpec" + - "ifier\022\027\n\010forcible\030\003 \001(\010:\005false\" \n\036Dispat" + - "chMergingRegionsResponse\"7\n\023AssignRegion" + - "Request\022 \n\006region\030\001 \002(\0132\020.RegionSpecifie" + - "r\"\026\n\024AssignRegionResponse\"O\n\025UnassignReg" + + "\n\021MasterAdmin.proto\032\014Master.proto\032\013hbase" + + ".proto\032\014Client.proto\"R\n\020AddColumnRequest" + + "\022\021\n\ttableName\030\001 \002(\014\022+\n\016columnFamilies\030\002 " + + "\002(\0132\023.ColumnFamilySchema\"\023\n\021AddColumnRes" + + "ponse\"<\n\023DeleteColumnRequest\022\021\n\ttableNam" + + "e\030\001 \002(\014\022\022\n\ncolumnName\030\002 \002(\014\"\026\n\024DeleteCol" + + "umnResponse\"U\n\023ModifyColumnRequest\022\021\n\tta" + + "bleName\030\001 \002(\014\022+\n\016columnFamilies\030\002 \002(\0132\023." + + "ColumnFamilySchema\"\026\n\024ModifyColumnRespon" + + "se\"Z\n\021MoveRegionRequest\022 \n\006region\030\001 \002(\0132", + "\020.RegionSpecifier\022#\n\016destServerName\030\002 \001(" + + "\0132\013.ServerName\"\024\n\022MoveRegionResponse\"~\n\035" + + "DispatchMergingRegionsRequest\022!\n\007regionA" + + "\030\001 \002(\0132\020.RegionSpecifier\022!\n\007regionB\030\002 \002(" + + "\0132\020.RegionSpecifier\022\027\n\010forcible\030\003 \001(\010:\005f" + + "alse\" \n\036DispatchMergingRegionsResponse\"7" + + "\n\023AssignRegionRequest\022 \n\006region\030\001 \002(\0132\020." 
+ + "RegionSpecifier\"\026\n\024AssignRegionResponse\"" + + "O\n\025UnassignRegionRequest\022 \n\006region\030\001 \002(\013" + + "2\020.RegionSpecifier\022\024\n\005force\030\002 \001(\010:\005false", + "\"\030\n\026UnassignRegionResponse\"8\n\024OfflineReg" + "ionRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpeci" + - "fier\022\024\n\005force\030\002 \001(\010:\005false\"\030\n\026UnassignRe", - "gionResponse\"8\n\024OfflineRegionRequest\022 \n\006" + - "region\030\001 \002(\0132\020.RegionSpecifier\"\027\n\025Offlin" + - "eRegionResponse\"J\n\022CreateTableRequest\022!\n" + - "\013tableSchema\030\001 \002(\0132\014.TableSchema\022\021\n\tspli" + - "tKeys\030\002 \003(\014\"\025\n\023CreateTableResponse\"\'\n\022De" + - "leteTableRequest\022\021\n\ttableName\030\001 \002(\014\"\025\n\023D" + - "eleteTableResponse\"\'\n\022EnableTableRequest" + - "\022\021\n\ttableName\030\001 \002(\014\"\025\n\023EnableTableRespon" + - "se\"(\n\023DisableTableRequest\022\021\n\ttableName\030\001" + - " \002(\014\"\026\n\024DisableTableResponse\"J\n\022ModifyTa", - "bleRequest\022\021\n\ttableName\030\001 \002(\014\022!\n\013tableSc" + - "hema\030\002 \002(\0132\014.TableSchema\"\025\n\023ModifyTableR" + - "esponse\"\021\n\017ShutdownRequest\"\022\n\020ShutdownRe" + - "sponse\"\023\n\021StopMasterRequest\"\024\n\022StopMaste" + - "rResponse\"\020\n\016BalanceRequest\"&\n\017BalanceRe" + - "sponse\022\023\n\013balancerRan\030\001 \002(\010\"<\n\031SetBalanc" + - "erRunningRequest\022\n\n\002on\030\001 \002(\010\022\023\n\013synchron" + - "ous\030\002 \001(\010\"6\n\032SetBalancerRunningResponse\022" + - "\030\n\020prevBalanceValue\030\001 \001(\010\"\024\n\022CatalogScan" + - "Request\")\n\023CatalogScanResponse\022\022\n\nscanRe", - "sult\030\001 \001(\005\"-\n\033EnableCatalogJanitorReques" + - "t\022\016\n\006enable\030\001 \002(\010\"1\n\034EnableCatalogJanito" + - "rResponse\022\021\n\tprevValue\030\001 \001(\010\" \n\036IsCatalo" + - "gJanitorEnabledRequest\"0\n\037IsCatalogJanit" + - "orEnabledResponse\022\r\n\005value\030\001 \002(\010\"=\n\023Take" + - "SnapshotRequest\022&\n\010snapshot\030\001 \002(\0132\024.Snap" + - "shotDescription\"/\n\024TakeSnapshotResponse\022" + - "\027\n\017expectedTimeout\030\001 \002(\003\"\025\n\023ListSnapshot" + - "Request\"?\n\024ListSnapshotResponse\022\'\n\tsnaps" + - "hots\030\001 \003(\0132\024.SnapshotDescription\"?\n\025Dele", - "teSnapshotRequest\022&\n\010snapshot\030\001 \002(\0132\024.Sn" + - "apshotDescription\"\030\n\026DeleteSnapshotRespo" + - "nse\"@\n\026RestoreSnapshotRequest\022&\n\010snapsho" + - "t\030\001 \002(\0132\024.SnapshotDescription\"\031\n\027Restore" + - "SnapshotResponse\"?\n\025IsSnapshotDoneReques" + - "t\022&\n\010snapshot\030\001 \001(\0132\024.SnapshotDescriptio" + - "n\"U\n\026IsSnapshotDoneResponse\022\023\n\004done\030\001 \001(" + - "\010:\005false\022&\n\010snapshot\030\002 \001(\0132\024.SnapshotDes" + - "cription\"F\n\034IsRestoreSnapshotDoneRequest" + - "\022&\n\010snapshot\030\001 \001(\0132\024.SnapshotDescription", - "\"3\n\035IsRestoreSnapshotDoneResponse\022\022\n\004don" + - "e\030\001 \001(\010:\004true2\377\r\n\022MasterAdminService\0222\n\t" + - "addColumn\022\021.AddColumnRequest\032\022.AddColumn" + - "Response\022;\n\014deleteColumn\022\024.DeleteColumnR" + - "equest\032\025.DeleteColumnResponse\022;\n\014modifyC" + - "olumn\022\024.ModifyColumnRequest\032\025.ModifyColu" + - "mnResponse\0225\n\nmoveRegion\022\022.MoveRegionReq" + - "uest\032\023.MoveRegionResponse\022Y\n\026dispatchMer" + - 
"gingRegions\022\036.DispatchMergingRegionsRequ" + - "est\032\037.DispatchMergingRegionsResponse\022;\n\014", - "assignRegion\022\024.AssignRegionRequest\032\025.Ass" + - "ignRegionResponse\022A\n\016unassignRegion\022\026.Un" + - "assignRegionRequest\032\027.UnassignRegionResp" + - "onse\022>\n\rofflineRegion\022\025.OfflineRegionReq" + - "uest\032\026.OfflineRegionResponse\0228\n\013deleteTa" + - "ble\022\023.DeleteTableRequest\032\024.DeleteTableRe" + - "sponse\0228\n\013enableTable\022\023.EnableTableReque" + - "st\032\024.EnableTableResponse\022;\n\014disableTable" + - "\022\024.DisableTableRequest\032\025.DisableTableRes" + - "ponse\0228\n\013modifyTable\022\023.ModifyTableReques", - "t\032\024.ModifyTableResponse\0228\n\013createTable\022\023" + - ".CreateTableRequest\032\024.CreateTableRespons" + - "e\022/\n\010shutdown\022\020.ShutdownRequest\032\021.Shutdo" + - "wnResponse\0225\n\nstopMaster\022\022.StopMasterReq" + - "uest\032\023.StopMasterResponse\022,\n\007balance\022\017.B" + - "alanceRequest\032\020.BalanceResponse\022M\n\022setBa" + - "lancerRunning\022\032.SetBalancerRunningReques" + - "t\032\033.SetBalancerRunningResponse\022;\n\016runCat" + - "alogScan\022\023.CatalogScanRequest\032\024.CatalogS" + - "canResponse\022S\n\024enableCatalogJanitor\022\034.En", - "ableCatalogJanitorRequest\032\035.EnableCatalo" + - "gJanitorResponse\022\\\n\027isCatalogJanitorEnab" + - "led\022\037.IsCatalogJanitorEnabledRequest\032 .I" + - "sCatalogJanitorEnabledResponse\022L\n\021execMa" + - "sterService\022\032.CoprocessorServiceRequest\032" + - "\033.CoprocessorServiceResponse\0227\n\010snapshot" + - "\022\024.TakeSnapshotRequest\032\025.TakeSnapshotRes" + - "ponse\022D\n\025getCompletedSnapshots\022\024.ListSna" + - "pshotRequest\032\025.ListSnapshotResponse\022A\n\016d" + - "eleteSnapshot\022\026.DeleteSnapshotRequest\032\027.", - "DeleteSnapshotResponse\022A\n\016isSnapshotDone" + - "\022\026.IsSnapshotDoneRequest\032\027.IsSnapshotDon" + - "eResponse\022D\n\017restoreSnapshot\022\027.RestoreSn" + - "apshotRequest\032\030.RestoreSnapshotResponse\022" + - "V\n\025isRestoreSnapshotDone\022\035.IsRestoreSnap" + - "shotDoneRequest\032\036.IsRestoreSnapshotDoneR" + - "esponseBG\n*org.apache.hadoop.hbase.proto" + - "buf.generatedB\021MasterAdminProtosH\001\210\001\001\240\001\001" + "fier\"\027\n\025OfflineRegionResponse\"J\n\022CreateT" + + "ableRequest\022!\n\013tableSchema\030\001 \002(\0132\014.Table" + + "Schema\022\021\n\tsplitKeys\030\002 \003(\014\"\025\n\023CreateTable" + + "Response\"\'\n\022DeleteTableRequest\022\021\n\ttableN" + + "ame\030\001 \002(\014\"\025\n\023DeleteTableResponse\"\'\n\022Enab" + + "leTableRequest\022\021\n\ttableName\030\001 \002(\014\"\025\n\023Ena" + + "bleTableResponse\"(\n\023DisableTableRequest\022" + + "\021\n\ttableName\030\001 \002(\014\"\026\n\024DisableTableRespon", + "se\"J\n\022ModifyTableRequest\022\021\n\ttableName\030\001 " + + "\002(\014\022!\n\013tableSchema\030\002 \002(\0132\014.TableSchema\"\025" + + "\n\023ModifyTableResponse\"\021\n\017ShutdownRequest" + + "\"\022\n\020ShutdownResponse\"\023\n\021StopMasterReques" + + "t\"\024\n\022StopMasterResponse\"\020\n\016BalanceReques" + + "t\"&\n\017BalanceResponse\022\023\n\013balancerRan\030\001 \002(" + + "\010\"<\n\031SetBalancerRunningRequest\022\n\n\002on\030\001 \002" + + "(\010\022\023\n\013synchronous\030\002 \001(\010\"6\n\032SetBalancerRu" + + "nningResponse\022\030\n\020prevBalanceValue\030\001 \001(\010\"" + + "\024\n\022CatalogScanRequest\")\n\023CatalogScanResp", + 
"onse\022\022\n\nscanResult\030\001 \001(\005\"-\n\033EnableCatalo" + + "gJanitorRequest\022\016\n\006enable\030\001 \002(\010\"1\n\034Enabl" + + "eCatalogJanitorResponse\022\021\n\tprevValue\030\001 \001" + + "(\010\" \n\036IsCatalogJanitorEnabledRequest\"0\n\037" + + "IsCatalogJanitorEnabledResponse\022\r\n\005value" + + "\030\001 \002(\010\"=\n\023TakeSnapshotRequest\022&\n\010snapsho" + + "t\030\001 \002(\0132\024.SnapshotDescription\"/\n\024TakeSna" + + "pshotResponse\022\027\n\017expectedTimeout\030\001 \002(\003\"\025" + + "\n\023ListSnapshotRequest\"?\n\024ListSnapshotRes" + + "ponse\022\'\n\tsnapshots\030\001 \003(\0132\024.SnapshotDescr", + "iption\"?\n\025DeleteSnapshotRequest\022&\n\010snaps" + + "hot\030\001 \002(\0132\024.SnapshotDescription\"\030\n\026Delet" + + "eSnapshotResponse\"@\n\026RestoreSnapshotRequ" + + "est\022&\n\010snapshot\030\001 \002(\0132\024.SnapshotDescript" + + "ion\"\031\n\027RestoreSnapshotResponse\"?\n\025IsSnap" + + "shotDoneRequest\022&\n\010snapshot\030\001 \001(\0132\024.Snap" + + "shotDescription\"U\n\026IsSnapshotDoneRespons" + + "e\022\023\n\004done\030\001 \001(\010:\005false\022&\n\010snapshot\030\002 \001(\013" + + "2\024.SnapshotDescription\"F\n\034IsRestoreSnaps" + + "hotDoneRequest\022&\n\010snapshot\030\001 \001(\0132\024.Snaps", + "hotDescription\"3\n\035IsRestoreSnapshotDoneR" + + "esponse\022\022\n\004done\030\001 \001(\010:\004true2\305\016\n\022MasterAd" + + "minService\0222\n\taddColumn\022\021.AddColumnReque" + + "st\032\022.AddColumnResponse\022;\n\014deleteColumn\022\024" + + ".DeleteColumnRequest\032\025.DeleteColumnRespo" + + "nse\022;\n\014modifyColumn\022\024.ModifyColumnReques" + + "t\032\025.ModifyColumnResponse\0225\n\nmoveRegion\022\022" + + ".MoveRegionRequest\032\023.MoveRegionResponse\022" + + "Y\n\026dispatchMergingRegions\022\036.DispatchMerg" + + "ingRegionsRequest\032\037.DispatchMergingRegio", + "nsResponse\022;\n\014assignRegion\022\024.AssignRegio" + + "nRequest\032\025.AssignRegionResponse\022A\n\016unass" + + "ignRegion\022\026.UnassignRegionRequest\032\027.Unas" + + "signRegionResponse\022>\n\rofflineRegion\022\025.Of" + + "flineRegionRequest\032\026.OfflineRegionRespon" + + "se\0228\n\013deleteTable\022\023.DeleteTableRequest\032\024" + + ".DeleteTableResponse\0228\n\013enableTable\022\023.En" + + "ableTableRequest\032\024.EnableTableResponse\022;" + + "\n\014disableTable\022\024.DisableTableRequest\032\025.D" + + "isableTableResponse\0228\n\013modifyTable\022\023.Mod", + "ifyTableRequest\032\024.ModifyTableResponse\0228\n" + + "\013createTable\022\023.CreateTableRequest\032\024.Crea" + + "teTableResponse\022/\n\010shutdown\022\020.ShutdownRe" + + "quest\032\021.ShutdownResponse\0225\n\nstopMaster\022\022" + + ".StopMasterRequest\032\023.StopMasterResponse\022" + + ",\n\007balance\022\017.BalanceRequest\032\020.BalanceRes" + + "ponse\022M\n\022setBalancerRunning\022\032.SetBalance" + + "rRunningRequest\032\033.SetBalancerRunningResp" + + "onse\022;\n\016runCatalogScan\022\023.CatalogScanRequ" + + "est\032\024.CatalogScanResponse\022S\n\024enableCatal", + "ogJanitor\022\034.EnableCatalogJanitorRequest\032" + + "\035.EnableCatalogJanitorResponse\022\\\n\027isCata" + + "logJanitorEnabled\022\037.IsCatalogJanitorEnab" + + "ledRequest\032 .IsCatalogJanitorEnabledResp" + + "onse\022L\n\021execMasterService\022\032.CoprocessorS" + + "erviceRequest\032\033.CoprocessorServiceRespon" + + "se\0227\n\010snapshot\022\024.TakeSnapshotRequest\032\025.T" + + "akeSnapshotResponse\022D\n\025getCompletedSnaps" + + 
"hots\022\024.ListSnapshotRequest\032\025.ListSnapsho" + + "tResponse\022A\n\016deleteSnapshot\022\026.DeleteSnap", + "shotRequest\032\027.DeleteSnapshotResponse\022A\n\016" + + "isSnapshotDone\022\026.IsSnapshotDoneRequest\032\027" + + ".IsSnapshotDoneResponse\022D\n\017restoreSnapsh" + + "ot\022\027.RestoreSnapshotRequest\032\030.RestoreSna" + + "pshotResponse\022V\n\025isRestoreSnapshotDone\022\035" + + ".IsRestoreSnapshotDoneRequest\032\036.IsRestor" + + "eSnapshotDoneResponse\022D\n\017isMasterRunning" + + "\022\027.IsMasterRunningRequest\032\030.IsMasterRunn" + + "ingResponseBG\n*org.apache.hadoop.hbase.p" + + "rotobuf.generatedB\021MasterAdminProtosH\001\210\001", + "\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -23222,6 +23290,7 @@ public final class MasterAdminProtos { com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.getDescriptor(), org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), org.apache.hadoop.hbase.protobuf.generated.ClientProtos.getDescriptor(), }, assigner); diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterMonitorProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterMonitorProtos.java index 1f36d156caa..c97ef672a63 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterMonitorProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterMonitorProtos.java @@ -2632,6 +2632,11 @@ public final class MasterMonitorProtos { org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest request, com.google.protobuf.RpcCallback done); + public abstract void isMasterRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -2661,6 +2666,14 @@ public final class MasterMonitorProtos { impl.getClusterStatus(controller, request, done); } + @java.lang.Override + public void isMasterRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request, + com.google.protobuf.RpcCallback done) { + impl.isMasterRunning(controller, request, done); + } + }; } @@ -2689,6 +2702,8 @@ public final class MasterMonitorProtos { return impl.getTableDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest)request); case 2: return impl.getClusterStatus(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest)request); + case 3: + return impl.isMasterRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -2709,6 +2724,8 @@ public final class MasterMonitorProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest.getDefaultInstance(); case 2: return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest.getDefaultInstance(); + case 3: + 
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -2729,6 +2746,8 @@ public final class MasterMonitorProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse.getDefaultInstance(); case 2: return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -2752,6 +2771,11 @@ public final class MasterMonitorProtos { org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest request, com.google.protobuf.RpcCallback done); + public abstract void isMasterRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -2789,6 +2813,11 @@ public final class MasterMonitorProtos { com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 3: + this.isMasterRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -2809,6 +2838,8 @@ public final class MasterMonitorProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest.getDefaultInstance(); case 2: return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -2829,6 +2860,8 @@ public final class MasterMonitorProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse.getDefaultInstance(); case 2: return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -2894,6 +2927,21 @@ public final class MasterMonitorProtos { org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse.getDefaultInstance())); } + + public void isMasterRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(3), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ 
-2916,6 +2964,11 @@ public final class MasterMonitorProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse isMasterRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -2960,6 +3013,18 @@ public final class MasterMonitorProtos { org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse isMasterRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(3), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance()); + } + } } @@ -3002,25 +3067,27 @@ public final class MasterMonitorProtos { descriptor; static { java.lang.String[] descriptorData = { - "\n\023MasterMonitor.proto\032\013hbase.proto\032\023Clus" + - "terStatus.proto\"0\n\033GetSchemaAlterStatusR" + - "equest\022\021\n\ttableName\030\001 \002(\014\"P\n\034GetSchemaAl" + - "terStatusResponse\022\032\n\022yetToUpdateRegions\030" + - "\001 \001(\r\022\024\n\014totalRegions\030\002 \001(\r\"0\n\032GetTableD" + - "escriptorsRequest\022\022\n\ntableNames\030\001 \003(\t\"@\n" + - "\033GetTableDescriptorsResponse\022!\n\013tableSch" + - "ema\030\001 \003(\0132\014.TableSchema\"\031\n\027GetClusterSta" + - "tusRequest\"A\n\030GetClusterStatusResponse\022%" + - "\n\rclusterStatus\030\001 \002(\0132\016.ClusterStatus2\206\002", - "\n\024MasterMonitorService\022S\n\024getSchemaAlter" + - "Status\022\034.GetSchemaAlterStatusRequest\032\035.G" + - "etSchemaAlterStatusResponse\022P\n\023getTableD" + - "escriptors\022\033.GetTableDescriptorsRequest\032" + - "\034.GetTableDescriptorsResponse\022G\n\020getClus" + - "terStatus\022\030.GetClusterStatusRequest\032\031.Ge" + - "tClusterStatusResponseBI\n*org.apache.had" + - "oop.hbase.protobuf.generatedB\023MasterMoni" + - "torProtosH\001\210\001\001\240\001\001" + "\n\023MasterMonitor.proto\032\014Master.proto\032\013hba" + + "se.proto\032\023ClusterStatus.proto\"0\n\033GetSche" + + "maAlterStatusRequest\022\021\n\ttableName\030\001 \002(\014\"" + + "P\n\034GetSchemaAlterStatusResponse\022\032\n\022yetTo" + + "UpdateRegions\030\001 \001(\r\022\024\n\014totalRegions\030\002 \001(" + + "\r\"0\n\032GetTableDescriptorsRequest\022\022\n\ntable" + + "Names\030\001 \003(\t\"@\n\033GetTableDescriptorsRespon" + + "se\022!\n\013tableSchema\030\001 \003(\0132\014.TableSchema\"\031\n" + + "\027GetClusterStatusRequest\"A\n\030GetClusterSt" + + "atusResponse\022%\n\rclusterStatus\030\001 \002(\0132\016.Cl", + "usterStatus2\314\002\n\024MasterMonitorService\022S\n\024" + + "getSchemaAlterStatus\022\034.GetSchemaAlterSta" + + "tusRequest\032\035.GetSchemaAlterStatusRespons" + + "e\022P\n\023getTableDescriptors\022\033.GetTableDescr" + + 
"iptorsRequest\032\034.GetTableDescriptorsRespo" + + "nse\022G\n\020getClusterStatus\022\030.GetClusterStat" + + "usRequest\032\031.GetClusterStatusResponse\022D\n\017" + + "isMasterRunning\022\027.IsMasterRunningRequest" + + "\032\030.IsMasterRunningResponseBI\n*org.apache" + + ".hadoop.hbase.protobuf.generatedB\023Master", + "MonitorProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -3081,6 +3148,7 @@ public final class MasterMonitorProtos { com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.getDescriptor(), org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.getDescriptor(), }, assigner); diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java index b94ef493b61..4cdaffcb675 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java @@ -535,9 +535,9 @@ public final class RPCProtos { org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo(); org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder(); - // optional string protocol = 2 [default = "org.apache.hadoop.hbase.client.ClientProtocol"]; - boolean hasProtocol(); - String getProtocol(); + // optional string serviceName = 2; + boolean hasServiceName(); + String getServiceName(); // optional string cellBlockCodecClass = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; boolean hasCellBlockCodecClass(); @@ -589,14 +589,14 @@ public final class RPCProtos { return userInfo_; } - // optional string protocol = 2 [default = "org.apache.hadoop.hbase.client.ClientProtocol"]; - public static final int PROTOCOL_FIELD_NUMBER = 2; - private java.lang.Object protocol_; - public boolean hasProtocol() { + // optional string serviceName = 2; + public static final int SERVICENAME_FIELD_NUMBER = 2; + private java.lang.Object serviceName_; + public boolean hasServiceName() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public String getProtocol() { - java.lang.Object ref = protocol_; + public String getServiceName() { + java.lang.Object ref = serviceName_; if (ref instanceof String) { return (String) ref; } else { @@ -604,17 +604,17 @@ public final class RPCProtos { (com.google.protobuf.ByteString) ref; String s = bs.toStringUtf8(); if (com.google.protobuf.Internal.isValidUtf8(bs)) { - protocol_ = s; + serviceName_ = s; } return s; } } - private com.google.protobuf.ByteString getProtocolBytes() { - java.lang.Object ref = protocol_; + private com.google.protobuf.ByteString getServiceNameBytes() { + java.lang.Object ref = serviceName_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); - protocol_ = b; + serviceName_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; @@ -687,7 +687,7 @@ public final class RPCProtos { private void initFields() { userInfo_ = 
org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); - protocol_ = "org.apache.hadoop.hbase.client.ClientProtocol"; + serviceName_ = ""; cellBlockCodecClass_ = "org.apache.hadoop.hbase.codec.KeyValueCodec"; cellBlockCompressorClass_ = ""; } @@ -713,7 +713,7 @@ public final class RPCProtos { output.writeMessage(1, userInfo_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getProtocolBytes()); + output.writeBytes(2, getServiceNameBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, getCellBlockCodecClassBytes()); @@ -736,7 +736,7 @@ public final class RPCProtos { } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getProtocolBytes()); + .computeBytesSize(2, getServiceNameBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream @@ -774,10 +774,10 @@ public final class RPCProtos { result = result && getUserInfo() .equals(other.getUserInfo()); } - result = result && (hasProtocol() == other.hasProtocol()); - if (hasProtocol()) { - result = result && getProtocol() - .equals(other.getProtocol()); + result = result && (hasServiceName() == other.hasServiceName()); + if (hasServiceName()) { + result = result && getServiceName() + .equals(other.getServiceName()); } result = result && (hasCellBlockCodecClass() == other.hasCellBlockCodecClass()); if (hasCellBlockCodecClass()) { @@ -802,9 +802,9 @@ public final class RPCProtos { hash = (37 * hash) + USERINFO_FIELD_NUMBER; hash = (53 * hash) + getUserInfo().hashCode(); } - if (hasProtocol()) { - hash = (37 * hash) + PROTOCOL_FIELD_NUMBER; - hash = (53 * hash) + getProtocol().hashCode(); + if (hasServiceName()) { + hash = (37 * hash) + SERVICENAME_FIELD_NUMBER; + hash = (53 * hash) + getServiceName().hashCode(); } if (hasCellBlockCodecClass()) { hash = (37 * hash) + CELLBLOCKCODECCLASS_FIELD_NUMBER; @@ -937,7 +937,7 @@ public final class RPCProtos { userInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); - protocol_ = "org.apache.hadoop.hbase.client.ClientProtocol"; + serviceName_ = ""; bitField0_ = (bitField0_ & ~0x00000002); cellBlockCodecClass_ = "org.apache.hadoop.hbase.codec.KeyValueCodec"; bitField0_ = (bitField0_ & ~0x00000004); @@ -992,7 +992,7 @@ public final class RPCProtos { if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.protocol_ = protocol_; + result.serviceName_ = serviceName_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } @@ -1020,8 +1020,8 @@ public final class RPCProtos { if (other.hasUserInfo()) { mergeUserInfo(other.getUserInfo()); } - if (other.hasProtocol()) { - setProtocol(other.getProtocol()); + if (other.hasServiceName()) { + setServiceName(other.getServiceName()); } if (other.hasCellBlockCodecClass()) { setCellBlockCodecClass(other.getCellBlockCodecClass()); @@ -1077,7 +1077,7 @@ public final class RPCProtos { } case 18: { bitField0_ |= 0x00000002; - protocol_ = input.readBytes(); + serviceName_ = input.readBytes(); break; } case 26: { @@ -1186,39 +1186,39 @@ public final class RPCProtos { return userInfoBuilder_; } - // optional string protocol = 2 [default = "org.apache.hadoop.hbase.client.ClientProtocol"]; - private java.lang.Object protocol_ = "org.apache.hadoop.hbase.client.ClientProtocol"; - public boolean hasProtocol() { + // optional string serviceName = 2; + private java.lang.Object serviceName_ = ""; + public boolean 
hasServiceName() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public String getProtocol() { - java.lang.Object ref = protocol_; + public String getServiceName() { + java.lang.Object ref = serviceName_; if (!(ref instanceof String)) { String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - protocol_ = s; + serviceName_ = s; return s; } else { return (String) ref; } } - public Builder setProtocol(String value) { + public Builder setServiceName(String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; - protocol_ = value; + serviceName_ = value; onChanged(); return this; } - public Builder clearProtocol() { + public Builder clearServiceName() { bitField0_ = (bitField0_ & ~0x00000002); - protocol_ = getDefaultInstance().getProtocol(); + serviceName_ = getDefaultInstance().getServiceName(); onChanged(); return this; } - void setProtocol(com.google.protobuf.ByteString value) { + void setServiceName(com.google.protobuf.ByteString value) { bitField0_ |= 0x00000002; - protocol_ = value; + serviceName_ = value; onChanged(); } @@ -3982,25 +3982,23 @@ public final class RPCProtos { java.lang.String[] descriptorData = { "\n\tRPC.proto\032\rTracing.proto\032\013hbase.proto\"" + ":\n\017UserInformation\022\025\n\reffectiveUser\030\001 \002(" + - "\t\022\020\n\010realUser\030\002 \001(\t\"\343\001\n\020ConnectionHeader" + - "\022\"\n\010userInfo\030\001 \001(\0132\020.UserInformation\022?\n\010" + - "protocol\030\002 \001(\t:-org.apache.hadoop.hbase." + - "client.ClientProtocol\022H\n\023cellBlockCodecC" + - "lass\030\003 \001(\t:+org.apache.hadoop.hbase.code" + - "c.KeyValueCodec\022 \n\030cellBlockCompressorCl" + - "ass\030\004 \001(\t\"\037\n\rCellBlockMeta\022\016\n\006length\030\001 \001" + - "(\r\"w\n\021ExceptionResponse\022\032\n\022exceptionClas", - "sName\030\001 \001(\t\022\022\n\nstackTrace\030\002 \001(\t\022\020\n\010hostn" + - "ame\030\003 \001(\t\022\014\n\004port\030\004 \001(\005\022\022\n\ndoNotRetry\030\005 " + - "\001(\010\"\216\001\n\rRequestHeader\022\016\n\006callId\030\001 \001(\r\022\034\n" + - "\ttraceInfo\030\002 \001(\0132\t.RPCTInfo\022\022\n\nmethodNam" + - "e\030\003 \001(\t\022\024\n\014requestParam\030\004 \001(\010\022%\n\rcellBlo" + - "ckMeta\030\005 \001(\0132\016.CellBlockMeta\"n\n\016Response" + - "Header\022\016\n\006callId\030\001 \001(\r\022%\n\texception\030\002 \001(" + - "\0132\022.ExceptionResponse\022%\n\rcellBlockMeta\030\003" + - " \001(\0132\016.CellBlockMetaB<\n*org.apache.hadoo" + - "p.hbase.protobuf.generatedB\tRPCProtosH\001\240", - "\001\001" + "\t\022\020\n\010realUser\030\002 \001(\t\"\267\001\n\020ConnectionHeader" + + "\022\"\n\010userInfo\030\001 \001(\0132\020.UserInformation\022\023\n\013" + + "serviceName\030\002 \001(\t\022H\n\023cellBlockCodecClass" + + "\030\003 \001(\t:+org.apache.hadoop.hbase.codec.Ke" + + "yValueCodec\022 \n\030cellBlockCompressorClass\030" + + "\004 \001(\t\"\037\n\rCellBlockMeta\022\016\n\006length\030\001 \001(\r\"w" + + "\n\021ExceptionResponse\022\032\n\022exceptionClassNam" + + "e\030\001 \001(\t\022\022\n\nstackTrace\030\002 \001(\t\022\020\n\010hostname\030", + "\003 \001(\t\022\014\n\004port\030\004 \001(\005\022\022\n\ndoNotRetry\030\005 \001(\010\"" + + "\216\001\n\rRequestHeader\022\016\n\006callId\030\001 \001(\r\022\034\n\ttra" + + "ceInfo\030\002 \001(\0132\t.RPCTInfo\022\022\n\nmethodName\030\003 " + + "\001(\t\022\024\n\014requestParam\030\004 \001(\010\022%\n\rcellBlockMe" + + "ta\030\005 \001(\0132\016.CellBlockMeta\"n\n\016ResponseHead" + 
+ "er\022\016\n\006callId\030\001 \001(\r\022%\n\texception\030\002 \001(\0132\022." + + "ExceptionResponse\022%\n\rcellBlockMeta\030\003 \001(\013" + + "2\016.CellBlockMetaB<\n*org.apache.hadoop.hb" + + "ase.protobuf.generatedB\tRPCProtosH\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -4020,7 +4018,7 @@ public final class RPCProtos { internal_static_ConnectionHeader_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ConnectionHeader_descriptor, - new java.lang.String[] { "UserInfo", "Protocol", "CellBlockCodecClass", "CellBlockCompressorClass", }, + new java.lang.String[] { "UserInfo", "ServiceName", "CellBlockCodecClass", "CellBlockCompressorClass", }, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader.class, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader.Builder.class); internal_static_CellBlockMeta_descriptor = diff --git a/hbase-protocol/src/main/protobuf/MasterAdmin.proto b/hbase-protocol/src/main/protobuf/MasterAdmin.proto index 3e95532624b..5339e36bf2d 100644 --- a/hbase-protocol/src/main/protobuf/MasterAdmin.proto +++ b/hbase-protocol/src/main/protobuf/MasterAdmin.proto @@ -18,6 +18,8 @@ // This file contains protocol buffers that are used for MasterAdminProtocol. +import "Master.proto"; + option java_package = "org.apache.hadoop.hbase.protobuf.generated"; option java_outer_classname = "MasterAdminProtos"; option java_generic_services = true; @@ -354,7 +356,7 @@ service MasterAdminService { /** * List completed snapshots. - * @return a list of snapshot descriptors for completed snapshots + * Returns a list of snapshot descriptors for completed snapshots */ rpc getCompletedSnapshots(ListSnapshotRequest) returns(ListSnapshotResponse); @@ -379,4 +381,7 @@ service MasterAdminService { * Determine if the snapshot restore is done yet. */ rpc isRestoreSnapshotDone(IsRestoreSnapshotDoneRequest) returns(IsRestoreSnapshotDoneResponse); + + /** return true if master is available */ + rpc isMasterRunning(IsMasterRunningRequest) returns(IsMasterRunningResponse); } diff --git a/hbase-protocol/src/main/protobuf/MasterMonitor.proto b/hbase-protocol/src/main/protobuf/MasterMonitor.proto index 38738e21d54..a6054cf0230 100644 --- a/hbase-protocol/src/main/protobuf/MasterMonitor.proto +++ b/hbase-protocol/src/main/protobuf/MasterMonitor.proto @@ -17,6 +17,7 @@ */ // This file contains protocol buffers that are used for MasterMonitorProtocol. +import "Master.proto"; option java_package = "org.apache.hadoop.hbase.protobuf.generated"; option java_outer_classname = "MasterMonitorProtos"; @@ -63,4 +64,7 @@ service MasterMonitorService { /** Return cluster status. */ rpc getClusterStatus(GetClusterStatusRequest) returns(GetClusterStatusResponse); + + /** return true if master is available */ + rpc isMasterRunning(IsMasterRunningRequest) returns(IsMasterRunningResponse); } diff --git a/hbase-protocol/src/main/protobuf/RPC.proto b/hbase-protocol/src/main/protobuf/RPC.proto index e396370ff38..7c65e0fdd99 100644 --- a/hbase-protocol/src/main/protobuf/RPC.proto +++ b/hbase-protocol/src/main/protobuf/RPC.proto @@ -79,7 +79,7 @@ message UserInformation { // This is sent on connection setup after the connection preamble is sent. 
diff --git a/hbase-protocol/src/main/protobuf/RPC.proto b/hbase-protocol/src/main/protobuf/RPC.proto
index e396370ff38..7c65e0fdd99 100644
--- a/hbase-protocol/src/main/protobuf/RPC.proto
+++ b/hbase-protocol/src/main/protobuf/RPC.proto
@@ -79,7 +79,7 @@ message UserInformation {
 // This is sent on connection setup after the connection preamble is sent.
 message ConnectionHeader {
   optional UserInformation userInfo = 1;
-  optional string protocol = 2 [default = "org.apache.hadoop.hbase.client.ClientProtocol"];
+  optional string serviceName = 2;
   // Cell block codec we will use sending over optional cell blocks.  Server throws exception
   // if cannot deal.
   optional string cellBlockCodecClass = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"];
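The connection header now names the target pb service instead of a Java protocol class, and the field no longer carries a default. A hedged sketch of the client-side header build under the new field; the literal service name below is illustrative, the real value depends on which stub the connection is for:

  import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader;

  // Replaces the old setProtocol("org.apache.hadoop.hbase.client.ClientProtocol") style call.
  ConnectionHeader header = ConnectionHeader.newBuilder()
      .setServiceName("MasterMonitorService")  // illustrative; simple pb service name, not a class
      .setCellBlockCodecClass("org.apache.hadoop.hbase.codec.KeyValueCodec")
      .build();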
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionServerStatusProtocol.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionServerStatusProtocol.java
deleted file mode 100644
index 0ec10034a15..00000000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionServerStatusProtocol.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
-import org.apache.hadoop.hbase.security.TokenInfo;
-import org.apache.hadoop.hbase.security.KerberosInfo;
-
-/**
- * Protocol that a RegionServer uses to communicate its status to the Master.
- */
-@KerberosInfo(
-    serverPrincipal = "hbase.master.kerberos.principal")
-@TokenInfo("HBASE_AUTH_TOKEN")
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public interface RegionServerStatusProtocol
-extends RegionServerStatusService.BlockingInterface, IpcProtocol {}
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java
index de2e25d95fd..698e3a7a63a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java
@@ -53,10 +53,10 @@ public class ZNodeClearer {
    */
  public static void writeMyEphemeralNodeOnDisk(String fileContent) {
    String fileName = ZNodeClearer.getMyEphemeralNodeFileName();
    if (fileName == null) {
-      LOG.warn("No filename given to save the znode used, it won't be saved " +
-          "(Environment variable HBASE_ZNODE_FILE is not set).");
+      LOG.warn("Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared " +
+          "on crash by start scripts (Longer MTTR!)");
      return;
    }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
index faec41c7bef..ece1c4f5db2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
@@ -196,8 +196,6 @@ public class HFileSystem extends FilterFileSystem {
    * @return true if the interceptor was added, false otherwise.
    */
  static boolean addLocationsOrderInterceptor(Configuration conf, final ReorderBlocks lrb) {
-    LOG.debug("Starting addLocationsOrderInterceptor with class " + lrb.getClass());
-
    if (!conf.getBoolean("hbase.filesystem.reorder.blocks", true)) {  // activated by default
      LOG.debug("addLocationsOrderInterceptor configured to false");
      return false;
@@ -212,8 +210,8 @@ public class HFileSystem extends FilterFileSystem {
    }

    if (!(fs instanceof DistributedFileSystem)) {
-      LOG.warn("The file system is not a DistributedFileSystem." +
-        "Not adding block location reordering");
+      LOG.debug("The file system is not a DistributedFileSystem. " +
+          "Skipping on block location reordering");
      return false;
    }
@@ -243,7 +241,8 @@ public class HFileSystem extends FilterFileSystem {
      ClientProtocol cp1 = createReorderingProxy(namenode, lrb, conf);
      nf.set(dfsc, cp1);
-      LOG.info("Added intercepting call to namenode#getBlockLocations");
+      LOG.info("Added intercepting call to namenode#getBlockLocations so can do block reordering" +
+        " using class " + lrb.getClass());
    } catch (NoSuchFieldException e) {
      LOG.warn("Can't modify the DFSClient#namenode field to add the location reorder.", e);
      return false;
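For reference, the interceptor installed above is on by default and is silently skipped for non-HDFS filesystems, so the only operator-facing switch is the boolean key read at the top of addLocationsOrderInterceptor. A minimal sketch of disabling it; the key string is taken from the hunk above, the rest is illustrative:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  Configuration conf = HBaseConfiguration.create();
  // Defaults to true; false leaves namenode#getBlockLocations un-intercepted.
  conf.setBoolean("hbase.filesystem.reorder.blocks", false);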
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
index 1dbad25d3e2..fb6b1a686c3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
@@ -347,8 +347,7 @@ public class CacheConfig {
    * @param conf The current configuration.
    * @return The block cache or null.
    */
-  private static synchronized BlockCache instantiateBlockCache(
-      Configuration conf) {
+  private static synchronized BlockCache instantiateBlockCache(Configuration conf) {
    if (globalBlockCache != null) return globalBlockCache;
    if (blockCacheDisabled) return null;
@@ -366,14 +365,12 @@ public class CacheConfig {
    // Calculate the amount of heap to give the heap.
    MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
    long lruCacheSize = (long) (mu.getMax() * cachePercentage);
-    int blockSize = conf.getInt("hbase.offheapcache.minblocksize",
-        HConstants.DEFAULT_BLOCKSIZE);
+    int blockSize = conf.getInt("hbase.offheapcache.minblocksize", HConstants.DEFAULT_BLOCKSIZE);
    long offHeapCacheSize =
      (long) (conf.getFloat("hbase.offheapcache.percentage", (float) 0) *
          DirectMemoryUtils.getDirectMemorySize());
    if (offHeapCacheSize <= 0) {
-      String bucketCacheIOEngineName = conf
-          .get(BUCKET_CACHE_IOENGINE_KEY, null);
+      String bucketCacheIOEngineName = conf.get(BUCKET_CACHE_IOENGINE_KEY, null);
      float bucketCachePercentage = conf.getFloat(BUCKET_CACHE_SIZE_KEY, 0F);
      // A percentage of max heap size or a absolute value with unit megabytes
      long bucketCacheSize = (long) (bucketCachePercentage < 1 ? mu.getMax()
@@ -407,10 +404,9 @@ public class CacheConfig {
          throw new RuntimeException(ioex);
        }
      }
-    LOG.info("Allocating LruBlockCache with maximum size "
-      + StringUtils.humanReadableInt(lruCacheSize));
-    LruBlockCache lruCache = new LruBlockCache(lruCacheSize,
-        StoreFile.DEFAULT_BLOCKSIZE_SMALL);
+    LOG.info("Allocating LruBlockCache with maximum size " +
+        StringUtils.humanReadableInt(lruCacheSize));
+    LruBlockCache lruCache = new LruBlockCache(lruCacheSize, StoreFile.DEFAULT_BLOCKSIZE_SMALL);
    lruCache.setVictimCache(bucketCache);
    if (bucketCache != null && combinedWithLru) {
      globalBlockCache = new CombinedBlockCache(lruCache, bucketCache);
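instantiateBlockCache picks between a plain on-heap LruBlockCache and an LRU fronting a BucketCache victim cache, driven purely by configuration. A hedged sketch of steering it toward the combined path; the literal key strings are assumptions for the BUCKET_CACHE_IOENGINE_KEY and BUCKET_CACHE_SIZE_KEY constants referenced in the hunk, and only the "hbase.offheapcache.*" keys appear verbatim there:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  Configuration conf = HBaseConfiguration.create();
  conf.set("hbase.bucketcache.ioengine", "offheap");  // assumed value of BUCKET_CACHE_IOENGINE_KEY
  // Per the comment in the hunk: < 1 is a fraction of max heap, >= 1 is megabytes.
  conf.setFloat("hbase.bucketcache.size", 0.4f);      // assumed value of BUCKET_CACHE_SIZE_KEY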
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/EmptyServiceNameException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/EmptyServiceNameException.java
new file mode 100644
index 00000000000..f91ecab9fc5
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/EmptyServiceNameException.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc;
+
+@SuppressWarnings("serial")
+public class EmptyServiceNameException extends FatalConnectionException {}
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
deleted file mode 100644
index 6cbda3b9c23..00000000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
+++ /dev/null
@@ -1,2270 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.ipc;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.lang.reflect.Method;
-import java.net.BindException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.ServerSocket;
-import java.net.Socket;
-import java.net.SocketException;
-import java.net.UnknownHostException;
-import java.nio.ByteBuffer;
-import java.nio.channels.CancelledKeyException;
-import java.nio.channels.Channels;
-import java.nio.channels.ClosedChannelException;
-import java.nio.channels.ReadableByteChannel;
-import java.nio.channels.SelectionKey;
-import java.nio.channels.Selector;
-import java.nio.channels.ServerSocketChannel;
-import java.nio.channels.SocketChannel;
-import java.nio.channels.WritableByteChannel;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import javax.security.sasl.Sasl;
-import javax.security.sasl.SaslException;
-import javax.security.sasl.SaslServer;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CellScanner;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.IpcProtocol;
-import org.apache.hadoop.hbase.codec.Codec;
-import org.apache.hadoop.hbase.exceptions.CallerDisconnectedException;
-import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException;
-import org.apache.hadoop.hbase.exceptions.RegionMovedException;
-import
org.apache.hadoop.hbase.exceptions.ServerNotRunningYetException; -import org.apache.hadoop.hbase.io.ByteBufferOutputStream; -import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; -import org.apache.hadoop.hbase.monitoring.TaskMonitor; -import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.CellBlockMeta; -import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader; -import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ExceptionResponse; -import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader; -import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ResponseHeader; -import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation; -import org.apache.hadoop.hbase.security.HBaseSaslRpcServer; -import org.apache.hadoop.hbase.security.AuthMethod; -import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslDigestCallbackHandler; -import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslGssCallbackHandler; -import org.apache.hadoop.hbase.security.SaslStatus; -import org.apache.hadoop.hbase.security.SaslUtil; -import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.io.BytesWritable; -import org.apache.hadoop.io.IntWritable; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.io.WritableUtils; -import org.apache.hadoop.io.compress.CompressionCodec; -import org.apache.hadoop.ipc.Server; -import org.apache.hadoop.security.AccessControlException; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; -import org.apache.hadoop.security.authorize.AuthorizationException; -import org.apache.hadoop.security.authorize.ProxyUsers; -import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; -import org.apache.hadoop.security.token.SecretManager; -import org.apache.hadoop.security.token.SecretManager.InvalidToken; -import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.util.StringUtils; -import org.cliffc.high_scale_lib.Counter; -import org.cloudera.htrace.Sampler; -import org.cloudera.htrace.Span; -import org.cloudera.htrace.Trace; -import org.cloudera.htrace.TraceInfo; -import org.cloudera.htrace.impl.NullSpan; - -import com.google.common.base.Function; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import com.google.protobuf.CodedInputStream; -import com.google.protobuf.Message; -import com.google.protobuf.Message.Builder; -import com.google.protobuf.TextFormat; -// Uses Writables doing sasl - -/** A client for an IPC service. IPC calls take a single Protobuf message as a - * parameter, and return a single Protobuf message as their value. A service runs on - * a port and is defined by a parameter class and a value class. - * - * - *

Copied local so can fix HBASE-900. - * - * @see HBaseClient - */ -@InterfaceAudience.Private -public abstract class HBaseServer implements RpcServer { - public static final Log LOG = LogFactory.getLog("org.apache.hadoop.ipc.HBaseServer"); - private final boolean authorize; - protected boolean isSecurityEnabled; - - public static final byte CURRENT_VERSION = 0; - - /** - * How many calls/handler are allowed in the queue. - */ - private static final int DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER = 10; - - /** - * The maximum size that we can hold in the IPC queue - */ - private static final int DEFAULT_MAX_CALLQUEUE_SIZE = - 1024 * 1024 * 1024; - - static final int BUFFER_INITIAL_SIZE = 1024; - - private static final String WARN_DELAYED_CALLS = - "hbase.ipc.warn.delayedrpc.number"; - - private static final int DEFAULT_WARN_DELAYED_CALLS = 1000; - - private final int warnDelayedCalls; - - private AtomicInteger delayedCalls; - private final IPCUtil ipcUtil; - - private static final String AUTH_FAILED_FOR = "Auth failed for "; - private static final String AUTH_SUCCESSFUL_FOR = "Auth successful for "; - private static final Log AUDITLOG = - LogFactory.getLog("SecurityLogger."+Server.class.getName()); - protected SecretManager secretManager; - protected ServiceAuthorizationManager authManager; - - protected static final ThreadLocal SERVER = - new ThreadLocal(); - private volatile boolean started = false; - private static final ReflectionCache methodCache = new ReflectionCache(); - - private static final Map> PROTOCOL_CACHE = - new ConcurrentHashMap>(); - - @SuppressWarnings("unchecked") - static Class getProtocolClass( - String protocolName, Configuration conf) - throws ClassNotFoundException { - Class protocol = - PROTOCOL_CACHE.get(protocolName); - - if (protocol == null) { - protocol = (Class) - conf.getClassByName(protocolName); - PROTOCOL_CACHE.put(protocolName, protocol); - } - return protocol; - } - - /** Returns the server instance called under or null. May be called under - * {@code #call(Class, RpcRequestBody, long, MonitoredRPCHandler)} implementations, - * and under protobuf methods of parameters and return values. - * Permits applications to access the server context. - * @return HBaseServer - */ - public static RpcServer get() { - return SERVER.get(); - } - - /** This is set to Call object before Handler invokes an RPC and reset - * after the call returns. - */ - protected static final ThreadLocal CurCall = new ThreadLocal(); - - /** Returns the remote side ip address when invoked inside an RPC - * Returns null incase of an error. - * @return InetAddress - */ - public static InetAddress getRemoteIp() { - Call call = CurCall.get(); - if (call != null) { - return call.connection.socket.getInetAddress(); - } - return null; - } - /** Returns remote address as a string when invoked inside an RPC. - * Returns null in case of an error. 
- * @return String - */ - public static String getRemoteAddress() { - Call call = CurCall.get(); - if (call != null) { - return call.connection.getHostAddress(); - } - return null; - } - - protected String bindAddress; - protected int port; // port we listen on - private int handlerCount; // number of handler threads - private int priorityHandlerCount; - private int readThreads; // number of read threads - protected int maxIdleTime; // the maximum idle time after - // which a client may be - // disconnected - protected int thresholdIdleConnections; // the number of idle - // connections after which we - // will start cleaning up idle - // connections - int maxConnectionsToNuke; // the max number of - // connections to nuke - // during a cleanup - - protected MetricsHBaseServer metrics; - - protected Configuration conf; - - private int maxQueueLength; - private int maxQueueSize; - protected int socketSendBufferSize; - protected final boolean tcpNoDelay; // if T then disable Nagle's Algorithm - protected final boolean tcpKeepAlive; // if T then use keepalives - protected final long purgeTimeout; // in milliseconds - - volatile protected boolean running = true; // true while server runs - protected BlockingQueue callQueue; // queued calls - protected final Counter callQueueSize = new Counter(); - protected BlockingQueue priorityCallQueue; - - protected int highPriorityLevel; // what level a high priority call is at - - protected final List connectionList = - Collections.synchronizedList(new LinkedList()); - //maintain a list - //of client connections - private Listener listener = null; - protected Responder responder = null; - protected int numConnections = 0; - private Handler[] handlers = null; - private Handler[] priorityHandlers = null; - /** replication related queue; */ - protected BlockingQueue replicationQueue; - private int numOfReplicationHandlers = 0; - private Handler[] replicationHandlers = null; - - protected HBaseRPCErrorHandler errorHandler = null; - - /** - * A convenience method to bind to a given address and report - * better exceptions if the address is not a valid host. - * @param socket the socket to bind - * @param address the address to bind to - * @param backlog the number of connections allowed in the queue - * @throws BindException if the address can't be bound - * @throws UnknownHostException if the address isn't a valid host name - * @throws IOException other random errors from bind - */ - public static void bind(ServerSocket socket, InetSocketAddress address, - int backlog) throws IOException { - try { - socket.bind(address, backlog); - } catch (BindException e) { - BindException bindException = - new BindException("Problem binding to " + address + " : " + - e.getMessage()); - bindException.initCause(e); - throw bindException; - } catch (SocketException e) { - // If they try to bind to a different host's address, give a better - // error message. - if ("Unresolved address".equals(e.getMessage())) { - throw new UnknownHostException("Invalid hostname for server: " + - address.getHostName()); - } - throw e; - } - } - - /** A call queued for handling. */ - protected class Call implements RpcCallContext { - protected int id; // the client's call id - protected Method method; - protected Message param; // the parameter passed - // Optional cell data passed outside of protobufs. 
- protected CellScanner cellScanner; - protected Connection connection; // connection to client - protected long timestamp; // the time received when response is null - // the time served when response is not null - protected ByteBuffer response; // the response for this call - protected boolean delayResponse; - protected Responder responder; - protected boolean delayReturnValue; // if the return value should be - // set at call completion - protected long size; // size of current call - protected boolean isError; - protected TraceInfo tinfo; - - public Call(int id, Method method, Message param, CellScanner cellScanner, - Connection connection, Responder responder, long size, TraceInfo tinfo) { - this.id = id; - this.method = method; - this.param = param; - this.cellScanner = cellScanner; - this.connection = connection; - this.timestamp = System.currentTimeMillis(); - this.response = null; - this.delayResponse = false; - this.responder = responder; - this.isError = false; - this.size = size; - this.tinfo = tinfo; - } - - @Override - public String toString() { - return "callId: " + this.id + " methodName: " + - ((this.method != null)? this.method.getName(): null) + " param: " + - (this.param != null? TextFormat.shortDebugString(this.param): "") + - " from " + connection.toString(); - } - - protected synchronized void setSaslTokenResponse(ByteBuffer response) { - this.response = response; - } - - protected synchronized void setResponse(Object m, final CellScanner cells, - Throwable t, String errorMsg) { - if (this.isError) return; - if (t != null) this.isError = true; - ByteBufferOutputStream bbos = null; - try { - ResponseHeader.Builder headerBuilder = ResponseHeader.newBuilder(); - // Presume it a pb Message. Could be null. - Message result = (Message)m; - // Call id. - headerBuilder.setCallId(this.id); - if (t != null) { - ExceptionResponse.Builder exceptionBuilder = ExceptionResponse.newBuilder(); - exceptionBuilder.setExceptionClassName(t.getClass().getName()); - exceptionBuilder.setStackTrace(errorMsg); - exceptionBuilder.setDoNotRetry(t instanceof DoNotRetryIOException); - if (t instanceof RegionMovedException) { - // Special casing for this exception. This is only one carrying a payload. - // Do this instead of build a generic system for allowing exceptions carry - // any kind of payload. - RegionMovedException rme = (RegionMovedException)t; - exceptionBuilder.setHostname(rme.getHostname()); - exceptionBuilder.setPort(rme.getPort()); - } - // Set the exception as the result of the method invocation. - headerBuilder.setException(exceptionBuilder.build()); - } - ByteBuffer cellBlock = - ipcUtil.buildCellBlock(this.connection.codec, this.connection.compressionCodec, cells); - if (cellBlock != null) { - CellBlockMeta.Builder cellBlockBuilder = CellBlockMeta.newBuilder(); - // Presumes the cellBlock bytebuffer has been flipped so limit has total size in it. - cellBlockBuilder.setLength(cellBlock.limit()); - headerBuilder.setCellBlockMeta(cellBlockBuilder.build()); - } - Message header = headerBuilder.build(); - bbos = IPCUtil.write(header, result, cellBlock); - if (connection.useWrap) { - wrapWithSasl(bbos); - } - if (LOG.isDebugEnabled()) { - LOG.debug("Header " + TextFormat.shortDebugString(header) + - ", result " + (result != null? TextFormat.shortDebugString(result): "null")); - } - } catch (IOException e) { - LOG.warn("Exception while creating response " + e); - } - ByteBuffer bb = null; - if (bbos != null) { - // TODO: If SASL, maybe buffer already been flipped and written? 
- bb = bbos.getByteBuffer(); - bb.position(0); - } - this.response = bb; - } - - private void wrapWithSasl(ByteBufferOutputStream response) - throws IOException { - if (connection.useSasl) { - // getByteBuffer calls flip() - ByteBuffer buf = response.getByteBuffer(); - byte[] token; - // synchronization may be needed since there can be multiple Handler - // threads using saslServer to wrap responses. - synchronized (connection.saslServer) { - token = connection.saslServer.wrap(buf.array(), - buf.arrayOffset(), buf.remaining()); - } - if (LOG.isDebugEnabled()) - LOG.debug("Adding saslServer wrapped token of size " + token.length - + " as call response."); - buf.clear(); - DataOutputStream saslOut = new DataOutputStream(response); - saslOut.writeInt(token.length); - saslOut.write(token, 0, token.length); - } - } - - @Override - public synchronized void endDelay(Object result) throws IOException { - assert this.delayResponse; - assert this.delayReturnValue || result == null; - this.delayResponse = false; - delayedCalls.decrementAndGet(); - if (this.delayReturnValue) { - this.setResponse(result, null, null, null); - } - this.responder.doRespond(this); - } - - @Override - public synchronized void endDelay() throws IOException { - this.endDelay(null); - } - - @Override - public synchronized void startDelay(boolean delayReturnValue) { - assert !this.delayResponse; - this.delayResponse = true; - this.delayReturnValue = delayReturnValue; - int numDelayed = delayedCalls.incrementAndGet(); - if (numDelayed > warnDelayedCalls) { - LOG.warn("Too many delayed calls: limit " + warnDelayedCalls + - " current " + numDelayed); - } - } - - @Override - public synchronized void endDelayThrowing(Throwable t) throws IOException { - this.setResponse(null, null, t, StringUtils.stringifyException(t)); - this.delayResponse = false; - this.sendResponseIfReady(); - } - - @Override - public synchronized boolean isDelayed() { - return this.delayResponse; - } - - @Override - public synchronized boolean isReturnValueDelayed() { - return this.delayReturnValue; - } - - @Override - public void throwExceptionIfCallerDisconnected() throws CallerDisconnectedException { - if (!connection.channel.isOpen()) { - long afterTime = System.currentTimeMillis() - timestamp; - throw new CallerDisconnectedException( - "Aborting call " + this + " after " + afterTime + " ms, since " + - "caller disconnected"); - } - } - - public long getSize() { - return this.size; - } - - /** - * If we have a response, and delay is not set, then respond - * immediately. Otherwise, do not respond to client. This is - * called the by the RPC code in the context of the Handler thread. - */ - public synchronized void sendResponseIfReady() throws IOException { - if (!this.delayResponse) { - this.responder.doRespond(this); - } - } - } - - /** Listens on the socket. 
Creates jobs for the handler threads*/ - private class Listener extends Thread { - - private ServerSocketChannel acceptChannel = null; //the accept channel - private Selector selector = null; //the selector that we use for the server - private Reader[] readers = null; - private int currentReader = 0; - private InetSocketAddress address; //the address we bind at - private Random rand = new Random(); - private long lastCleanupRunTime = 0; //the last time when a cleanup connec- - //-tion (for idle connections) ran - private long cleanupInterval = 10000; //the minimum interval between - //two cleanup runs - private int backlogLength = conf.getInt("ipc.server.listen.queue.size", 128); - - private ExecutorService readPool; - - public Listener() throws IOException { - address = new InetSocketAddress(bindAddress, port); - // Create a new server socket and set to non blocking mode - acceptChannel = ServerSocketChannel.open(); - acceptChannel.configureBlocking(false); - - // Bind the server socket to the local host and port - bind(acceptChannel.socket(), address, backlogLength); - port = acceptChannel.socket().getLocalPort(); //Could be an ephemeral port - // create a selector; - selector= Selector.open(); - - readers = new Reader[readThreads]; - readPool = Executors.newFixedThreadPool(readThreads, - new ThreadFactoryBuilder().setNameFormat( - "IPC Reader %d on port " + port).setDaemon(true).build()); - for (int i = 0; i < readThreads; ++i) { - Reader reader = new Reader(); - readers[i] = reader; - readPool.execute(reader); - } - LOG.info(getName() + ": started " + readThreads + " reader(s) in Listener."); - - // Register accepts on the server socket with the selector. - acceptChannel.register(selector, SelectionKey.OP_ACCEPT); - this.setName("IPC Server listener on " + port); - this.setDaemon(true); - } - - - private class Reader implements Runnable { - private volatile boolean adding = false; - private final Selector readSelector; - - Reader() throws IOException { - this.readSelector = Selector.open(); - } - public void run() { - try { - doRunLoop(); - } finally { - try { - readSelector.close(); - } catch (IOException ioe) { - LOG.error(getName() + ": error closing read selector in " + getName(), ioe); - } - } - } - - private synchronized void doRunLoop() { - while (running) { - SelectionKey key = null; - try { - readSelector.select(); - while (adding) { - this.wait(1000); - } - - Iterator iter = readSelector.selectedKeys().iterator(); - while (iter.hasNext()) { - key = iter.next(); - iter.remove(); - if (key.isValid()) { - if (key.isReadable()) { - doRead(key); - } - } - key = null; - } - } catch (InterruptedException e) { - if (running) { // unexpected -- log it - LOG.info(getName() + ": unexpectedly interrupted: " + - StringUtils.stringifyException(e)); - } - } catch (IOException ex) { - LOG.error(getName() + ": error in Reader", ex); - } - } - } - - /** - * This gets reader into the state that waits for the new channel - * to be registered with readSelector. 
If it was waiting in select() - * the thread will be woken up, otherwise whenever select() is called - * it will return even if there is nothing to read and wait - * in while(adding) for finishAdd call - */ - public void startAdd() { - adding = true; - readSelector.wakeup(); - } - - public synchronized SelectionKey registerChannel(SocketChannel channel) - throws IOException { - return channel.register(readSelector, SelectionKey.OP_READ); - } - - public synchronized void finishAdd() { - adding = false; - this.notify(); - } - } - - /** cleanup connections from connectionList. Choose a random range - * to scan and also have a limit on the number of the connections - * that will be cleanedup per run. The criteria for cleanup is the time - * for which the connection was idle. If 'force' is true then all - * connections will be looked at for the cleanup. - * @param force all connections will be looked at for cleanup - */ - private void cleanupConnections(boolean force) { - if (force || numConnections > thresholdIdleConnections) { - long currentTime = System.currentTimeMillis(); - if (!force && (currentTime - lastCleanupRunTime) < cleanupInterval) { - return; - } - int start = 0; - int end = numConnections - 1; - if (!force) { - start = rand.nextInt() % numConnections; - end = rand.nextInt() % numConnections; - int temp; - if (end < start) { - temp = start; - start = end; - end = temp; - } - } - int i = start; - int numNuked = 0; - while (i <= end) { - Connection c; - synchronized (connectionList) { - try { - c = connectionList.get(i); - } catch (Exception e) {return;} - } - if (c.timedOut(currentTime)) { - if (LOG.isDebugEnabled()) - LOG.debug(getName() + ": disconnecting client " + c.getHostAddress()); - closeConnection(c); - numNuked++; - end--; - //noinspection UnusedAssignment - c = null; - if (!force && numNuked == maxConnectionsToNuke) break; - } - else i++; - } - lastCleanupRunTime = System.currentTimeMillis(); - } - } - - @Override - public void run() { - LOG.info(getName() + ": starting"); - SERVER.set(HBaseServer.this); - - while (running) { - SelectionKey key = null; - try { - selector.select(); // FindBugs IS2_INCONSISTENT_SYNC - Iterator iter = selector.selectedKeys().iterator(); - while (iter.hasNext()) { - key = iter.next(); - iter.remove(); - try { - if (key.isValid()) { - if (key.isAcceptable()) - doAccept(key); - } - } catch (IOException ignored) { - } - key = null; - } - } catch (OutOfMemoryError e) { - if (errorHandler != null) { - if (errorHandler.checkOOME(e)) { - LOG.info(getName() + ": exiting on OutOfMemoryError"); - closeCurrentConnection(key, e); - cleanupConnections(true); - return; - } - } else { - // we can run out of memory if we have too many threads - // log the event and sleep for a minute and give - // some thread(s) a chance to finish - LOG.warn(getName() + ": OutOfMemoryError in server select", e); - closeCurrentConnection(key, e); - cleanupConnections(true); - try { Thread.sleep(60000); } catch (Exception ignored) {} - } - } catch (Exception e) { - closeCurrentConnection(key, e); - } - cleanupConnections(false); - } - LOG.info(getName() + ": stopping"); - - synchronized (this) { - try { - acceptChannel.close(); - selector.close(); - } catch (IOException ignored) { } - - selector= null; - acceptChannel= null; - - // clean up all connections - while (!connectionList.isEmpty()) { - closeConnection(connectionList.remove(0)); - } - } - } - - private void closeCurrentConnection(SelectionKey key, Throwable e) { - if (key != null) { - Connection c = 
(Connection)key.attachment(); - if (c != null) { - if (LOG.isDebugEnabled()) { - LOG.debug(getName() + ": disconnecting client " + c.getHostAddress() + - (e != null ? " on error " + e.getMessage() : "")); - } - closeConnection(c); - key.attach(null); - } - } - } - - InetSocketAddress getAddress() { - return (InetSocketAddress)acceptChannel.socket().getLocalSocketAddress(); - } - - void doAccept(SelectionKey key) throws IOException, OutOfMemoryError { - Connection c; - ServerSocketChannel server = (ServerSocketChannel) key.channel(); - - SocketChannel channel; - while ((channel = server.accept()) != null) { - channel.configureBlocking(false); - channel.socket().setTcpNoDelay(tcpNoDelay); - channel.socket().setKeepAlive(tcpKeepAlive); - - Reader reader = getReader(); - try { - reader.startAdd(); - SelectionKey readKey = reader.registerChannel(channel); - c = getConnection(channel, System.currentTimeMillis()); - readKey.attach(c); - synchronized (connectionList) { - connectionList.add(numConnections, c); - numConnections++; - } - if (LOG.isDebugEnabled()) - LOG.debug(getName() + ": connection from " + c.toString() + - "; # active connections: " + numConnections + - "; # queued calls: " + callQueue.size()); - } finally { - reader.finishAdd(); - } - } - } - - void doRead(SelectionKey key) throws InterruptedException { - int count = 0; - Connection c = (Connection)key.attachment(); - if (c == null) { - return; - } - c.setLastContact(System.currentTimeMillis()); - try { - count = c.readAndProcess(); - } catch (InterruptedException ieo) { - throw ieo; - } catch (Exception e) { - LOG.warn(getName() + ": count of bytes read: " + count, e); - count = -1; //so that the (count < 0) block is executed - } - if (count < 0) { - if (LOG.isDebugEnabled()) { - LOG.debug(getName() + ": disconnecting client " + c.getHostAddress() + - ", because count=" + count + - ". Number of active connections: " + numConnections); - } - closeConnection(c); - // c = null; - } else { - c.setLastContact(System.currentTimeMillis()); - } - } - - synchronized void doStop() { - if (selector != null) { - selector.wakeup(); - Thread.yield(); - } - if (acceptChannel != null) { - try { - acceptChannel.socket().close(); - } catch (IOException e) { - LOG.info(getName() + ": exception in closing listener socket. " + e); - } - } - readPool.shutdownNow(); - } - - // The method that will return the next reader to work with - // Simplistic implementation of round robin for now - Reader getReader() { - currentReader = (currentReader + 1) % readers.length; - return readers[currentReader]; - } - } - - // Sends responses of RPC back to clients. - protected class Responder extends Thread { - private final Selector writeSelector; - private int pending; // connections waiting to register - - Responder() throws IOException { - this.setName("IPC Server Responder"); - this.setDaemon(true); - writeSelector = Selector.open(); // create a selector - pending = 0; - } - - @Override - public void run() { - LOG.info(getName() + ": starting"); - SERVER.set(HBaseServer.this); - try { - doRunLoop(); - } finally { - LOG.info(getName() + ": stopping"); - try { - writeSelector.close(); - } catch (IOException ioe) { - LOG.error(getName() + ": couldn't close write selector", ioe); - } - } - } - - private void doRunLoop() { - long lastPurgeTime = 0; // last check for old calls. - - while (running) { - try { - waitPending(); // If a channel is being registered, wait. 
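// [Editor's note] Not part of the original patch: a minimal standalone sketch
// of the wakeup-before-register handshake used around this waitPending() /
// select() pair. A thread blocked in select() holds the selector's key-set
// locks, so a handler wanting to register a channel bumps a pending counter,
// wakes the selector, registers, then signals; the selecting thread parks in
// awaitPending() until the registration completes. All names are illustrative.
import java.io.IOException;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.SocketChannel;

class RegistrationGate {
  private final Selector selector;
  private int pending = 0; // channels waiting to register

  RegistrationGate(Selector selector) { this.selector = selector; }

  /** Handler side: safely register with a selector another thread selects on. */
  void register(SocketChannel ch, Object attachment) throws IOException {
    synchronized (this) { pending++; }
    try {
      selector.wakeup(); // unblock select() so register() can acquire its locks
      ch.register(selector, SelectionKey.OP_WRITE, attachment);
    } finally {
      synchronized (this) { pending--; notifyAll(); }
    }
  }

  /** Selector side: called before re-entering select(). */
  synchronized void awaitPending() throws InterruptedException {
    while (pending > 0) {
      wait();
    }
  }
}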
- writeSelector.select(purgeTimeout); - Iterator iter = writeSelector.selectedKeys().iterator(); - while (iter.hasNext()) { - SelectionKey key = iter.next(); - iter.remove(); - try { - if (key.isValid() && key.isWritable()) { - doAsyncWrite(key); - } - } catch (IOException e) { - LOG.info(getName() + ": asyncWrite", e); - } - } - long now = System.currentTimeMillis(); - if (now < lastPurgeTime + purgeTimeout) { - continue; - } - lastPurgeTime = now; - // - // If there were some calls that have not been sent out for a - // long time, discard them. - // - if (LOG.isDebugEnabled()) LOG.debug(getName() + ": checking for old call responses."); - ArrayList calls; - - // get the list of channels from list of keys. - synchronized (writeSelector.keys()) { - calls = new ArrayList(writeSelector.keys().size()); - iter = writeSelector.keys().iterator(); - while (iter.hasNext()) { - SelectionKey key = iter.next(); - Call call = (Call)key.attachment(); - if (call != null && key.channel() == call.connection.channel) { - calls.add(call); - } - } - } - - for(Call call : calls) { - try { - doPurge(call, now); - } catch (IOException e) { - LOG.warn(getName() + ": error in purging old calls " + e); - } - } - } catch (OutOfMemoryError e) { - if (errorHandler != null) { - if (errorHandler.checkOOME(e)) { - LOG.info(getName() + ": exiting on OutOfMemoryError"); - return; - } - } else { - // - // we can run out of memory if we have too many threads - // log the event and sleep for a minute and give - // some thread(s) a chance to finish - // - LOG.warn(getName() + ": OutOfMemoryError in server select", e); - try { Thread.sleep(60000); } catch (Exception ignored) {} - } - } catch (Exception e) { - LOG.warn(getName() + ": exception in Responder " + - StringUtils.stringifyException(e)); - } - } - LOG.info(getName() + ": stopped"); - } - - private void doAsyncWrite(SelectionKey key) throws IOException { - Call call = (Call)key.attachment(); - if (call == null) { - return; - } - if (key.channel() != call.connection.channel) { - throw new IOException("doAsyncWrite: bad channel"); - } - - synchronized(call.connection.responseQueue) { - if (processResponse(call.connection.responseQueue, false)) { - try { - key.interestOps(0); - } catch (CancelledKeyException e) { - /* The Listener/reader might have closed the socket. - * We don't explicitly cancel the key, so not sure if this will - * ever fire. - * This warning could be removed. - */ - LOG.warn("Exception while changing ops : " + e); - } - } - } - } - - // - // Remove calls that have been pending in the responseQueue - // for a long time. - // - private void doPurge(Call call, long now) throws IOException { - synchronized (call.connection.responseQueue) { - Iterator iter = call.connection.responseQueue.listIterator(0); - while (iter.hasNext()) { - Call nextCall = iter.next(); - if (now > nextCall.timestamp + purgeTimeout) { - closeConnection(nextCall.connection); - break; - } - } - } - } - - // Processes one response. Returns true if there are no more pending - // data for this channel. - // - private boolean processResponse(final LinkedList responseQueue, boolean inHandler) - throws IOException { - boolean error = true; - boolean done = false; // there is more data for this channel. 
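// [Editor's note] Not part of the original patch: processResponse() here
// implements a non-blocking flush with requeue-at-head semantics -- write what
// the socket accepts, and if the buffer is only partially drained, push it
// back to the head of the queue for the Responder to finish on OP_WRITE. The
// same discipline in isolation (illustrative names; Deque<ByteBuffer> stands
// in for the per-connection response queue of Calls):
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.SocketChannel;
import java.util.Deque;

final class FlushSketch {
  /** @return true when nothing remains queued for this channel. */
  static boolean flushHead(Deque<ByteBuffer> queue, SocketChannel ch) throws IOException {
    ByteBuffer head = queue.pollFirst();
    if (head == null) {
      return true;  // queue already drained
    }
    ch.write(head); // non-blocking: writes only what the socket accepts
    if (head.hasRemaining()) {
      queue.addFirst(head); // put it back; finish it when the channel is writable again
      return false;
    }
    return queue.isEmpty();
  }
}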
- int numElements; - Call call = null; - try { - //noinspection SynchronizationOnLocalVariableOrMethodParameter - synchronized (responseQueue) { - // - // If there are no items for this channel, then we are done - // - numElements = responseQueue.size(); - if (numElements == 0) { - error = false; - return true; // no more data for this channel. - } - // - // Extract the first call - // - call = responseQueue.removeFirst(); - SocketChannel channel = call.connection.channel; - // - // Send as much data as we can in the non-blocking fashion - // - int numBytes = channelWrite(channel, call.response); - if (numBytes < 0) { - return true; - } - if (!call.response.hasRemaining()) { - call.connection.decRpcCount(); - //noinspection RedundantIfStatement - if (numElements == 1) { // last call fully processes. - done = true; // no more data for this channel. - } else { - done = false; // more calls pending to be sent. - } - if (LOG.isDebugEnabled()) { - LOG.debug(getName() + ": callId: " + call.id + " sent, wrote " + numBytes + - " bytes."); - } - } else { - // - // If we were unable to write the entire response out, then - // insert in Selector queue. - // - call.connection.responseQueue.addFirst(call); - - if (inHandler) { - // set the serve time when the response has to be sent later - call.timestamp = System.currentTimeMillis(); - if (enqueueInSelector(call)) - done = true; - } - if (LOG.isDebugEnabled()) { - LOG.debug(getName() + call.toString() + " partially sent, wrote " + - numBytes + " bytes."); - } - } - error = false; // everything went off well - } - } finally { - if (error && call != null) { - LOG.warn(getName() + call.toString() + ": output error"); - done = true; // error. no more data for this channel. - closeConnection(call.connection); - } - } - return done; - } - - // - // Enqueue for background thread to send responses out later. - // - private boolean enqueueInSelector(Call call) throws IOException { - boolean done = false; - incPending(); - try { - // Wake up the thread blocked on select, only then can the call - // to channel.register() complete. - SocketChannel channel = call.connection.channel; - writeSelector.wakeup(); - channel.register(writeSelector, SelectionKey.OP_WRITE, call); - } catch (ClosedChannelException e) { - //It's OK. Channel might be closed else where. - done = true; - } finally { - decPending(); - } - return done; - } - - // - // Enqueue a response from the application. - // - void doRespond(Call call) throws IOException { - // set the serve time when the response has to be sent later - call.timestamp = System.currentTimeMillis(); - - boolean doRegister = false; - synchronized (call.connection.responseQueue) { - call.connection.responseQueue.addLast(call); - if (call.connection.responseQueue.size() == 1) { - doRegister = !processResponse(call.connection.responseQueue, false); - } - } - if (doRegister) { - enqueueInSelector(call); - } - } - - private synchronized void incPending() { // call waiting to be enqueued. - pending++; - } - - private synchronized void decPending() { // call done enqueueing. - pending--; - notify(); - } - - private synchronized void waitPending() throws InterruptedException { - while (pending > 0) { - wait(); - } - } - } - - @SuppressWarnings("serial") - public static class CallQueueTooBigException extends IOException { - CallQueueTooBigException() { - super(); - } - } - - private Function, Integer> qosFunction = null; - - /** - * Gets the QOS level for this call. 
If it is higher than the highPriorityLevel and there
- * are priorityHandlers available it will be processed in its own thread set.
- *
- * @param newFunc the QoS function to install
- */
- @Override
- public void setQosFunction(Function<Pair<RequestHeader, Message>, Integer> newFunc) {
-   qosFunction = newFunc;
- }
-
- protected int getQosLevel(Pair<RequestHeader, Message> headerAndParam) {
-   if (qosFunction == null) return 0;
-   Integer res = qosFunction.apply(headerAndParam);
-   return res == null? 0: res;
- }
-
- /** Reads calls from a connection and queues them for handling. */
- public class Connection {
-   // If initial preamble with version and magic has been read or not.
-   private boolean connectionPreambleRead = false;
-   // If the connection header has been read or not.
-   private boolean connectionHeaderRead = false;
-   protected SocketChannel channel;
-   private ByteBuffer data;
-   private ByteBuffer dataLengthBuffer;
-   protected final LinkedList<Call> responseQueue;
-   private volatile int rpcCount = 0; // number of outstanding rpcs
-   private long lastContact;
-   private InetAddress addr;
-   protected Socket socket;
-   // Cache the remote host & port info so that even if the socket is
-   // disconnected, we can say where it used to connect to.
-   protected String hostAddress;
-   protected int remotePort;
-   ConnectionHeader connectionHeader;
-   /**
-    * Codec the client asked us to use.
-    */
-   private Codec codec;
-   /**
-    * Compression codec the client asked us to use.
-    */
-   private CompressionCodec compressionCodec;
-   Class<? extends IpcProtocol> protocol;
-   protected UserGroupInformation user = null;
-   private AuthMethod authMethod;
-   private boolean saslContextEstablished;
-   private boolean skipInitialSaslHandshake;
-   private ByteBuffer unwrappedData;
-   // When is this set? FindBugs wants to know! Says NP
-   private ByteBuffer unwrappedDataLengthBuffer;
-   boolean useSasl;
-   SaslServer saslServer;
-   private boolean useWrap = false;
-   // Fake 'call' for failed authorization response
-   private static final int AUTHORIZATION_FAILED_CALLID = -1;
-   private final Call authFailedCall =
-     new Call(AUTHORIZATION_FAILED_CALLID, null, null, null, this, null, 0, null);
-   private ByteArrayOutputStream authFailedResponse =
-     new ByteArrayOutputStream();
-   // Fake 'call' for SASL context setup
-   private static final int SASL_CALLID = -33;
-   private final Call saslCall =
-     new Call(SASL_CALLID, null, null, null, this, null, 0, null);
-
-   public UserGroupInformation attemptingUser = null; // user name before auth
-
-   public Connection(SocketChannel channel, long lastContact) {
-     this.channel = channel;
-     this.lastContact = lastContact;
-     this.data = null;
-     this.dataLengthBuffer = ByteBuffer.allocate(4);
-     this.socket = channel.socket();
-     this.addr = socket.getInetAddress();
-     if (addr == null) {
-       this.hostAddress = "*Unknown*";
-     } else {
-       this.hostAddress = addr.getHostAddress();
-     }
-     this.remotePort = socket.getPort();
-     this.responseQueue = new LinkedList<Call>();
-     if (socketSendBufferSize != 0) {
-       try {
-         socket.setSendBufferSize(socketSendBufferSize);
-       } catch (IOException e) {
-         LOG.warn("Connection: unable to set socket send buffer size to " +
-           socketSendBufferSize);
-       }
-     }
-   }
-
-   @Override
-   public String toString() {
-     return getHostAddress() + ":" + remotePort;
-   }
-
-   public String getHostAddress() {
-     return hostAddress;
-   }
-
-   public InetAddress getHostInetAddress() {
-     return addr;
-   }
-
-   public int getRemotePort() {
-     return remotePort;
-   }
-
-   public void setLastContact(long lastContact) {
-     this.lastContact = lastContact;
-   }
-
-   public long getLastContact() {
-     return lastContact;
-   }
-
-   /* Return
true if the connection has no outstanding rpc */ - private boolean isIdle() { - return rpcCount == 0; - } - - /* Decrement the outstanding RPC count */ - protected void decRpcCount() { - rpcCount--; - } - - /* Increment the outstanding RPC count */ - protected void incRpcCount() { - rpcCount++; - } - - protected boolean timedOut(long currentTime) { - return isIdle() && currentTime - lastContact > maxIdleTime; - } - - private UserGroupInformation getAuthorizedUgi(String authorizedId) - throws IOException { - if (authMethod == AuthMethod.DIGEST) { - TokenIdentifier tokenId = HBaseSaslRpcServer.getIdentifier(authorizedId, - secretManager); - UserGroupInformation ugi = tokenId.getUser(); - if (ugi == null) { - throw new AccessControlException( - "Can't retrieve username from tokenIdentifier."); - } - ugi.addTokenIdentifier(tokenId); - return ugi; - } else { - return UserGroupInformation.createRemoteUser(authorizedId); - } - } - - private void saslReadAndProcess(byte[] saslToken) throws IOException, - InterruptedException { - if (saslContextEstablished) { - if (LOG.isDebugEnabled()) - LOG.debug("Have read input token of size " + saslToken.length - + " for processing by saslServer.unwrap()"); - - if (!useWrap) { - processOneRpc(saslToken); - } else { - byte[] plaintextData = saslServer.unwrap(saslToken, 0, - saslToken.length); - processUnwrappedData(plaintextData); - } - } else { - byte[] replyToken = null; - try { - if (saslServer == null) { - switch (authMethod) { - case DIGEST: - if (secretManager == null) { - throw new AccessControlException( - "Server is not configured to do DIGEST authentication."); - } - saslServer = Sasl.createSaslServer(AuthMethod.DIGEST - .getMechanismName(), null, SaslUtil.SASL_DEFAULT_REALM, - SaslUtil.SASL_PROPS, new SaslDigestCallbackHandler( - secretManager, this)); - break; - default: - UserGroupInformation current = UserGroupInformation - .getCurrentUser(); - String fullName = current.getUserName(); - if (LOG.isDebugEnabled()) { - LOG.debug("Kerberos principal name is " + fullName); - } - final String names[] = SaslUtil.splitKerberosName(fullName); - if (names.length != 3) { - throw new AccessControlException( - "Kerberos principal name does NOT have the expected " - + "hostname part: " + fullName); - } - current.doAs(new PrivilegedExceptionAction() { - @Override - public Object run() throws SaslException { - saslServer = Sasl.createSaslServer(AuthMethod.KERBEROS - .getMechanismName(), names[0], names[1], - SaslUtil.SASL_PROPS, new SaslGssCallbackHandler()); - return null; - } - }); - } - if (saslServer == null) - throw new AccessControlException( - "Unable to find SASL server implementation for " - + authMethod.getMechanismName()); - if (LOG.isDebugEnabled()) { - LOG.debug("Created SASL server with mechanism = " + authMethod.getMechanismName()); - } - } - if (LOG.isDebugEnabled()) { - LOG.debug("Have read input token of size " + saslToken.length - + " for processing by saslServer.evaluateResponse()"); - } - replyToken = saslServer.evaluateResponse(saslToken); - } catch (IOException e) { - IOException sendToClient = e; - Throwable cause = e; - while (cause != null) { - if (cause instanceof InvalidToken) { - sendToClient = (InvalidToken) cause; - break; - } - cause = cause.getCause(); - } - doRawSaslReply(SaslStatus.ERROR, null, sendToClient.getClass().getName(), - sendToClient.getLocalizedMessage()); - metrics.authenticationFailure(); - String clientIP = this.toString(); - // attempting user could be null - AUDITLOG.warn(AUTH_FAILED_FOR + clientIP + ":" + 
attemptingUser); - throw e; - } - if (replyToken != null) { - if (LOG.isDebugEnabled()) { - LOG.debug("Will send token of size " + replyToken.length - + " from saslServer."); - } - doRawSaslReply(SaslStatus.SUCCESS, new BytesWritable(replyToken), null, - null); - } - if (saslServer.isComplete()) { - String qop = (String) saslServer.getNegotiatedProperty(Sasl.QOP); - useWrap = qop != null && !"auth".equalsIgnoreCase(qop); - user = getAuthorizedUgi(saslServer.getAuthorizationID()); - if (LOG.isDebugEnabled()) { - LOG.debug("SASL server context established. Authenticated client: " - + user + ". Negotiated QoP is " - + saslServer.getNegotiatedProperty(Sasl.QOP)); - } - metrics.authenticationSuccess(); - AUDITLOG.info(AUTH_SUCCESSFUL_FOR + user); - saslContextEstablished = true; - } - } - } - /** - * No protobuf encoding of raw sasl messages - */ - private void doRawSaslReply(SaslStatus status, Writable rv, - String errorClass, String error) throws IOException { - //In my testing, have noticed that sasl messages are usually - //in the ballpark of 100-200. That's why the initialcapacity is 256. - ByteBufferOutputStream saslResponse = new ByteBufferOutputStream(256); - DataOutputStream out = new DataOutputStream(saslResponse); - out.writeInt(status.state); // write status - if (status == SaslStatus.SUCCESS) { - rv.write(out); - } else { - WritableUtils.writeString(out, errorClass); - WritableUtils.writeString(out, error); - } - saslCall.setSaslTokenResponse(saslResponse.getByteBuffer()); - saslCall.responder = responder; - saslCall.sendResponseIfReady(); - } - - private void disposeSasl() { - if (saslServer != null) { - try { - saslServer.dispose(); - saslServer = null; - } catch (SaslException ignored) { - } - } - } - - /** - * Read off the wire. - * @return Returns -1 if failure (and caller will close connection) else return how many - * bytes were read and processed - * @throws IOException - * @throws InterruptedException - */ - public int readAndProcess() throws IOException, InterruptedException { - while (true) { - // Try and read in an int. If new connection, the int will hold the 'HBas' HEADER. If it - // does, read in the rest of the connection preamble, the version and the auth method. - // Else it will be length of the data to read (or -1 if a ping). We catch the integer - // length into the 4-byte this.dataLengthBuffer. - int count; - if (this.dataLengthBuffer.remaining() > 0) { - count = channelRead(channel, this.dataLengthBuffer); - if (count < 0 || this.dataLengthBuffer.remaining() > 0) { - return count; - } - } - // If we have not read the connection setup preamble, look to see if that is on the wire. - if (!connectionPreambleRead) { - // Check for 'HBas' magic. - this.dataLengthBuffer.flip(); - if (!HConstants.RPC_HEADER.equals(dataLengthBuffer)) { - return doBadPreambleHandling("Expected HEADER=" + - Bytes.toStringBinary(HConstants.RPC_HEADER.array()) + - " but received HEADER=" + Bytes.toStringBinary(dataLengthBuffer.array())); - } - // Now read the next two bytes, the version and the auth to use. 
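// [Editor's note] Not part of the original patch: for orientation, the full
// connection preamble a client writes is six bytes -- the 4-byte magic "HBas"
// (HConstants.RPC_HEADER, matched against dataLengthBuffer above), then one
// version byte and one auth-method byte, which the code below reads. A sketch
// of the client-side encoding under those assumptions:
import java.nio.ByteBuffer;

final class PreambleSketch {
  static ByteBuffer preamble(byte version, byte authCode) {
    ByteBuffer b = ByteBuffer.allocate(6);
    b.put(new byte[] {'H', 'B', 'a', 's'}); // magic, HConstants.RPC_HEADER
    b.put(version);                         // must match the server's CURRENT_VERSION
    b.put(authCode);                        // AuthMethod code byte
    b.flip();
    return b;
  }
}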
- ByteBuffer versionAndAuthBytes = ByteBuffer.allocate(2); - count = channelRead(channel, versionAndAuthBytes); - if (count < 0 || versionAndAuthBytes.remaining() > 0) { - return count; - } - int version = versionAndAuthBytes.get(0); - byte authbyte = versionAndAuthBytes.get(1); - this.authMethod = AuthMethod.valueOf(authbyte); - if (version != CURRENT_VERSION || authMethod == null) { - return doBadPreambleHandling("serverVersion=" + CURRENT_VERSION + - ", clientVersion=" + version + ", authMethod=" + authbyte + - ", authSupported=" + (authMethod != null)); - } - if (isSecurityEnabled && authMethod == AuthMethod.SIMPLE) { - AccessControlException ae = new AccessControlException("Authentication is required"); - setupResponse(authFailedResponse, authFailedCall, ae, ae.getMessage()); - responder.doRespond(authFailedCall); - throw ae; - } - if (!isSecurityEnabled && authMethod != AuthMethod.SIMPLE) { - doRawSaslReply(SaslStatus.SUCCESS, new IntWritable( - SaslUtil.SWITCH_TO_SIMPLE_AUTH), null, null); - authMethod = AuthMethod.SIMPLE; - // client has already sent the initial Sasl message and we - // should ignore it. Both client and server should fall back - // to simple auth from now on. - skipInitialSaslHandshake = true; - } - if (authMethod != AuthMethod.SIMPLE) { - useSasl = true; - } - connectionPreambleRead = true; - // Preamble checks out. Go around again to read actual connection header. - dataLengthBuffer.clear(); - continue; - } - // We have read a length and we have read the preamble. It is either the connection header - // or it is a request. - if (data == null) { - dataLengthBuffer.flip(); - int dataLength = dataLengthBuffer.getInt(); - if (dataLength == HBaseClient.PING_CALL_ID) { - if (!useWrap) { //covers the !useSasl too - dataLengthBuffer.clear(); - return 0; //ping message - } - } - if (dataLength < 0) { - throw new IllegalArgumentException("Unexpected data length " - + dataLength + "!! from " + getHostAddress()); - } - data = ByteBuffer.allocate(dataLength); - incRpcCount(); // Increment the rpc count - } - count = channelRead(channel, data); - if (data.remaining() == 0) { - dataLengthBuffer.clear(); - data.flip(); - if (skipInitialSaslHandshake) { - data = null; - skipInitialSaslHandshake = false; - continue; - } - boolean headerRead = connectionHeaderRead; - if (useSasl) { - saslReadAndProcess(data.array()); - } else { - processOneRpc(data.array()); - } - this.data = null; - if (!headerRead) { - continue; - } - } else { - // More to read still; go around again. - if (LOG.isTraceEnabled()) LOG.trace("Continue to read rest of data " + data.remaining()); - continue; - } - return count; - } - } - - private int doBadPreambleHandling(final String errMsg) throws IOException { - String msg = errMsg + "; cannot communicate with client at " + hostAddress + ":" + port; - LOG.warn(msg); - Call fakeCall = new Call(-1, null, null, null, this, responder, -1, null); - setupResponse(null, fakeCall, new FatalConnectionException(msg), msg); - responder.doRespond(fakeCall); - // Returning -1 closes out the connection. 
- return -1; - } - - // Reads the connection header following version - private void processConnectionHeader(byte[] buf) throws IOException { - this.connectionHeader = ConnectionHeader.parseFrom(buf); - try { - String protocolClassName = connectionHeader.getProtocol(); - if (protocolClassName != null) { - protocol = getProtocolClass(connectionHeader.getProtocol(), conf); - } - } catch (ClassNotFoundException cnfe) { - throw new IOException("Unknown protocol: " + connectionHeader.getProtocol()); - } - setupCellBlockCodecs(this.connectionHeader); - - UserGroupInformation protocolUser = createUser(connectionHeader); - if (!useSasl) { - user = protocolUser; - if (user != null) { - user.setAuthenticationMethod(AuthMethod.SIMPLE.authenticationMethod); - } - } else { - // user is authenticated - user.setAuthenticationMethod(authMethod.authenticationMethod); - //Now we check if this is a proxy user case. If the protocol user is - //different from the 'user', it is a proxy user scenario. However, - //this is not allowed if user authenticated with DIGEST. - if ((protocolUser != null) - && (!protocolUser.getUserName().equals(user.getUserName()))) { - if (authMethod == AuthMethod.DIGEST) { - // Not allowed to doAs if token authentication is used - throw new AccessControlException("Authenticated user (" + user - + ") doesn't match what the client claims to be (" - + protocolUser + ")"); - } else { - // Effective user can be different from authenticated user - // for simple auth or kerberos auth - // The user is the real user. Now we create a proxy user - UserGroupInformation realUser = user; - user = UserGroupInformation.createProxyUser(protocolUser - .getUserName(), realUser); - // Now the user is a proxy user, set Authentication method Proxy. - user.setAuthenticationMethod(AuthenticationMethod.PROXY); - } - } - } - } - - /** - * Set up cell block codecs - * @param header - * @throws FatalConnectionException - */ - private void setupCellBlockCodecs(final ConnectionHeader header) - throws FatalConnectionException { - // TODO: Plug in other supported decoders. 
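// [Editor's note] Not part of the original patch: both branches below follow
// one reflective pattern -- the client names a class in its ConnectionHeader
// and the server instantiates it via a public no-arg constructor, failing the
// connection otherwise. The same pattern factored out (hypothetical helper):
final class CodecLoader {
  static <T> T instantiate(String className, Class<T> type)
      throws FatalConnectionException {
    try {
      return type.cast(Class.forName(className).newInstance());
    } catch (Exception e) {
      throw new FatalConnectionException("Unsupported codec " + className, e);
    }
  }
  // e.g. codec = CodecLoader.instantiate(header.getCellBlockCodecClass(), Codec.class);
}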
- if (!header.hasCellBlockCodecClass()) throw new FatalConnectionException("No codec"); - String className = header.getCellBlockCodecClass(); - try { - this.codec = (Codec)Class.forName(className).newInstance(); - } catch (Exception e) { - throw new FatalConnectionException("Unsupported codec " + className, e); - } - if (!header.hasCellBlockCompressorClass()) return; - className = header.getCellBlockCompressorClass(); - try { - this.compressionCodec = (CompressionCodec)Class.forName(className).newInstance(); - } catch (Exception e) { - throw new FatalConnectionException("Unsupported codec " + className, e); - } - } - - private void processUnwrappedData(byte[] inBuf) throws IOException, - InterruptedException { - ReadableByteChannel ch = Channels.newChannel(new ByteArrayInputStream( - inBuf)); - // Read all RPCs contained in the inBuf, even partial ones - while (true) { - int count = -1; - if (unwrappedDataLengthBuffer.remaining() > 0) { - count = channelRead(ch, unwrappedDataLengthBuffer); - if (count <= 0 || unwrappedDataLengthBuffer.remaining() > 0) - return; - } - - if (unwrappedData == null) { - unwrappedDataLengthBuffer.flip(); - int unwrappedDataLength = unwrappedDataLengthBuffer.getInt(); - - if (unwrappedDataLength == HBaseClient.PING_CALL_ID) { - if (LOG.isDebugEnabled()) - LOG.debug("Received ping message"); - unwrappedDataLengthBuffer.clear(); - continue; // ping message - } - unwrappedData = ByteBuffer.allocate(unwrappedDataLength); - } - - count = channelRead(ch, unwrappedData); - if (count <= 0 || unwrappedData.remaining() > 0) - return; - - if (unwrappedData.remaining() == 0) { - unwrappedDataLengthBuffer.clear(); - unwrappedData.flip(); - processOneRpc(unwrappedData.array()); - unwrappedData = null; - } - } - } - - private void processOneRpc(byte[] buf) throws IOException, - InterruptedException { - if (connectionHeaderRead) { - processRequest(buf); - } else { - processConnectionHeader(buf); - this.connectionHeaderRead = true; - if (!authorizeConnection()) { - throw new AccessControlException("Connection from " + this - + " for protocol " + connectionHeader.getProtocol() - + " is unauthorized for user " + user); - } - } - } - - /** - * @param buf Has the request header and the request param and optionally encoded data buffer - * all in this one array. - * @throws IOException - * @throws InterruptedException - */ - protected void processRequest(byte[] buf) throws IOException, InterruptedException { - long totalRequestSize = buf.length; - int offset = 0; - // Here we read in the header. We avoid having pb - // do its default 4k allocation for CodedInputStream. We force it to use backing array. - CodedInputStream cis = CodedInputStream.newInstance(buf, offset, buf.length); - int headerSize = cis.readRawVarint32(); - offset = cis.getTotalBytesRead(); - RequestHeader header = - RequestHeader.newBuilder().mergeFrom(buf, offset, headerSize).build(); - offset += headerSize; - int id = header.getCallId(); - if (LOG.isDebugEnabled()) { - LOG.debug("RequestHeader " + TextFormat.shortDebugString(header) + - " totalRequestSize: " + totalRequestSize + " bytes"); - } - // Enforcing the call queue size, this triggers a retry in the client - // This is a bit late to be doing this check - we have already read in the total request. 
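// [Editor's note] Not part of the original patch: a sketch of the admission
// decision made just below. The server tracks the byte size of every queued
// call; a request that would push the total past ipc.server.max.callqueue.size
// is answered with CallQueueTooBigException so the client backs off and
// retries. (AtomicLong stands in for the server's counter type.)
import java.util.concurrent.atomic.AtomicLong;

final class AdmissionSketch {
  static boolean admits(long requestSize, AtomicLong queuedBytes, long maxBytes) {
    return requestSize + queuedBytes.get() <= maxBytes;
  }
}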
- if ((totalRequestSize + callQueueSize.get()) > maxQueueSize) { - final Call callTooBig = - new Call(id, null, null, null, this, responder, totalRequestSize, null); - ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream(); - setupResponse(responseBuffer, callTooBig, new CallQueueTooBigException(), - "Call queue is full, is ipc.server.max.callqueue.size too small?"); - responder.doRespond(callTooBig); - return; - } - Method method = null; - Message param = null; - CellScanner cellScanner = null; - try { - if (header.hasRequestParam() && header.getRequestParam()) { - method = methodCache.getMethod(this.protocol, header.getMethodName()); - Message m = methodCache.getMethodArgType(method); - // Check that there is a param to deserialize. - if (m != null) { - Builder builder = null; - builder = m.newBuilderForType(); - // To read the varint, I need an inputstream; might as well be a CIS. - cis = CodedInputStream.newInstance(buf, offset, buf.length); - int paramSize = cis.readRawVarint32(); - offset += cis.getTotalBytesRead(); - if (builder != null) { - builder.mergeFrom(buf, offset, paramSize); - param = builder.build(); - } - offset += paramSize; - } - } - if (header.hasCellBlockMeta()) { - cellScanner = ipcUtil.createCellScanner(this.codec, this.compressionCodec, - buf, offset, buf.length); - } - } catch (Throwable t) { - String msg = "Unable to read call parameter from client " + getHostAddress(); - LOG.warn(msg, t); - final Call readParamsFailedCall = - new Call(id, null, null, null, this, responder, totalRequestSize, null); - ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream(); - setupResponse(responseBuffer, readParamsFailedCall, t, - msg + "; " + t.getMessage()); - responder.doRespond(readParamsFailedCall); - return; - } - - Call call = null; - if (header.hasTraceInfo()) { - call = new Call(id, method, param, cellScanner, this, responder, totalRequestSize, - new TraceInfo(header.getTraceInfo().getTraceId(), header.getTraceInfo().getParentId())); - } else { - call = new Call(id, method, param, cellScanner, this, responder, totalRequestSize, null); - } - callQueueSize.add(totalRequestSize); - Pair headerAndParam = new Pair(header, param); - if (priorityCallQueue != null && getQosLevel(headerAndParam) > highPriorityLevel) { - priorityCallQueue.put(call); - } else if (replicationQueue != null && - getQosLevel(headerAndParam) == HConstants.REPLICATION_QOS) { - replicationQueue.put(call); - } else { - callQueue.put(call); // queue the call; maybe blocked here - } - } - - private boolean authorizeConnection() throws IOException { - try { - // If auth method is DIGEST, the token was obtained by the - // real user for the effective user, therefore not required to - // authorize real user. 
doAs is allowed only for simple or kerberos - // authentication - if (user != null && user.getRealUser() != null - && (authMethod != AuthMethod.DIGEST)) { - ProxyUsers.authorize(user, this.getHostAddress(), conf); - } - authorize(user, connectionHeader, getHostInetAddress()); - if (LOG.isDebugEnabled()) { - LOG.debug("Authorized " + TextFormat.shortDebugString(connectionHeader)); - } - metrics.authorizationSuccess(); - } catch (AuthorizationException ae) { - LOG.debug("Connection authorization failed: " + ae.getMessage(), ae); - metrics.authorizationFailure(); - setupResponse(authFailedResponse, authFailedCall, ae, ae.getMessage()); - responder.doRespond(authFailedCall); - return false; - } - return true; - } - - protected synchronized void close() { - disposeSasl(); - data = null; - this.dataLengthBuffer = null; - if (!channel.isOpen()) - return; - try {socket.shutdownOutput();} catch(Exception ignored) {} // FindBugs DE_MIGHT_IGNORE - if (channel.isOpen()) { - try {channel.close();} catch(Exception ignored) {} - } - try {socket.close();} catch(Exception ignored) {} - } - - private UserGroupInformation createUser(ConnectionHeader head) { - UserGroupInformation ugi = null; - - if (!head.hasUserInfo()) { - return null; - } - UserInformation userInfoProto = head.getUserInfo(); - String effectiveUser = null; - if (userInfoProto.hasEffectiveUser()) { - effectiveUser = userInfoProto.getEffectiveUser(); - } - String realUser = null; - if (userInfoProto.hasRealUser()) { - realUser = userInfoProto.getRealUser(); - } - if (effectiveUser != null) { - if (realUser != null) { - UserGroupInformation realUserUgi = - UserGroupInformation.createRemoteUser(realUser); - ugi = UserGroupInformation.createProxyUser(effectiveUser, realUserUgi); - } else { - ugi = UserGroupInformation.createRemoteUser(effectiveUser); - } - } - return ugi; - } - } - - /** Handles queued calls . */ - private class Handler extends Thread { - private final BlockingQueue myCallQueue; - private MonitoredRPCHandler status; - - public Handler(final BlockingQueue cq, int instanceNumber) { - this.myCallQueue = cq; - this.setDaemon(true); - - String threadName = "IPC Server handler " + instanceNumber + " on " + port; - if (cq == priorityCallQueue) { - // this is just an amazing hack, but it works. - threadName = "PRI " + threadName; - } else if (cq == replicationQueue) { - threadName = "REPL " + threadName; - } - this.setName(threadName); - this.status = TaskMonitor.get().createRPCStatus(threadName); - } - - @Override - public void run() { - LOG.info(getName() + ": starting"); - status.setStatus("starting"); - SERVER.set(HBaseServer.this); - while (running) { - try { - status.pause("Waiting for a call"); - Call call = myCallQueue.take(); // pop the queue; maybe blocked here - status.setStatus("Setting up call"); - status.setConnection(call.connection.getHostAddress(), call.connection.getRemotePort()); - if (LOG.isDebugEnabled()) { - UserGroupInformation remoteUser = call.connection.user; - LOG.debug(call.toString() + " executing as " + - ((remoteUser == null)? 
"NULL principal": remoteUser.getUserName())); - } - Throwable errorThrowable = null; - String error = null; - Pair resultPair = null; - CurCall.set(call); - Span currentRequestSpan = NullSpan.getInstance(); - try { - if (!started) { - throw new ServerNotRunningYetException("Server is not running yet"); - } - if (call.tinfo != null) { - currentRequestSpan = Trace.startSpan( - "handling " + call.toString(), call.tinfo, Sampler.ALWAYS); - } - RequestContext.set(User.create(call.connection.user), getRemoteIp(), - call.connection.protocol); - - // make the call - resultPair = call(call.connection.protocol, call.method, call.param, call.cellScanner, - call.timestamp, status); - } catch (Throwable e) { - LOG.debug(getName() + ": " + call.toString() + " error: " + e, e); - errorThrowable = e; - error = StringUtils.stringifyException(e); - } finally { - currentRequestSpan.stop(); - // Must always clear the request context to avoid leaking - // credentials between requests. - RequestContext.clear(); - } - CurCall.set(null); - callQueueSize.add(call.getSize() * -1); - // Set the response for undelayed calls and delayed calls with - // undelayed responses. - if (!call.isDelayed() || !call.isReturnValueDelayed()) { - Message param = resultPair != null? resultPair.getFirst(): null; - CellScanner cells = resultPair != null? resultPair.getSecond(): null; - call.setResponse(param, cells, errorThrowable, error); - } - call.sendResponseIfReady(); - status.markComplete("Sent response"); - } catch (InterruptedException e) { - if (running) { // unexpected -- log it - LOG.info(getName() + ": caught: " + StringUtils.stringifyException(e)); - } - } catch (OutOfMemoryError e) { - if (errorHandler != null) { - if (errorHandler.checkOOME(e)) { - LOG.info(getName() + ": exiting on OutOfMemoryError"); - return; - } - } else { - // rethrow if no handler - throw e; - } - } catch (ClosedChannelException cce) { - LOG.warn(getName() + ": caught a ClosedChannelException, " + - "this means that the server was processing a " + - "request but the client went away. The error message was: " + - cce.getMessage()); - } catch (Exception e) { - LOG.warn(getName() + ": caught: " + StringUtils.stringifyException(e)); - } - } - LOG.info(getName() + ": exiting"); - } - } - - /* Constructs a server listening on the named port and address. Parameters passed must - * be of the named class. The handlerCount determines - * the number of handler threads that will be used to process calls. 
- *
- */
- protected HBaseServer(String bindAddress, int port,
-     int handlerCount,
-     int priorityHandlerCount, Configuration conf, String serverName,
-     int highPriorityLevel)
-     throws IOException {
-   this.bindAddress = bindAddress;
-   this.conf = conf;
-   this.port = port;
-   this.handlerCount = handlerCount;
-   this.priorityHandlerCount = priorityHandlerCount;
-   this.socketSendBufferSize = 0;
-   this.maxQueueLength =
-     this.conf.getInt("ipc.server.max.callqueue.length",
-       handlerCount * DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER);
-   this.maxQueueSize =
-     this.conf.getInt("ipc.server.max.callqueue.size",
-       DEFAULT_MAX_CALLQUEUE_SIZE);
-   this.readThreads = conf.getInt(
-     "ipc.server.read.threadpool.size",
-     10);
-   this.callQueue = new LinkedBlockingQueue<Call>(maxQueueLength);
-   if (priorityHandlerCount > 0) {
-     this.priorityCallQueue = new LinkedBlockingQueue<Call>(maxQueueLength); // TODO hack on size
-   } else {
-     this.priorityCallQueue = null;
-   }
-   this.highPriorityLevel = highPriorityLevel;
-   this.maxIdleTime = 2*conf.getInt("ipc.client.connection.maxidletime", 1000);
-   this.maxConnectionsToNuke = conf.getInt("ipc.client.kill.max", 10);
-   this.thresholdIdleConnections = conf.getInt("ipc.client.idlethreshold", 4000);
-   this.purgeTimeout = conf.getLong("ipc.client.call.purge.timeout",
-     2 * HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
-   this.numOfReplicationHandlers = conf.getInt("hbase.regionserver.replication.handler.count", 3);
-   if (numOfReplicationHandlers > 0) {
-     this.replicationQueue = new LinkedBlockingQueue<Call>(maxQueueSize);
-   }
-   // Start the listener here and let it bind to the port
-   listener = new Listener();
-   this.port = listener.getAddress().getPort();
-
-   this.metrics = new MetricsHBaseServer(
-     serverName, new MetricsHBaseServerWrapperImpl(this));
-   this.tcpNoDelay = conf.getBoolean("ipc.server.tcpnodelay", true);
-   this.tcpKeepAlive = conf.getBoolean("ipc.server.tcpkeepalive", true);
-
-   this.warnDelayedCalls = conf.getInt(WARN_DELAYED_CALLS,
-     DEFAULT_WARN_DELAYED_CALLS);
-   this.delayedCalls = new AtomicInteger(0);
-   this.ipcUtil = new IPCUtil(conf);
-
-   // Create the responder here
-   responder = new Responder();
-   this.authorize =
-     conf.getBoolean(HADOOP_SECURITY_AUTHORIZATION, false);
-   this.isSecurityEnabled = User.isHBaseSecurityEnabled(this.conf);
-   if (isSecurityEnabled) {
-     HBaseSaslRpcServer.init(conf);
-   }
- }
-
- /**
-  * Subclasses of HBaseServer can override this to provide their own
-  * Connection implementations.
-  */
- protected Connection getConnection(SocketChannel channel, long time) {
-   return new Connection(channel, time);
- }
-
- /**
-  * Set up the response for an IPC call.
-  *
-  * @param response buffer to serialize the response into
-  * @param call {@link Call} to which we are setting up the response
-  * @param t the Throwable that caused the call to fail, or null if it succeeded
-  * @param error error message, if the call failed
-  * @throws IOException
-  */
- private void setupResponse(ByteArrayOutputStream response, Call call, Throwable t, String error)
-     throws IOException {
-   if (response != null) response.reset();
-   call.setResponse(null, null, t, error);
- }
-
- protected void closeConnection(Connection connection) {
-   synchronized (connectionList) {
-     if (connectionList.remove(connection)) {
-       numConnections--;
-     }
-   }
-   connection.close();
- }
-
- Configuration getConf() {
-   return conf;
- }
-
- /** Sets the socket buffer size used for responding to RPCs.
- * @param size send size - */ - @Override - public void setSocketSendBufSize(int size) { this.socketSendBufferSize = size; } - - /** Starts the service. Must be called before any calls will be handled. */ - @Override - public void start() { - startThreads(); - openServer(); - } - - /** - * Open a previously started server. - */ - @Override - public void openServer() { - started = true; - } - - /** - * Starts the service threads but does not allow requests to be responded yet. - * Client will get {@link ServerNotRunningYetException} instead. - */ - @Override - public synchronized void startThreads() { - responder.start(); - listener.start(); - handlers = startHandlers(callQueue, handlerCount); - priorityHandlers = startHandlers(priorityCallQueue, priorityHandlerCount); - replicationHandlers = startHandlers(replicationQueue, numOfReplicationHandlers); - } - - private Handler[] startHandlers(BlockingQueue queue, int numOfHandlers) { - if (numOfHandlers <= 0) { - return null; - } - Handler[] handlers = new Handler[numOfHandlers]; - for (int i = 0; i < numOfHandlers; i++) { - handlers[i] = new Handler(queue, i); - handlers[i].start(); - } - return handlers; - } - - public SecretManager getSecretManager() { - return this.secretManager; - } - - @SuppressWarnings("unchecked") - public void setSecretManager(SecretManager secretManager) { - this.secretManager = (SecretManager) secretManager; - } - - /** Stops the service. No new calls will be handled after this is called. */ - @Override - public synchronized void stop() { - LOG.info("Stopping server on " + port); - running = false; - stopHandlers(handlers); - stopHandlers(priorityHandlers); - stopHandlers(replicationHandlers); - listener.interrupt(); - listener.doStop(); - responder.interrupt(); - notifyAll(); - } - - private void stopHandlers(Handler[] handlers) { - if (handlers != null) { - for (Handler handler : handlers) { - if (handler != null) { - handler.interrupt(); - } - } - } - } - - /** Wait for the server to be stopped. - * Does not wait for all subthreads to finish. - * See {@link #stop()}. - * @throws InterruptedException e - */ - @Override - public synchronized void join() throws InterruptedException { - while (running) { - wait(); - } - } - - /** - * Return the socket (ip+port) on which the RPC server is listening to. - * @return the socket (ip+port) on which the RPC server is listening to. - */ - @Override - public synchronized InetSocketAddress getListenerAddress() { - return listener.getAddress(); - } - - /** - * Set the handler for calling out of RPC for error conditions. - * @param handler the handler implementation - */ - @Override - public void setErrorHandler(HBaseRPCErrorHandler handler) { - this.errorHandler = handler; - } - - /** - * Returns the metrics instance for reporting RPC call statistics - */ - public MetricsHBaseServer getMetrics() { - return metrics; - } - - /** - * Authorize the incoming client connection. 
- *
- * @param user client user
- * @param connection incoming connection
- * @param addr InetAddress of incoming connection
- * @throws org.apache.hadoop.security.authorize.AuthorizationException when the client isn't authorized to talk the protocol
- */
- @SuppressWarnings("static-access")
- public void authorize(UserGroupInformation user,
-     ConnectionHeader connection,
-     InetAddress addr
-     ) throws AuthorizationException {
-   if (authorize) {
-     Class<? extends IpcProtocol> protocol = null;
-     try {
-       protocol = getProtocolClass(connection.getProtocol(), getConf());
-     } catch (ClassNotFoundException cfne) {
-       throw new AuthorizationException("Unknown protocol: " +
-         connection.getProtocol());
-     }
-     authManager.authorize(user, protocol, getConf(), addr);
-   }
- }
-
- /**
-  * When the read or write buffer size is larger than this limit, i/o will be
-  * done in chunks of this size. Most RPC requests and responses would
-  * be smaller.
-  */
- private static int NIO_BUFFER_LIMIT = 64 * 1024; //should not be more than 64KB.
-
- /**
-  * This is a wrapper around {@link java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer)}.
-  * If the amount of data is large, it writes to the channel in smaller chunks.
-  * This keeps the jdk from creating many direct buffers as the size of the
-  * buffer increases. This also minimizes extra copies in the NIO layer
-  * as a result of multiple write operations required to write a large
-  * buffer.
-  *
-  * @param channel writable byte channel to write to
-  * @param buffer buffer to write
-  * @return number of bytes written
-  * @throws java.io.IOException e
-  * @see java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer)
-  */
- protected int channelWrite(WritableByteChannel channel,
-     ByteBuffer buffer) throws IOException {
-
-   int count = (buffer.remaining() <= NIO_BUFFER_LIMIT) ?
-     channel.write(buffer) : channelIO(null, channel, buffer);
-   if (count > 0) {
-     metrics.sentBytes(count);
-   }
-   return count;
- }
-
- /**
-  * This is a wrapper around {@link java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer)}.
-  * If the amount of data is large, it reads from the channel in smaller chunks.
-  * This keeps the jdk from creating many direct buffers as the size of the
-  * ByteBuffer increases. There should not be any performance degradation.
-  *
-  * @param channel readable byte channel to read from
-  * @param buffer buffer to read into
-  * @return number of bytes read
-  * @throws java.io.IOException e
-  * @see java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer)
-  */
- protected int channelRead(ReadableByteChannel channel,
-     ByteBuffer buffer) throws IOException {
-
-   int count = (buffer.remaining() <= NIO_BUFFER_LIMIT) ?
-     channel.read(buffer) : channelIO(channel, null, buffer);
-   if (count > 0) {
-     metrics.receivedBytes(count);
-   }
-   return count;
- }
-
- /**
-  * Helper for {@link #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer)}
-  * and {@link #channelWrite(java.nio.channels.WritableByteChannel, java.nio.ByteBuffer)}. Only
-  * one of readCh or writeCh should be non-null.
- * - * @param readCh read channel - * @param writeCh write channel - * @param buf buffer to read or write into/out of - * @return bytes written - * @throws java.io.IOException e - * @see #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer) - * @see #channelWrite(java.nio.channels.WritableByteChannel, java.nio.ByteBuffer) - */ - private static int channelIO(ReadableByteChannel readCh, - WritableByteChannel writeCh, - ByteBuffer buf) throws IOException { - - int originalLimit = buf.limit(); - int initialRemaining = buf.remaining(); - int ret = 0; - - while (buf.remaining() > 0) { - try { - int ioSize = Math.min(buf.remaining(), NIO_BUFFER_LIMIT); - buf.limit(buf.position() + ioSize); - - ret = (readCh == null) ? writeCh.write(buf) : readCh.read(buf); - - if (ret < ioSize) { - break; - } - - } finally { - buf.limit(originalLimit); - } - } - - int nBytes = initialRemaining - buf.remaining(); - return (nBytes > 0) ? nBytes : ret; - } - - /** - * Needed for delayed calls. We need to be able to store the current call - * so that we can complete it later. - * @return Call the server is currently handling. - */ - public static RpcCallContext getCurrentCall() { - return CurCall.get(); - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServerRPC.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServerRPC.java deleted file mode 100644 index d7d4566c983..00000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServerRPC.java +++ /dev/null @@ -1,123 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.ipc; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.IpcProtocol; -import org.apache.hadoop.util.ReflectionUtils; - -/** - * A simple RPC mechanism. - * - * This is a local hbase copy of the hadoop RPC so we can do things like - * address HADOOP-414 for hbase-only and try other hbase-specific - * optimizations. Class has been renamed to avoid confusing it w/ hadoop - * versions. - *

- * - * - * A protocol is a Java interface. All parameters and return types must - * be Protobuf objects. - * All methods in the protocol should throw only IOException. No field data of - * the protocol instance is transmitted. - * - * This class provides the server side implementation. - */ -@InterfaceAudience.Private -public class HBaseServerRPC { - // Leave this out in the hadoop ipc package but keep class name. Do this - // so that we dont' get the logging of this class's invocations by doing our - // blanket enabling DEBUG on the o.a.h.h. package. - protected static final Log LOG = - LogFactory.getLog("org.apache.hadoop.ipc.HBaseServerRPC"); - - // cache of RpcEngines by protocol - private static final Map, RpcServerEngine> PROTOCOL_ENGINES = - new HashMap, RpcServerEngine>(); - - /** - * Configuration key for the {@link org.apache.hadoop.hbase.ipc.RpcServerEngine} implementation to - * load to handle connection protocols. Handlers for individual protocols can be - * configured using {@code "hbase.rpc.server.engine." + protocol.class.name}. - */ - public static final String RPC_ENGINE_PROP = "hbase.rpc.server.engine"; - - private HBaseServerRPC() { - super(); - } // no public ctor - - // set a protocol to use a non-default RpcEngine - static void setProtocolEngine(Configuration conf, - Class protocol, Class engine) { - conf.setClass(RPC_ENGINE_PROP + "." + protocol.getName(), engine, RpcServerEngine.class); - } - - // return the RpcEngine configured to handle a protocol - static synchronized RpcServerEngine getProtocolEngine(Class protocol, - Configuration conf) { - RpcServerEngine engine = PROTOCOL_ENGINES.get(protocol); - if (engine == null) { - // check for a configured default engine - Class defaultEngine = - conf.getClass(RPC_ENGINE_PROP, ProtobufRpcServerEngine.class); - - // check for a per interface override - Class impl = conf.getClass(RPC_ENGINE_PROP + "." + protocol.getName(), - defaultEngine); - LOG.debug("Using " + impl.getName() + " for " + protocol.getName()); - engine = (RpcServerEngine) ReflectionUtils.newInstance(impl, conf); - PROTOCOL_ENGINES.put(protocol, engine); - } - return engine; - } - - /** - * Construct a server for a protocol implementation instance. - */ - public static RpcServer getServer(Class protocol, - final Object instance, - final Class[] ifaces, - String bindAddress, - int port, - final int numHandlers, - int metaHandlerCount, - final boolean verbose, - Configuration conf, - int highPriorityLevel) - throws IOException { - return getProtocolEngine(protocol, conf). 
- getServer(instance, - ifaces, - bindAddress, - port, - numHandlers, - metaHandlerCount, - verbose, - conf, - highPriorityLevel); - } -} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java index 97dd816e4a7..ab5484c8586 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java @@ -21,9 +21,9 @@ package org.apache.hadoop.hbase.ipc; public class MetricsHBaseServerWrapperImpl implements MetricsHBaseServerWrapper { - private HBaseServer server; + private RpcServer server; - MetricsHBaseServerWrapperImpl(HBaseServer server) { + MetricsHBaseServerWrapperImpl(RpcServer server) { this.server = server; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcServerEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcServerEngine.java deleted file mode 100644 index 3d38c1ec35f..00000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcServerEngine.java +++ /dev/null @@ -1,302 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.ipc; - -import java.io.IOException; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.util.HashMap; -import java.util.Map; - -import org.apache.commons.logging.Log; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CellScanner; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.IpcProtocol; -import org.apache.hadoop.hbase.client.Operation; -import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; -import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.hadoop.hbase.security.HBasePolicyProvider; -import org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; -import org.codehaus.jackson.map.ObjectMapper; - -import com.google.protobuf.Message; -import com.google.protobuf.ServiceException; -import com.google.protobuf.TextFormat; -/** - * The {@link RpcServerEngine} implementation for ProtoBuf-based RPCs. 
- */ -@InterfaceAudience.Private -class ProtobufRpcServerEngine implements RpcServerEngine { - ProtobufRpcServerEngine() { - super(); - } - - @Override - public Server getServer(Object instance, Class[] ifaces, - String bindAddress, int port, int numHandlers, int metaHandlerCount, - boolean verbose, Configuration conf, int highPriorityLevel) - throws IOException { - return new Server(instance, ifaces, conf, bindAddress, port, numHandlers, - metaHandlerCount, verbose, highPriorityLevel); - } - - public static class Server extends HBaseServer { - boolean verbose; - Object instance; - Class implementation; - private static final String WARN_RESPONSE_TIME = - "hbase.ipc.warn.response.time"; - private static final String WARN_RESPONSE_SIZE = - "hbase.ipc.warn.response.size"; - - /** Default value for above params */ - private static final int DEFAULT_WARN_RESPONSE_TIME = 10000; // milliseconds - private static final int DEFAULT_WARN_RESPONSE_SIZE = 100 * 1024 * 1024; - - private final int warnResponseTime; - private final int warnResponseSize; - - private static String classNameBase(String className) { - String[] names = className.split("\\.", -1); - if (names == null || names.length == 0) { - return className; - } - return names[names.length-1]; - } - - public Server(Object instance, final Class[] ifaces, - Configuration conf, String bindAddress, int port, - int numHandlers, int metaHandlerCount, boolean verbose, - int highPriorityLevel) - throws IOException { - super(bindAddress, port, numHandlers, metaHandlerCount, - conf, classNameBase(instance.getClass().getName()), - highPriorityLevel); - this.instance = instance; - this.implementation = instance.getClass(); - this.verbose = verbose; - - this.warnResponseTime = conf.getInt(WARN_RESPONSE_TIME, - DEFAULT_WARN_RESPONSE_TIME); - this.warnResponseSize = conf.getInt(WARN_RESPONSE_SIZE, - DEFAULT_WARN_RESPONSE_SIZE); - this.verbose = verbose; - this.instance = instance; - this.implementation = instance.getClass(); - } - - private AuthenticationTokenSecretManager createSecretManager(){ - if (!isSecurityEnabled || - !(instance instanceof org.apache.hadoop.hbase.Server)) { - return null; - } - org.apache.hadoop.hbase.Server server = - (org.apache.hadoop.hbase.Server)instance; - Configuration conf = server.getConfiguration(); - long keyUpdateInterval = - conf.getLong("hbase.auth.key.update.interval", 24*60*60*1000); - long maxAge = - conf.getLong("hbase.auth.token.max.lifetime", 7*24*60*60*1000); - return new AuthenticationTokenSecretManager(conf, server.getZooKeeper(), - server.getServerName().toString(), keyUpdateInterval, maxAge); - } - - @Override - public void startThreads() { - AuthenticationTokenSecretManager mgr = createSecretManager(); - if (mgr != null) { - setSecretManager(mgr); - mgr.start(); - } - this.authManager = new ServiceAuthorizationManager(); - HBasePolicyProvider.init(conf, authManager); - - // continue with base startup - super.startThreads(); - } - - @Override - /** - * This is a server side method, which is invoked over RPC. On success - * the return response has protobuf response payload. On failure, the - * exception name and the stack trace are returned in the protobuf response. 
- */ - public Pair call(Class protocol, - Method method, Message param, CellScanner cellScanner, long receiveTime, - MonitoredRPCHandler status) - throws IOException { - try { - if (verbose) { - LOG.info("callId: " + CurCall.get().id + " protocol: " + protocol.getName() + - " method: " + method.getName()); - } - status.setRPC(method.getName(), new Object[]{param}, receiveTime); - // TODO: Review after we add in encoded data blocks. - status.setRPCPacket(param); - status.resume("Servicing call"); - //get an instance of the method arg type - Message result; - Object impl = null; - if (protocol.isAssignableFrom(this.implementation)) { - impl = this.instance; - } else { - throw new UnknownProtocolException(protocol); - } - PayloadCarryingRpcController controller = null; - long startTime = System.currentTimeMillis(); - if (method.getParameterTypes().length == 2) { - // Always create a controller. Some invocations may not pass data in but will pass - // data out and they'll need a controller instance to carry it for them. - controller = new PayloadCarryingRpcController(cellScanner); - result = (Message)method.invoke(impl, controller, param); - } else { - throw new ServiceException("Wrong number of parameters for method: [" + - method.getName() + "]" + ", wanted: 2, actual: " + method.getParameterTypes().length); - } - int processingTime = (int) (System.currentTimeMillis() - startTime); - int qTime = (int) (startTime-receiveTime); - if (LOG.isTraceEnabled()) { - LOG.trace(CurCall.get().toString() + - " response: " + TextFormat.shortDebugString(result) + - " served: " + protocol.getSimpleName() + - " queueTime: " + qTime + - " processingTime: " + processingTime); - } - metrics.dequeuedCall(qTime); - metrics.processedCall(processingTime); - if (verbose) { - log("Return " + TextFormat.shortDebugString(result), LOG); - } - long responseSize = result.getSerializedSize(); - // log any RPC responses that are slower than the configured warn - // response time or larger than configured warning size - boolean tooSlow = (processingTime > warnResponseTime && warnResponseTime > -1); - boolean tooLarge = (responseSize > warnResponseSize && warnResponseSize > -1); - if (tooSlow || tooLarge) { - // when tagging, we let TooLarge trump TooSmall to keep output simple - // note that large responses will often also be slow. - // TOOD: This output is useless.... output the serialized pb as toString but do a - // short form, shorter than TextFormat.shortDebugString(proto). - StringBuilder buffer = new StringBuilder(256); - buffer.append(method.getName()); - buffer.append("("); - buffer.append(param.getClass().getName()); - buffer.append(")"); - logResponse(new Object[]{param}, - method.getName(), buffer.toString(), (tooLarge ? "TooLarge" : "TooSlow"), - status.getClient(), startTime, processingTime, qTime, - responseSize); - } - return new Pair(result, - controller != null? 
controller.cellScanner(): null); - } catch (InvocationTargetException e) { - Throwable target = e.getTargetException(); - if (target instanceof IOException) { - throw (IOException)target; - } - if (target instanceof ServiceException) { - throw ProtobufUtil.getRemoteException((ServiceException)target); - } - IOException ioe = new IOException(target.toString()); - ioe.setStackTrace(target.getStackTrace()); - throw ioe; - } catch (Throwable e) { - if (!(e instanceof IOException)) { - LOG.error("Unexpected throwable object ", e); - } - IOException ioe = new IOException(e.toString()); - ioe.setStackTrace(e.getStackTrace()); - throw ioe; - } - } - - /** - * Logs an RPC response to the LOG file, producing valid JSON objects for - * client Operations. - * @param params The parameters received in the call. - * @param methodName The name of the method invoked - * @param call The string representation of the call - * @param tag The tag that will be used to indicate this event in the log. - * @param clientAddress The address of the client who made this call. - * @param startTime The time that the call was initiated, in ms. - * @param processingTime The duration that the call took to run, in ms. - * @param qTime The duration that the call spent on the queue - * prior to being initiated, in ms. - * @param responseSize The size in bytes of the response buffer. - */ - void logResponse(Object[] params, String methodName, String call, String tag, - String clientAddress, long startTime, int processingTime, int qTime, - long responseSize) - throws IOException { - // for JSON encoding - ObjectMapper mapper = new ObjectMapper(); - // base information that is reported regardless of type of call - Map responseInfo = new HashMap(); - responseInfo.put("starttimems", startTime); - responseInfo.put("processingtimems", processingTime); - responseInfo.put("queuetimems", qTime); - responseInfo.put("responsesize", responseSize); - responseInfo.put("client", clientAddress); - responseInfo.put("class", instance.getClass().getSimpleName()); - responseInfo.put("method", methodName); - if (params.length == 2 && instance instanceof HRegionServer && - params[0] instanceof byte[] && - params[1] instanceof Operation) { - // if the slow process is a query, we want to log its table as well - // as its own fingerprint - byte [] tableName = - HRegionInfo.parseRegionName((byte[]) params[0])[0]; - responseInfo.put("table", Bytes.toStringBinary(tableName)); - // annotate the response map with operation details - responseInfo.putAll(((Operation) params[1]).toMap()); - // report to the log file - LOG.warn("(operation" + tag + "): " + - mapper.writeValueAsString(responseInfo)); - } else if (params.length == 1 && instance instanceof HRegionServer && - params[0] instanceof Operation) { - // annotate the response map with operation details - responseInfo.putAll(((Operation) params[0]).toMap()); - // report to the log file - LOG.warn("(operation" + tag + "): " + - mapper.writeValueAsString(responseInfo)); - } else { - // can't get JSON details, so just report call.toString() along with - // a more generic tag. 
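The JSON that logResponse() emits is easiest to see with a toy map. A minimal sketch using the Jackson 1.x ObjectMapper this patch imports; the field names mirror logResponse() above, while the values are invented for illustration:

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.codehaus.jackson.map.ObjectMapper;

public class SlowRpcLogSketch {
  public static void main(String[] args) throws IOException {
    Map<String, Object> responseInfo = new HashMap<String, Object>();
    responseInfo.put("starttimems", System.currentTimeMillis());
    responseInfo.put("processingtimems", 12000);   // above a 10s warn threshold
    responseInfo.put("queuetimems", 3);
    responseInfo.put("responsesize", 150L * 1024 * 1024);
    responseInfo.put("client", "10.0.0.1:50512");
    responseInfo.put("method", "Scan");
    // One JSON object per offending call keeps the warn log machine-parseable.
    System.out.println("(operationTooSlow): " + new ObjectMapper().writeValueAsString(responseInfo));
  }
}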
- responseInfo.put("call", call); - LOG.warn("(response" + tag + "): " + - mapper.writeValueAsString(responseInfo)); - } - } - - protected static void log(String value, Log LOG) { - String v = value; - final int max = 100; - if (v != null && v.length() > max) - v = v.substring(0, max) + "..."; - LOG.info(v); - } - } -} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RequestContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RequestContext.java index eefb6b88d64..4adad49ef50 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RequestContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RequestContext.java @@ -21,9 +21,10 @@ package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.IpcProtocol; import org.apache.hadoop.hbase.security.User; +import com.google.protobuf.BlockingService; + import java.net.InetAddress; /** @@ -87,15 +88,14 @@ public class RequestContext { * Initializes the client credentials for the current request. * @param user * @param remoteAddress - * @param protocol + * @param service */ public static void set(User user, - InetAddress remoteAddress, - Class protocol) { + InetAddress remoteAddress, BlockingService service) { RequestContext ctx = instance.get(); ctx.user = user; ctx.remoteAddress = remoteAddress; - ctx.protocol = protocol; + ctx.service = service; ctx.inRequest = true; } @@ -106,21 +106,20 @@ public class RequestContext { RequestContext ctx = instance.get(); ctx.user = null; ctx.remoteAddress = null; - ctx.protocol = null; + ctx.service = null; ctx.inRequest = false; } private User user; private InetAddress remoteAddress; - private Class protocol; + private BlockingService service; // indicates we're within a RPC request invocation private boolean inRequest; - private RequestContext(User user, InetAddress remoteAddr, - Class protocol) { + private RequestContext(User user, InetAddress remoteAddr, BlockingService service) { this.user = user; this.remoteAddress = remoteAddr; - this.protocol = protocol; + this.service = service; } public User getUser() { @@ -131,8 +130,8 @@ public class RequestContext { return remoteAddress; } - public Class getProtocol() { - return protocol; + public BlockingService getService() { + return this.service; } public boolean isInRequest() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ResponseFlag.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ResponseFlag.java index 7354379e6fb..ebd005e750c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ResponseFlag.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ResponseFlag.java @@ -21,7 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience; /** * Utility for managing the flag byte passed in response to a - * {@link HBaseServer.Call} + * {@link RpcServer.Call} */ @InterfaceAudience.Private class ResponseFlag { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java index acea988b1aa..44c29046ff7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java @@ -20,12 +20,10 @@ package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.hbase.exceptions.CallerDisconnectedException; public interface RpcCallContext extends 
Delayable { - /** * Throw an exception if the caller who made this IPC call has disconnected. * If called from outside the context of IPC, this does nothing. * @throws CallerDisconnectedException */ void throwExceptionIfCallerDisconnected() throws CallerDisconnectedException; - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index 972b9b1b16d..9e902e9be11 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -1,4 +1,4 @@ -/* +/** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -8,7 +8,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,58 +19,2474 @@ package org.apache.hadoop.hbase.ipc; -import java.io.IOException; -import java.lang.reflect.Method; -import java.net.InetSocketAddress; +import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.net.BindException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.net.Socket; +import java.net.SocketException; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.nio.channels.CancelledKeyException; +import java.nio.channels.Channels; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.ReadableByteChannel; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.nio.channels.ServerSocketChannel; +import java.nio.channels.SocketChannel; +import java.nio.channels.WritableByteChannel; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.atomic.AtomicInteger; + +import javax.security.sasl.Sasl; +import javax.security.sasl.SaslException; +import javax.security.sasl.SaslServer; + +import com.google.common.collect.Lists; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellScanner; -import org.apache.hadoop.hbase.IpcProtocol; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.client.Operation; +import org.apache.hadoop.hbase.codec.Codec; +import org.apache.hadoop.hbase.exceptions.CallerDisconnectedException; +import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException; +import org.apache.hadoop.hbase.exceptions.RegionMovedException; +import 
org.apache.hadoop.hbase.exceptions.ServerNotRunningYetException; +import org.apache.hadoop.hbase.io.ByteBufferOutputStream; import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; +import org.apache.hadoop.hbase.monitoring.TaskMonitor; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.CellBlockMeta; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ExceptionResponse; import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ResponseHeader; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.security.AuthMethod; +import org.apache.hadoop.hbase.security.HBasePolicyProvider; +import org.apache.hadoop.hbase.security.HBaseSaslRpcServer; +import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslDigestCallbackHandler; +import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslGssCallbackHandler; +import org.apache.hadoop.hbase.security.SaslStatus; +import org.apache.hadoop.hbase.security.SaslUtil; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.io.IntWritable; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableUtils; +import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.security.authorize.PolicyProvider; +import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; +import org.apache.hadoop.security.token.SecretManager; +import org.apache.hadoop.security.token.SecretManager.InvalidToken; +import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.util.StringUtils; +import org.cliffc.high_scale_lib.Counter; +import org.cloudera.htrace.Sampler; +import org.cloudera.htrace.Span; +import org.cloudera.htrace.Trace; +import org.cloudera.htrace.TraceInfo; +import org.cloudera.htrace.impl.NullSpan; +import org.codehaus.jackson.map.ObjectMapper; import com.google.common.base.Function; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.google.protobuf.BlockingService; +import com.google.protobuf.CodedInputStream; +import com.google.protobuf.Descriptors.MethodDescriptor; import com.google.protobuf.Message; +import com.google.protobuf.Message.Builder; +import com.google.protobuf.ServiceException; +import com.google.protobuf.TextFormat; +// Uses Writables doing sasl +/** + * An RPC server that hosts protobuf described Services. + *

Once was copied from Hadoop to local to fix HBASE-900 but deviated long ago. + * + * @see RpcClient + */ @InterfaceAudience.Private -public interface RpcServer { - // TODO: Needs cleanup. Why a 'start', and then a 'startThreads' and an 'openServer'? - // Also, the call takes a RpcRequestBody, an already composed combination of - // rpc Request and metadata. Should disentangle metadata and rpc Request Message. +public class RpcServer implements RpcServerInterface { + // The logging package is deliberately outside of standard o.a.h.h package so it is not on + // by default. + public static final Log LOG = LogFactory.getLog("org.apache.hadoop.ipc.RpcServer"); - void setSocketSendBufSize(int size); + private final boolean authorize; + private boolean isSecurityEnabled; - void start(); + public static final byte CURRENT_VERSION = 0; - void stop(); - - void join() throws InterruptedException; - - InetSocketAddress getListenerAddress(); - - /** Called for each call. - * @param method Method to invoke. - * @param param parameter - * @param receiveTime time - * @param status - * @return Message Protobuf response Message and optionally the Cells that make up the response. - * @throws java.io.IOException e + /** + * How many calls/handler are allowed in the queue. */ - Pair call(Class protocol, Method method, - Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status) - throws IOException; + private static final int DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER = 10; - void setErrorHandler(HBaseRPCErrorHandler handler); + /** + * The maximum size that we can hold in the IPC queue + */ + private static final int DEFAULT_MAX_CALLQUEUE_SIZE = 1024 * 1024 * 1024; - void openServer(); + static final int BUFFER_INITIAL_SIZE = 1024; - void startThreads(); + private static final String WARN_DELAYED_CALLS = "hbase.ipc.warn.delayedrpc.number"; + + private static final int DEFAULT_WARN_DELAYED_CALLS = 1000; + + private final int warnDelayedCalls; + + private AtomicInteger delayedCalls; + private final IPCUtil ipcUtil; + + private static final String AUTH_FAILED_FOR = "Auth failed for "; + private static final String AUTH_SUCCESSFUL_FOR = "Auth successful for "; + private static final Log AUDITLOG = LogFactory.getLog("SecurityLogger." + + Server.class.getName()); + protected SecretManager secretManager; + protected ServiceAuthorizationManager authManager; + + protected static final ThreadLocal SERVER = new ThreadLocal(); + private volatile boolean started = false; + + /** This is set to Call object before Handler invokes an RPC and reset + * after the call returns. 
+ */ + protected static final ThreadLocal CurCall = new ThreadLocal(); + + protected final InetSocketAddress isa; + protected int port; // port we listen on + private int handlerCount; // number of handler threads + private int priorityHandlerCount; + private int readThreads; // number of read threads + protected int maxIdleTime; // the maximum idle time after + // which a client may be + // disconnected + protected int thresholdIdleConnections; // the number of idle + // connections after which we + // will start cleaning up idle + // connections + int maxConnectionsToNuke; // the max number of + // connections to nuke + // during a cleanup + + protected MetricsHBaseServer metrics; + + protected final Configuration conf; + + private int maxQueueLength; + private int maxQueueSize; + protected int socketSendBufferSize; + protected final boolean tcpNoDelay; // if T then disable Nagle's Algorithm + protected final boolean tcpKeepAlive; // if T then use keepalives + protected final long purgeTimeout; // in milliseconds + + volatile protected boolean running = true; // true while server runs + protected BlockingQueue callQueue; // queued calls + protected final Counter callQueueSize = new Counter(); + protected BlockingQueue priorityCallQueue; + + protected int highPriorityLevel; // what level a high priority call is at + + protected final List connectionList = + Collections.synchronizedList(new LinkedList()); + //maintain a list + //of client connections + private Listener listener = null; + protected Responder responder = null; + protected int numConnections = 0; + private Handler[] handlers = null; + private Handler[] priorityHandlers = null; + /** Replication related queue. */ + protected BlockingQueue replicationQueue; + private int numOfReplicationHandlers = 0; + private Handler[] replicationHandlers = null; + + protected HBaseRPCErrorHandler errorHandler = null; + + private static final String WARN_RESPONSE_TIME = "hbase.ipc.warn.response.time"; + private static final String WARN_RESPONSE_SIZE = "hbase.ipc.warn.response.size"; + + /** Default value for above params */ + private static final int DEFAULT_WARN_RESPONSE_TIME = 10000; // milliseconds + private static final int DEFAULT_WARN_RESPONSE_SIZE = 100 * 1024 * 1024; + + private final int warnResponseTime; + private final int warnResponseSize; + private final Object serverInstance; + private final List services; + + /** + * Data structure that holds everything necessary for a method invocation and that afterward + * carries the result. + */ + class Call implements RpcCallContext { + protected int id; // the client's call id + protected BlockingService service; + protected MethodDescriptor md; + protected Message param; // the parameter passed + // Optional cell data passed outside of protobufs.
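The callQueue/priorityCallQueue fields above set up a classic bounded producer-consumer arrangement: reader threads enqueue decoded calls, handler threads drain them. A minimal sketch of that shape (not HBase code; Runnable stands in for the Call type):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class HandlerQueueSketch {
  public static void main(String[] args) throws InterruptedException {
    final BlockingQueue<Runnable> callQueue = new LinkedBlockingQueue<Runnable>(10);
    Thread handler = new Thread(new Runnable() {
      public void run() {
        try {
          while (true) {
            callQueue.take().run(); // blocks until a reader queues a call
          }
        } catch (InterruptedException e) {
          // server shutdown
        }
      }
    }, "Handler 0");
    handler.setDaemon(true);
    handler.start();
    callQueue.put(new Runnable() { // put() blocks when the queue is full: backpressure
      public void run() { System.out.println("servicing call"); }
    });
    Thread.sleep(100); // let the handler drain the queue before the VM exits
  }
}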
+ protected CellScanner cellScanner; + protected Connection connection; // connection to client + protected long timestamp; // the time received when response is null + // the time served when response is not null + protected ByteBuffer response; // the response for this call + protected boolean delayResponse; + protected Responder responder; + protected boolean delayReturnValue; // if the return value should be + // set at call completion + protected long size; // size of current call + protected boolean isError; + protected TraceInfo tinfo; + + Call(int id, final BlockingService service, final MethodDescriptor md, Message param, + CellScanner cellScanner, Connection connection, Responder responder, long size, + TraceInfo tinfo) { + this.id = id; + this.service = service; + this.md = md; + this.param = param; + this.cellScanner = cellScanner; + this.connection = connection; + this.timestamp = System.currentTimeMillis(); + this.response = null; + this.delayResponse = false; + this.responder = responder; + this.isError = false; + this.size = size; + this.tinfo = tinfo; + } + + @Override + public String toString() { + String serviceName = this.connection.service != null? + this.connection.service.getDescriptorForType().getName(): "null"; + return "callId: " + this.id + " service: " + serviceName + " methodName: " + + ((this.md != null)? this.md.getName(): null) + " param: " + + (this.param != null? IPCUtil.getRequestShortTextFormat(this.param): "") + + " connection: " + connection.toString(); + } + + protected synchronized void setSaslTokenResponse(ByteBuffer response) { + this.response = response; + } + + protected synchronized void setResponse(Object m, final CellScanner cells, + Throwable t, String errorMsg) { + if (this.isError) return; + if (t != null) this.isError = true; + ByteBufferOutputStream bbos = null; + try { + ResponseHeader.Builder headerBuilder = ResponseHeader.newBuilder(); + // Presume it a pb Message. Could be null. + Message result = (Message)m; + // Call id. + headerBuilder.setCallId(this.id); + if (t != null) { + ExceptionResponse.Builder exceptionBuilder = ExceptionResponse.newBuilder(); + exceptionBuilder.setExceptionClassName(t.getClass().getName()); + exceptionBuilder.setStackTrace(errorMsg); + exceptionBuilder.setDoNotRetry(t instanceof DoNotRetryIOException); + if (t instanceof RegionMovedException) { + // Special casing for this exception. This is only one carrying a payload. + // Do this instead of build a generic system for allowing exceptions carry + // any kind of payload. + RegionMovedException rme = (RegionMovedException)t; + exceptionBuilder.setHostname(rme.getHostname()); + exceptionBuilder.setPort(rme.getPort()); + } + // Set the exception as the result of the method invocation. + headerBuilder.setException(exceptionBuilder.build()); + } + ByteBuffer cellBlock = + ipcUtil.buildCellBlock(this.connection.codec, this.connection.compressionCodec, cells); + if (cellBlock != null) { + CellBlockMeta.Builder cellBlockBuilder = CellBlockMeta.newBuilder(); + // Presumes the cellBlock bytebuffer has been flipped so limit has total size in it. 
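The flip() convention assumed in that comment is worth pinning down, since the setLength() call that follows depends on it. A self-contained demonstration:

import java.nio.ByteBuffer;

public class FlipSketch {
  public static void main(String[] args) {
    ByteBuffer cellBlock = ByteBuffer.allocate(1024);
    cellBlock.put(new byte[]{1, 2, 3});  // position=3, limit=1024
    cellBlock.flip();                    // position=0, limit=3
    // After flipping, limit() carries the total written size and the buffer
    // is ready to be read or sent.
    System.out.println("length to record in CellBlockMeta: " + cellBlock.limit()); // 3
  }
}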
+ cellBlockBuilder.setLength(cellBlock.limit()); + headerBuilder.setCellBlockMeta(cellBlockBuilder.build()); + } + Message header = headerBuilder.build(); + bbos = IPCUtil.write(header, result, cellBlock); + if (connection.useWrap) { + wrapWithSasl(bbos); + } + } catch (IOException e) { + LOG.warn("Exception while creating response " + e); + } + ByteBuffer bb = null; + if (bbos != null) { + // TODO: If SASL, maybe buffer already been flipped and written? + bb = bbos.getByteBuffer(); + bb.position(0); + } + this.response = bb; + } + + private void wrapWithSasl(ByteBufferOutputStream response) + throws IOException { + if (connection.useSasl) { + // getByteBuffer calls flip() + ByteBuffer buf = response.getByteBuffer(); + byte[] token; + // synchronization may be needed since there can be multiple Handler + // threads using saslServer to wrap responses. + synchronized (connection.saslServer) { + token = connection.saslServer.wrap(buf.array(), + buf.arrayOffset(), buf.remaining()); + } + if (LOG.isDebugEnabled()) + LOG.debug("Adding saslServer wrapped token of size " + token.length + + " as call response."); + buf.clear(); + DataOutputStream saslOut = new DataOutputStream(response); + saslOut.writeInt(token.length); + saslOut.write(token, 0, token.length); + } + } + + @Override + public synchronized void endDelay(Object result) throws IOException { + assert this.delayResponse; + assert this.delayReturnValue || result == null; + this.delayResponse = false; + delayedCalls.decrementAndGet(); + if (this.delayReturnValue) { + this.setResponse(result, null, null, null); + } + this.responder.doRespond(this); + } + + @Override + public synchronized void endDelay() throws IOException { + this.endDelay(null); + } + + @Override + public synchronized void startDelay(boolean delayReturnValue) { + assert !this.delayResponse; + this.delayResponse = true; + this.delayReturnValue = delayReturnValue; + int numDelayed = delayedCalls.incrementAndGet(); + if (numDelayed > warnDelayedCalls) { + LOG.warn("Too many delayed calls: limit " + warnDelayedCalls + " current " + numDelayed); + } + } + + @Override + public synchronized void endDelayThrowing(Throwable t) throws IOException { + this.setResponse(null, null, t, StringUtils.stringifyException(t)); + this.delayResponse = false; + this.sendResponseIfReady(); + } + + @Override + public synchronized boolean isDelayed() { + return this.delayResponse; + } + + @Override + public synchronized boolean isReturnValueDelayed() { + return this.delayReturnValue; + } + + @Override + public void throwExceptionIfCallerDisconnected() throws CallerDisconnectedException { + if (!connection.channel.isOpen()) { + long afterTime = System.currentTimeMillis() - timestamp; + throw new CallerDisconnectedException( + "Aborting call " + this + " after " + afterTime + " ms, since " + + "caller disconnected"); + } + } + + public long getSize() { + return this.size; + } + + /** + * If we have a response, and delay is not set, then respond + * immediately. Otherwise, do not respond to client. This is + * called by the RPC code in the context of the Handler thread. + */ + public synchronized void sendResponseIfReady() throws IOException { + if (!this.delayResponse) { + this.responder.doRespond(this); + } + } + } + + /** Listens on the socket.
Creates jobs for the handler threads*/ + private class Listener extends Thread { + + private ServerSocketChannel acceptChannel = null; //the accept channel + private Selector selector = null; //the selector that we use for the server + private Reader[] readers = null; + private int currentReader = 0; + private Random rand = new Random(); + private long lastCleanupRunTime = 0; //the last time when a cleanup connec- + //-tion (for idle connections) ran + private long cleanupInterval = 10000; //the minimum interval between + //two cleanup runs + private int backlogLength = conf.getInt("ipc.server.listen.queue.size", 128); + + private ExecutorService readPool; + + public Listener(final String name) throws IOException { + super(name); + // Create a new server socket and set to non blocking mode + acceptChannel = ServerSocketChannel.open(); + acceptChannel.configureBlocking(false); + + // Bind the server socket to the local host and port + bind(acceptChannel.socket(), isa, backlogLength); + port = acceptChannel.socket().getLocalPort(); //Could be an ephemeral port + // create a selector; + selector= Selector.open(); + + readers = new Reader[readThreads]; + readPool = Executors.newFixedThreadPool(readThreads, + new ThreadFactoryBuilder().setNameFormat( + "IPC Reader %d on port " + port).setDaemon(true).build()); + for (int i = 0; i < readThreads; ++i) { + Reader reader = new Reader(); + readers[i] = reader; + readPool.execute(reader); + } + LOG.info(getName() + ": started " + readThreads + " reader(s)."); + + // Register accepts on the server socket with the selector. + acceptChannel.register(selector, SelectionKey.OP_ACCEPT); + this.setName("IPC Server listener on " + port); + this.setDaemon(true); + } + + + private class Reader implements Runnable { + private volatile boolean adding = false; + private final Selector readSelector; + + Reader() throws IOException { + this.readSelector = Selector.open(); + } + public void run() { + try { + doRunLoop(); + } finally { + try { + readSelector.close(); + } catch (IOException ioe) { + LOG.error(getName() + ": error closing read selector in " + getName(), ioe); + } + } + } + + private synchronized void doRunLoop() { + while (running) { + SelectionKey key = null; + try { + readSelector.select(); + while (adding) { + this.wait(1000); + } + + Iterator iter = readSelector.selectedKeys().iterator(); + while (iter.hasNext()) { + key = iter.next(); + iter.remove(); + if (key.isValid()) { + if (key.isReadable()) { + doRead(key); + } + } + key = null; + } + } catch (InterruptedException e) { + if (running) { // unexpected -- log it + LOG.info(getName() + ": unexpectedly interrupted: " + + StringUtils.stringifyException(e)); + } + } catch (IOException ex) { + LOG.error(getName() + ": error in Reader", ex); + } + } + } + + /** + * This gets reader into the state that waits for the new channel + * to be registered with readSelector. If it was waiting in select() + * the thread will be woken up, otherwise whenever select() is called + * it will return even if there is nothing to read and wait + * in while(adding) for finishAdd call + */ + public void startAdd() { + adding = true; + readSelector.wakeup(); + } + + public synchronized SelectionKey registerChannel(SocketChannel channel) + throws IOException { + return channel.register(readSelector, SelectionKey.OP_READ); + } + + public synchronized void finishAdd() { + adding = false; + this.notify(); + } + } + + /** cleanup connections from connectionList. 
Choose a random range + * to scan and also have a limit on the number of the connections + * that will be cleaned up per run. The criterion for cleanup is the time + * for which the connection was idle. If 'force' is true then all + * connections will be looked at for the cleanup. + * @param force all connections will be looked at for cleanup + */ + private void cleanupConnections(boolean force) { + if (force || numConnections > thresholdIdleConnections) { + long currentTime = System.currentTimeMillis(); + if (!force && (currentTime - lastCleanupRunTime) < cleanupInterval) { + return; + } + int start = 0; + int end = numConnections - 1; + if (!force) { + start = rand.nextInt() % numConnections; + end = rand.nextInt() % numConnections; + int temp; + if (end < start) { + temp = start; + start = end; + end = temp; + } + } + int i = start; + int numNuked = 0; + while (i <= end) { + Connection c; + synchronized (connectionList) { + try { + c = connectionList.get(i); + } catch (Exception e) {return;} + } + if (c.timedOut(currentTime)) { + if (LOG.isDebugEnabled()) + LOG.debug(getName() + ": disconnecting client " + c.getHostAddress()); + closeConnection(c); + numNuked++; + end--; + //noinspection UnusedAssignment + c = null; + if (!force && numNuked == maxConnectionsToNuke) break; + } + else i++; + } + lastCleanupRunTime = System.currentTimeMillis(); + } + } + + @Override + public void run() { + LOG.info(getName() + ": starting"); + SERVER.set(RpcServer.this); + + while (running) { + SelectionKey key = null; + try { + selector.select(); // FindBugs IS2_INCONSISTENT_SYNC + Iterator iter = selector.selectedKeys().iterator(); + while (iter.hasNext()) { + key = iter.next(); + iter.remove(); + try { + if (key.isValid()) { + if (key.isAcceptable()) + doAccept(key); + } + } catch (IOException ignored) { + } + key = null; + } + } catch (OutOfMemoryError e) { + if (errorHandler != null) { + if (errorHandler.checkOOME(e)) { + LOG.info(getName() + ": exiting on OutOfMemoryError"); + closeCurrentConnection(key, e); + cleanupConnections(true); + return; + } + } else { + // we can run out of memory if we have too many threads + // log the event and sleep for a minute and give + // some thread(s) a chance to finish + LOG.warn(getName() + ": OutOfMemoryError in server select", e); + closeCurrentConnection(key, e); + cleanupConnections(true); + try { Thread.sleep(60000); } catch (Exception ignored) {} + } + } catch (Exception e) { + closeCurrentConnection(key, e); + } + cleanupConnections(false); + } + LOG.info(getName() + ": stopping"); + + synchronized (this) { + try { + acceptChannel.close(); + selector.close(); + } catch (IOException ignored) { } + + selector= null; + acceptChannel= null; + + // clean up all connections + while (!connectionList.isEmpty()) { + closeConnection(connectionList.remove(0)); + } + } + } + + private void closeCurrentConnection(SelectionKey key, Throwable e) { + if (key != null) { + Connection c = (Connection)key.attachment(); + if (c != null) { + if (LOG.isDebugEnabled()) { + LOG.debug(getName() + ": disconnecting client " + c.getHostAddress() + + (e != null ?
" on error " + e.getMessage() : "")); + } + closeConnection(c); + key.attach(null); + } + } + } + + InetSocketAddress getAddress() { + return (InetSocketAddress)acceptChannel.socket().getLocalSocketAddress(); + } + + void doAccept(SelectionKey key) throws IOException, OutOfMemoryError { + Connection c; + ServerSocketChannel server = (ServerSocketChannel) key.channel(); + + SocketChannel channel; + while ((channel = server.accept()) != null) { + channel.configureBlocking(false); + channel.socket().setTcpNoDelay(tcpNoDelay); + channel.socket().setKeepAlive(tcpKeepAlive); + + Reader reader = getReader(); + try { + reader.startAdd(); + SelectionKey readKey = reader.registerChannel(channel); + c = getConnection(channel, System.currentTimeMillis()); + readKey.attach(c); + synchronized (connectionList) { + connectionList.add(numConnections, c); + numConnections++; + } + if (LOG.isDebugEnabled()) + LOG.debug(getName() + ": connection from " + c.toString() + + "; # active connections: " + numConnections + + "; # queued calls: " + callQueue.size()); + } finally { + reader.finishAdd(); + } + } + } + + void doRead(SelectionKey key) throws InterruptedException { + int count = 0; + Connection c = (Connection)key.attachment(); + if (c == null) { + return; + } + c.setLastContact(System.currentTimeMillis()); + try { + count = c.readAndProcess(); + } catch (InterruptedException ieo) { + throw ieo; + } catch (Exception e) { + LOG.warn(getName() + ": count of bytes read: " + count, e); + count = -1; //so that the (count < 0) block is executed + } + if (count < 0) { + if (LOG.isDebugEnabled()) { + LOG.debug(getName() + ": DISCONNECTING client " + c.toString() + + " because read count=" + count + + ". Number of active connections: " + numConnections); + } + closeConnection(c); + // c = null; + } else { + c.setLastContact(System.currentTimeMillis()); + } + } + + synchronized void doStop() { + if (selector != null) { + selector.wakeup(); + Thread.yield(); + } + if (acceptChannel != null) { + try { + acceptChannel.socket().close(); + } catch (IOException e) { + LOG.info(getName() + ": exception in closing listener socket. " + e); + } + } + readPool.shutdownNow(); + } + + // The method that will return the next reader to work with + // Simplistic implementation of round robin for now + Reader getReader() { + currentReader = (currentReader + 1) % readers.length; + return readers[currentReader]; + } + } + + // Sends responses of RPC back to clients. + protected class Responder extends Thread { + private final Selector writeSelector; + private int pending; // connections waiting to register + + Responder() throws IOException { + this.setName("IPC Server Responder"); + this.setDaemon(true); + writeSelector = Selector.open(); // create a selector + pending = 0; + } + + @Override + public void run() { + LOG.info(getName() + ": starting"); + SERVER.set(RpcServer.this); + try { + doRunLoop(); + } finally { + LOG.info(getName() + ": stopping"); + try { + writeSelector.close(); + } catch (IOException ioe) { + LOG.error(getName() + ": couldn't close write selector", ioe); + } + } + } + + private void doRunLoop() { + long lastPurgeTime = 0; // last check for old calls. + + while (running) { + try { + waitPending(); // If a channel is being registered, wait. 
+ writeSelector.select(purgeTimeout); + Iterator iter = writeSelector.selectedKeys().iterator(); + while (iter.hasNext()) { + SelectionKey key = iter.next(); + iter.remove(); + try { + if (key.isValid() && key.isWritable()) { + doAsyncWrite(key); + } + } catch (IOException e) { + LOG.info(getName() + ": asyncWrite", e); + } + } + long now = System.currentTimeMillis(); + if (now < lastPurgeTime + purgeTimeout) { + continue; + } + lastPurgeTime = now; + // + // If there were some calls that have not been sent out for a + // long time, discard them. + // + if (LOG.isDebugEnabled()) LOG.debug(getName() + ": checking for old call responses."); + ArrayList calls; + + // get the list of channels from list of keys. + synchronized (writeSelector.keys()) { + calls = new ArrayList(writeSelector.keys().size()); + iter = writeSelector.keys().iterator(); + while (iter.hasNext()) { + SelectionKey key = iter.next(); + Call call = (Call)key.attachment(); + if (call != null && key.channel() == call.connection.channel) { + calls.add(call); + } + } + } + + for(Call call : calls) { + try { + doPurge(call, now); + } catch (IOException e) { + LOG.warn(getName() + ": error in purging old calls " + e); + } + } + } catch (OutOfMemoryError e) { + if (errorHandler != null) { + if (errorHandler.checkOOME(e)) { + LOG.info(getName() + ": exiting on OutOfMemoryError"); + return; + } + } else { + // + // we can run out of memory if we have too many threads + // log the event and sleep for a minute and give + // some thread(s) a chance to finish + // + LOG.warn(getName() + ": OutOfMemoryError in server select", e); + try { Thread.sleep(60000); } catch (Exception ignored) {} + } + } catch (Exception e) { + LOG.warn(getName() + ": exception in Responder " + + StringUtils.stringifyException(e)); + } + } + LOG.info(getName() + ": stopped"); + } + + private void doAsyncWrite(SelectionKey key) throws IOException { + Call call = (Call)key.attachment(); + if (call == null) { + return; + } + if (key.channel() != call.connection.channel) { + throw new IOException("doAsyncWrite: bad channel"); + } + + synchronized(call.connection.responseQueue) { + if (processResponse(call.connection.responseQueue, false)) { + try { + key.interestOps(0); + } catch (CancelledKeyException e) { + /* The Listener/reader might have closed the socket. + * We don't explicitly cancel the key, so not sure if this will + * ever fire. + * This warning could be removed. + */ + LOG.warn("Exception while changing ops : " + e); + } + } + } + } + + // + // Remove calls that have been pending in the responseQueue + // for a long time. + // + private void doPurge(Call call, long now) throws IOException { + synchronized (call.connection.responseQueue) { + Iterator iter = call.connection.responseQueue.listIterator(0); + while (iter.hasNext()) { + Call nextCall = iter.next(); + if (now > nextCall.timestamp + purgeTimeout) { + closeConnection(nextCall.connection); + break; + } + } + } + } + + // Processes one response. Returns true if there are no more pending + // data for this channel. + // + private boolean processResponse(final LinkedList responseQueue, boolean inHandler) + throws IOException { + boolean error = true; + boolean done = false; // there is more data for this channel. 
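The purge rule that doPurge() above applies can be reduced to a few lines: any queued response older than purgeTimeout condemns its connection, on the theory that the client has stopped reading. A standalone sketch (not HBase code; QueuedCall is hypothetical):

import java.util.Iterator;
import java.util.LinkedList;

public class PurgeRuleSketch {
  static class QueuedCall { long timestamp; QueuedCall(long t) { timestamp = t; } }

  public static void main(String[] args) {
    long purgeTimeout = 15 * 60 * 1000L;
    LinkedList<QueuedCall> responseQueue = new LinkedList<QueuedCall>();
    responseQueue.add(new QueuedCall(System.currentTimeMillis() - purgeTimeout - 1));
    long now = System.currentTimeMillis();
    for (Iterator<QueuedCall> it = responseQueue.iterator(); it.hasNext();) {
      if (now > it.next().timestamp + purgeTimeout) {
        System.out.println("stale response: close this connection");
        break; // the real code closes the whole connection and stops scanning
      }
    }
  }
}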
+ int numElements; + Call call = null; + try { + //noinspection SynchronizationOnLocalVariableOrMethodParameter + synchronized (responseQueue) { + // + // If there are no items for this channel, then we are done + // + numElements = responseQueue.size(); + if (numElements == 0) { + error = false; + return true; // no more data for this channel. + } + // + // Extract the first call + // + call = responseQueue.removeFirst(); + SocketChannel channel = call.connection.channel; + // + // Send as much data as we can in the non-blocking fashion + // + int numBytes = channelWrite(channel, call.response); + if (numBytes < 0) { + return true; + } + if (!call.response.hasRemaining()) { + call.connection.decRpcCount(); + //noinspection RedundantIfStatement + if (numElements == 1) { // last call fully processes. + done = true; // no more data for this channel. + } else { + done = false; // more calls pending to be sent. + } + if (LOG.isDebugEnabled()) { + LOG.debug(getName() + ": callId: " + call.id + " wrote " + numBytes + " bytes."); + } + } else { + // + // If we were unable to write the entire response out, then + // insert in Selector queue. + // + call.connection.responseQueue.addFirst(call); + + if (inHandler) { + // set the serve time when the response has to be sent later + call.timestamp = System.currentTimeMillis(); + if (enqueueInSelector(call)) + done = true; + } + if (LOG.isDebugEnabled()) { + LOG.debug(getName() + call.toString() + " partially sent, wrote " + + numBytes + " bytes."); + } + } + error = false; // everything went off well + } + } finally { + if (error && call != null) { + LOG.warn(getName() + call.toString() + ": output error"); + done = true; // error. no more data for this channel. + closeConnection(call.connection); + } + } + return done; + } + + // + // Enqueue for background thread to send responses out later. + // + private boolean enqueueInSelector(Call call) throws IOException { + boolean done = false; + incPending(); + try { + // Wake up the thread blocked on select, only then can the call + // to channel.register() complete. + SocketChannel channel = call.connection.channel; + writeSelector.wakeup(); + channel.register(writeSelector, SelectionKey.OP_WRITE, call); + } catch (ClosedChannelException e) { + //It's OK. Channel might be closed else where. + done = true; + } finally { + decPending(); + } + return done; + } + + // + // Enqueue a response from the application. + // + void doRespond(Call call) throws IOException { + // set the serve time when the response has to be sent later + call.timestamp = System.currentTimeMillis(); + + boolean doRegister = false; + synchronized (call.connection.responseQueue) { + call.connection.responseQueue.addLast(call); + if (call.connection.responseQueue.size() == 1) { + doRegister = !processResponse(call.connection.responseQueue, false); + } + } + if (doRegister) { + enqueueInSelector(call); + } + } + + private synchronized void incPending() { // call waiting to be enqueued. + pending++; + } + + private synchronized void decPending() { // call done enqueueing. + pending--; + notify(); + } + + private synchronized void waitPending() throws InterruptedException { + while (pending > 0) { + wait(); + } + } + } + + @SuppressWarnings("serial") + public static class CallQueueTooBigException extends IOException { + CallQueueTooBigException() { + super(); + } + } + + private Function, Integer> qosFunction = null; + + /** + * Gets the QOS level for this call. 
If it is higher than the highPriorityLevel and there + * are priorityHandlers available it will be processed in its own thread set. + * + * @param newFunc + */ + @Override + public void setQosFunction(Function, Integer> newFunc) { + qosFunction = newFunc; + } + + protected int getQosLevel(Pair headerAndParam) { + if (qosFunction == null) return 0; + Integer res = qosFunction.apply(headerAndParam); + return res == null? 0: res; + } + + /** Reads calls from a connection and queues them for handling. */ + @edu.umd.cs.findbugs.annotations.SuppressWarnings( + value="VO_VOLATILE_INCREMENT", + justification="False positive according to http://sourceforge.net/p/findbugs/bugs/1032/") + public class Connection { + // If initial preamble with version and magic has been read or not. + private boolean connectionPreambleRead = false; + // If the connection header has been read or not. + private boolean connectionHeaderRead = false; + protected SocketChannel channel; + private ByteBuffer data; + private ByteBuffer dataLengthBuffer; + protected final LinkedList responseQueue; + private volatile int rpcCount = 0; // number of outstanding rpcs + private long lastContact; + private InetAddress addr; + protected Socket socket; + // Cache the remote host & port info so that even if the socket is + // disconnected, we can say where it used to connect to. + protected String hostAddress; + protected int remotePort; + ConnectionHeader connectionHeader; + /** + * Codec the client asked us to use. + */ + private Codec codec; + /** + * Compression codec the client asked us to use. + */ + private CompressionCodec compressionCodec; + BlockingService service; + protected UserGroupInformation user = null; + private AuthMethod authMethod; + private boolean saslContextEstablished; + private boolean skipInitialSaslHandshake; + private ByteBuffer unwrappedData; + // When is this set? FindBugs wants to know!
Says NP + private ByteBuffer unwrappedDataLengthBuffer = ByteBuffer.allocate(4); + boolean useSasl; + SaslServer saslServer; + private boolean useWrap = false; + // Fake 'call' for failed authorization response + private static final int AUTHROIZATION_FAILED_CALLID = -1; + private final Call authFailedCall = + new Call(AUTHROIZATION_FAILED_CALLID, this.service, null, null, null, this, null, 0, null); + private ByteArrayOutputStream authFailedResponse = + new ByteArrayOutputStream(); + // Fake 'call' for SASL context setup + private static final int SASL_CALLID = -33; + private final Call saslCall = + new Call(SASL_CALLID, this.service, null, null, null, this, null, 0, null); + + public UserGroupInformation attemptingUser = null; // user name before auth + + public Connection(SocketChannel channel, long lastContact) { + this.channel = channel; + this.lastContact = lastContact; + this.data = null; + this.dataLengthBuffer = ByteBuffer.allocate(4); + this.socket = channel.socket(); + this.addr = socket.getInetAddress(); + if (addr == null) { + this.hostAddress = "*Unknown*"; + } else { + this.hostAddress = addr.getHostAddress(); + } + this.remotePort = socket.getPort(); + this.responseQueue = new LinkedList(); + if (socketSendBufferSize != 0) { + try { + socket.setSendBufferSize(socketSendBufferSize); + } catch (IOException e) { + LOG.warn("Connection: unable to set socket send buffer size to " + + socketSendBufferSize); + } + } + } + + @Override + public String toString() { + return getHostAddress() + ":" + remotePort; + } + + public String getHostAddress() { + return hostAddress; + } + + public InetAddress getHostInetAddress() { + return addr; + } + + public int getRemotePort() { + return remotePort; + } + + public void setLastContact(long lastContact) { + this.lastContact = lastContact; + } + + public long getLastContact() { + return lastContact; + } + + /* Return true if the connection has no outstanding rpc */ + private boolean isIdle() { + return rpcCount == 0; + } + + /* Decrement the outstanding RPC count */ + protected void decRpcCount() { + rpcCount--; + } + + /* Increment the outstanding RPC count */ + protected void incRpcCount() { + rpcCount++; + } + + protected boolean timedOut(long currentTime) { + return isIdle() && currentTime - lastContact > maxIdleTime; + } + + private UserGroupInformation getAuthorizedUgi(String authorizedId) + throws IOException { + if (authMethod == AuthMethod.DIGEST) { + TokenIdentifier tokenId = HBaseSaslRpcServer.getIdentifier(authorizedId, + secretManager); + UserGroupInformation ugi = tokenId.getUser(); + if (ugi == null) { + throw new AccessControlException( + "Can't retrieve username from tokenIdentifier."); + } + ugi.addTokenIdentifier(tokenId); + return ugi; + } else { + return UserGroupInformation.createRemoteUser(authorizedId); + } + } + + private void saslReadAndProcess(byte[] saslToken) throws IOException, + InterruptedException { + if (saslContextEstablished) { + if (LOG.isDebugEnabled()) + LOG.debug("Have read input token of size " + saslToken.length + + " for processing by saslServer.unwrap()"); + + if (!useWrap) { + processOneRpc(saslToken); + } else { + byte [] plaintextData = saslServer.unwrap(saslToken, 0, saslToken.length); + processUnwrappedData(plaintextData); + } + } else { + byte[] replyToken = null; + try { + if (saslServer == null) { + switch (authMethod) { + case DIGEST: + if (secretManager == null) { + throw new AccessControlException( + "Server is not configured to do DIGEST authentication."); + } + saslServer = 
Sasl.createSaslServer(AuthMethod.DIGEST + .getMechanismName(), null, SaslUtil.SASL_DEFAULT_REALM, + SaslUtil.SASL_PROPS, new SaslDigestCallbackHandler( + secretManager, this)); + break; + default: + UserGroupInformation current = UserGroupInformation + .getCurrentUser(); + String fullName = current.getUserName(); + if (LOG.isDebugEnabled()) { + LOG.debug("Kerberos principal name is " + fullName); + } + final String names[] = SaslUtil.splitKerberosName(fullName); + if (names.length != 3) { + throw new AccessControlException( + "Kerberos principal name does NOT have the expected " + + "hostname part: " + fullName); + } + current.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws SaslException { + saslServer = Sasl.createSaslServer(AuthMethod.KERBEROS + .getMechanismName(), names[0], names[1], + SaslUtil.SASL_PROPS, new SaslGssCallbackHandler()); + return null; + } + }); + } + if (saslServer == null) + throw new AccessControlException( + "Unable to find SASL server implementation for " + + authMethod.getMechanismName()); + if (LOG.isDebugEnabled()) { + LOG.debug("Created SASL server with mechanism = " + authMethod.getMechanismName()); + } + } + if (LOG.isDebugEnabled()) { + LOG.debug("Have read input token of size " + saslToken.length + + " for processing by saslServer.evaluateResponse()"); + } + replyToken = saslServer.evaluateResponse(saslToken); + } catch (IOException e) { + IOException sendToClient = e; + Throwable cause = e; + while (cause != null) { + if (cause instanceof InvalidToken) { + sendToClient = (InvalidToken) cause; + break; + } + cause = cause.getCause(); + } + doRawSaslReply(SaslStatus.ERROR, null, sendToClient.getClass().getName(), + sendToClient.getLocalizedMessage()); + metrics.authenticationFailure(); + String clientIP = this.toString(); + // attempting user could be null + AUDITLOG.warn(AUTH_FAILED_FOR + clientIP + ":" + attemptingUser); + throw e; + } + if (replyToken != null) { + if (LOG.isDebugEnabled()) { + LOG.debug("Will send token of size " + replyToken.length + + " from saslServer."); + } + doRawSaslReply(SaslStatus.SUCCESS, new BytesWritable(replyToken), null, + null); + } + if (saslServer.isComplete()) { + String qop = (String) saslServer.getNegotiatedProperty(Sasl.QOP); + useWrap = qop != null && !"auth".equalsIgnoreCase(qop); + user = getAuthorizedUgi(saslServer.getAuthorizationID()); + if (LOG.isDebugEnabled()) { + LOG.debug("SASL server context established. Authenticated client: " + + user + ". Negotiated QoP is " + + saslServer.getNegotiatedProperty(Sasl.QOP)); + } + metrics.authenticationSuccess(); + AUDITLOG.info(AUTH_SUCCESSFUL_FOR + user); + saslContextEstablished = true; + } + } + } + /** + * No protobuf encoding of raw sasl messages + */ + private void doRawSaslReply(SaslStatus status, Writable rv, + String errorClass, String error) throws IOException { + //In my testing, have noticed that sasl messages are usually + //in the ballpark of 100-200. That's why the initialcapacity is 256. 
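The raw reply frame that doRawSaslReply() writes below is just a status int followed by a payload. A sketch with plain streams (not HBase code; the status value 0 for SUCCESS is an assumption made for illustration):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class SaslReplyFrameSketch {
  public static void main(String[] args) throws IOException {
    byte[] replyToken = new byte[]{0x05, 0x06};
    ByteArrayOutputStream frame = new ByteArrayOutputStream(256); // replies are small
    DataOutputStream out = new DataOutputStream(frame);
    out.writeInt(0);                    // status code first (0 = SUCCESS, assumed here)
    out.writeInt(replyToken.length);    // a BytesWritable serializes as length + bytes
    out.write(replyToken);
    System.out.println("frame size: " + frame.size()); // 4 + 4 + 2 = 10 bytes
  }
}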
+ ByteBufferOutputStream saslResponse = new ByteBufferOutputStream(256); + DataOutputStream out = new DataOutputStream(saslResponse); + out.writeInt(status.state); // write status + if (status == SaslStatus.SUCCESS) { + rv.write(out); + } else { + WritableUtils.writeString(out, errorClass); + WritableUtils.writeString(out, error); + } + saslCall.setSaslTokenResponse(saslResponse.getByteBuffer()); + saslCall.responder = responder; + saslCall.sendResponseIfReady(); + } + + private void disposeSasl() { + if (saslServer != null) { + try { + saslServer.dispose(); + saslServer = null; + } catch (SaslException ignored) { + } + } + } + + /** + * Read off the wire. + * @return Returns -1 if failure (and caller will close connection) else return how many + * bytes were read and processed + * @throws IOException + * @throws InterruptedException + */ + public int readAndProcess() throws IOException, InterruptedException { + while (true) { + // Try and read in an int. If new connection, the int will hold the 'HBas' HEADER. If it + // does, read in the rest of the connection preamble, the version and the auth method. + // Else it will be length of the data to read (or -1 if a ping). We catch the integer + // length into the 4-byte this.dataLengthBuffer. + int count; + if (this.dataLengthBuffer.remaining() > 0) { + count = channelRead(channel, this.dataLengthBuffer); + if (count < 0 || this.dataLengthBuffer.remaining() > 0) { + return count; + } + } + // If we have not read the connection setup preamble, look to see if that is on the wire. + if (!connectionPreambleRead) { + // Check for 'HBas' magic. + this.dataLengthBuffer.flip(); + if (!HConstants.RPC_HEADER.equals(dataLengthBuffer)) { + return doBadPreambleHandling("Expected HEADER=" + + Bytes.toStringBinary(HConstants.RPC_HEADER.array()) + + " but received HEADER=" + Bytes.toStringBinary(dataLengthBuffer.array()) + + " from " + toString()); + } + // Now read the next two bytes, the version and the auth to use. + ByteBuffer versionAndAuthBytes = ByteBuffer.allocate(2); + count = channelRead(channel, versionAndAuthBytes); + if (count < 0 || versionAndAuthBytes.remaining() > 0) { + return count; + } + int version = versionAndAuthBytes.get(0); + byte authbyte = versionAndAuthBytes.get(1); + this.authMethod = AuthMethod.valueOf(authbyte); + if (version != CURRENT_VERSION) { + String msg = getFatalConnectionString(version, authbyte); + return doBadPreambleHandling(msg, new WrongVersionException(msg)); + } + if (authMethod == null) { + String msg = getFatalConnectionString(version, authbyte); + return doBadPreambleHandling(msg, new BadAuthException(msg)); + } + if (isSecurityEnabled && authMethod == AuthMethod.SIMPLE) { + AccessControlException ae = new AccessControlException("Authentication is required"); + setupResponse(authFailedResponse, authFailedCall, ae, ae.getMessage()); + responder.doRespond(authFailedCall); + throw ae; + } + if (!isSecurityEnabled && authMethod != AuthMethod.SIMPLE) { + doRawSaslReply(SaslStatus.SUCCESS, new IntWritable( + SaslUtil.SWITCH_TO_SIMPLE_AUTH), null, null); + authMethod = AuthMethod.SIMPLE; + // client has already sent the initial Sasl message and we + // should ignore it. Both client and server should fall back + // to simple auth from now on. + skipInitialSaslHandshake = true; + } + if (authMethod != AuthMethod.SIMPLE) { + useSasl = true; + } + connectionPreambleRead = true; + // Preamble checks out. Go around again to read actual connection header. 
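The preamble consumed above is six bytes on the wire: four bytes of 'HBas' magic, a version byte, and an auth-method byte. A small sketch of building and checking it (not HBase code; the auth code 80 is a placeholder, not necessarily the real constant):

import java.nio.ByteBuffer;

public class PreambleSketch {
  public static void main(String[] args) {
    byte currentVersion = 0;      // CURRENT_VERSION in this patch
    byte authSimple = 80;         // hypothetical auth-method code for the sketch
    ByteBuffer preamble = ByteBuffer.allocate(6);
    preamble.put(new byte[]{'H', 'B', 'a', 's'}).put(currentVersion).put(authSimple);
    // The server compares the first four bytes against the expected header before
    // it even looks at version or auth; a mismatch fails the connection fast.
    ByteBuffer magic = ByteBuffer.wrap(new byte[]{'H', 'B', 'a', 's'});
    ByteBuffer received = ByteBuffer.wrap(preamble.array(), 0, 4);
    System.out.println("magic ok: " + received.equals(magic));
  }
}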
+ dataLengthBuffer.clear(); + continue; + } + // We have read a length and we have read the preamble. It is either the connection header + // or it is a request. + if (data == null) { + dataLengthBuffer.flip(); + int dataLength = dataLengthBuffer.getInt(); + if (dataLength == RpcClient.PING_CALL_ID) { + if (!useWrap) { //covers the !useSasl too + dataLengthBuffer.clear(); + return 0; //ping message + } + } + if (dataLength < 0) { + throw new IllegalArgumentException("Unexpected data length " + + dataLength + "!! from " + getHostAddress()); + } + data = ByteBuffer.allocate(dataLength); + incRpcCount(); // Increment the rpc count + } + count = channelRead(channel, data); + if (data.remaining() == 0) { + dataLengthBuffer.clear(); + data.flip(); + if (skipInitialSaslHandshake) { + data = null; + skipInitialSaslHandshake = false; + continue; + } + boolean headerRead = connectionHeaderRead; + if (useSasl) { + saslReadAndProcess(data.array()); + } else { + processOneRpc(data.array()); + } + this.data = null; + if (!headerRead) { + continue; + } + } else { + // More to read still; go around again. + if (LOG.isTraceEnabled()) LOG.trace("Continue to read rest of data " + data.remaining()); + continue; + } + return count; + } + } + + private String getFatalConnectionString(final int version, final byte authByte) { + return "serverVersion=" + CURRENT_VERSION + + ", clientVersion=" + version + ", authMethod=" + authByte + + ", authSupported=" + (authMethod != null) + " from " + toString(); + } + + private int doBadPreambleHandling(final String msg) throws IOException { + return doBadPreambleHandling(msg, new FatalConnectionException(msg)); + } + + private int doBadPreambleHandling(final String msg, final Exception e) throws IOException { + LOG.warn(msg); + Call fakeCall = new Call(-1, null, null, null, null, this, responder, -1, null); + setupResponse(null, fakeCall, e, msg); + responder.doRespond(fakeCall); + // Returning -1 closes out the connection. + return -1; + } + + // Reads the connection header following version + private void processConnectionHeader(byte[] buf) throws IOException { + this.connectionHeader = ConnectionHeader.parseFrom(buf); + String serviceName = connectionHeader.getServiceName(); + if (serviceName == null) throw new EmptyServiceNameException(); + this.service = getService(services, serviceName); + if (this.service == null) throw new UnknownServiceException(serviceName); + setupCellBlockCodecs(this.connectionHeader); + UserGroupInformation protocolUser = createUser(connectionHeader); + if (!useSasl) { + user = protocolUser; + if (user != null) { + user.setAuthenticationMethod(AuthMethod.SIMPLE.authenticationMethod); + } + } else { + // user is authenticated + user.setAuthenticationMethod(authMethod.authenticationMethod); + //Now we check if this is a proxy user case. If the protocol user is + //different from the 'user', it is a proxy user scenario. However, + //this is not allowed if user authenticated with DIGEST. + if ((protocolUser != null) + && (!protocolUser.getUserName().equals(user.getUserName()))) { + if (authMethod == AuthMethod.DIGEST) { + // Not allowed to doAs if token authentication is used + throw new AccessControlException("Authenticated user (" + user + + ") doesn't match what the client claims to be (" + + protocolUser + ")"); + } else { + // Effective user can be different from authenticated user + // for simple auth or kerberos auth + // The user is the real user. 
Now we create a proxy user + UserGroupInformation realUser = user; + user = UserGroupInformation.createProxyUser(protocolUser + .getUserName(), realUser); + // Now the user is a proxy user, set Authentication method Proxy. + user.setAuthenticationMethod(AuthenticationMethod.PROXY); + } + } + } + } + + /** + * Set up cell block codecs + * @param header + * @throws FatalConnectionException + */ + private void setupCellBlockCodecs(final ConnectionHeader header) + throws FatalConnectionException { + // TODO: Plug in other supported decoders. + if (!header.hasCellBlockCodecClass()) throw new FatalConnectionException("No codec"); + String className = header.getCellBlockCodecClass(); + try { + this.codec = (Codec)Class.forName(className).newInstance(); + } catch (Exception e) { + throw new UnsupportedCellCodecException(className, e); + } + if (!header.hasCellBlockCompressorClass()) return; + className = header.getCellBlockCompressorClass(); + try { + this.compressionCodec = (CompressionCodec)Class.forName(className).newInstance(); + } catch (Exception e) { + throw new UnsupportedCompressionCodecException(className, e); + } + } + + private void processUnwrappedData(byte[] inBuf) throws IOException, + InterruptedException { + ReadableByteChannel ch = Channels.newChannel(new ByteArrayInputStream(inBuf)); + // Read all RPCs contained in the inBuf, even partial ones + while (true) { + int count = -1; + if (unwrappedDataLengthBuffer.remaining() > 0) { + count = channelRead(ch, unwrappedDataLengthBuffer); + if (count <= 0 || unwrappedDataLengthBuffer.remaining() > 0) + return; + } + + if (unwrappedData == null) { + unwrappedDataLengthBuffer.flip(); + int unwrappedDataLength = unwrappedDataLengthBuffer.getInt(); + + if (unwrappedDataLength == RpcClient.PING_CALL_ID) { + if (LOG.isDebugEnabled()) + LOG.debug("Received ping message"); + unwrappedDataLengthBuffer.clear(); + continue; // ping message + } + unwrappedData = ByteBuffer.allocate(unwrappedDataLength); + } + + count = channelRead(ch, unwrappedData); + if (count <= 0 || unwrappedData.remaining() > 0) + return; + + if (unwrappedData.remaining() == 0) { + unwrappedDataLengthBuffer.clear(); + unwrappedData.flip(); + processOneRpc(unwrappedData.array()); + unwrappedData = null; + } + } + } + + private void processOneRpc(byte[] buf) throws IOException, InterruptedException { + if (connectionHeaderRead) { + processRequest(buf); + } else { + processConnectionHeader(buf); + this.connectionHeaderRead = true; + if (!authorizeConnection()) { + // Throw FatalConnectionException wrapping ACE so client does right thing and closes + // down the connection instead of trying to read a non-existent return. + throw new AccessControlException("Connection from " + this + " for service " + + connectionHeader.getServiceName() + " is unauthorized for user: " + user); + } + } + } + + /** + * @param buf Has the request header and the request param and optionally encoded data buffer + * all in this one array. + * @throws IOException + * @throws InterruptedException + */ + protected void processRequest(byte[] buf) throws IOException, InterruptedException { + long totalRequestSize = buf.length; + int offset = 0; + // Here we read in the header. We avoid having pb + // do its default 4k allocation for CodedInputStream. We force it to use backing array.
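For orientation, the frame that processRequest() below takes apart is laid out as: a varint-delimited RequestHeader, then an optional varint-delimited request param, then raw cell-block bytes running to the end of the array (the 4-byte total length was already consumed by readAndProcess()). A writer-side sketch under those assumptions (buildRequest is hypothetical; a real client must also set the matching flags in the header):

```java
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import com.google.protobuf.Message;

class FramingSketch {
  // Hypothetical mirror of the layout processRequest() expects.
  static byte[] buildRequest(Message header, Message param, byte[] cellBlock)
      throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    header.writeDelimitedTo(baos);   // varint size + RequestHeader bytes
    if (param != null) {
      param.writeDelimitedTo(baos);  // varint size + param bytes
    }
    if (cellBlock != null) {
      baos.write(cellBlock);         // cells run to the end of the frame
    }
    return baos.toByteArray();       // 4-byte length prefix is added on send
  }
}
```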
+ CodedInputStream cis = CodedInputStream.newInstance(buf, offset, buf.length); + int headerSize = cis.readRawVarint32(); + offset = cis.getTotalBytesRead(); + RequestHeader header = RequestHeader.newBuilder().mergeFrom(buf, offset, headerSize).build(); + offset += headerSize; + int id = header.getCallId(); + if (LOG.isTraceEnabled()) { + LOG.trace("RequestHeader " + TextFormat.shortDebugString(header) + + " totalRequestSize: " + totalRequestSize + " bytes"); + } + // Enforcing the call queue size, this triggers a retry in the client + // This is a bit late to be doing this check - we have already read in the total request. + if ((totalRequestSize + callQueueSize.get()) > maxQueueSize) { + final Call callTooBig = + new Call(id, this.service, null, null, null, this, responder, totalRequestSize, null); + ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream(); + setupResponse(responseBuffer, callTooBig, new CallQueueTooBigException(), + "Call queue is full, is ipc.server.max.callqueue.size too small?"); + responder.doRespond(callTooBig); + return; + } + MethodDescriptor md = null; + Message param = null; + CellScanner cellScanner = null; + try { + if (header.hasRequestParam() && header.getRequestParam()) { + md = this.service.getDescriptorForType().findMethodByName(header.getMethodName()); + Builder builder = this.service.getRequestPrototype(md).newBuilderForType(); + // To read the varint, I need an inputstream; might as well be a CIS. + cis = CodedInputStream.newInstance(buf, offset, buf.length); + int paramSize = cis.readRawVarint32(); + offset += cis.getTotalBytesRead(); + if (builder != null) { + param = builder.mergeFrom(buf, offset, paramSize).build(); + } + offset += paramSize; + } + if (header.hasCellBlockMeta()) { + cellScanner = ipcUtil.createCellScanner(this.codec, this.compressionCodec, + buf, offset, buf.length); + } + } catch (Throwable t) { + String msg = "Unable to read call parameter from client " + getHostAddress(); + LOG.warn(msg, t); + final Call readParamsFailedCall = + new Call(id, this.service, null, null, null, this, responder, totalRequestSize, null); + ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream(); + setupResponse(responseBuffer, readParamsFailedCall, t, + msg + "; " + t.getMessage()); + responder.doRespond(readParamsFailedCall); + return; + } + + Call call = null; + if (header.hasTraceInfo()) { + call = new Call(id, this.service, md, param, cellScanner, this, responder, totalRequestSize, + new TraceInfo(header.getTraceInfo().getTraceId(), header.getTraceInfo().getParentId())); + } else { + call = new Call(id, this.service, md, param, cellScanner, this, responder, + totalRequestSize, null); + } + callQueueSize.add(totalRequestSize); + Pair headerAndParam = new Pair(header, param); + if (priorityCallQueue != null && getQosLevel(headerAndParam) > highPriorityLevel) { + priorityCallQueue.put(call); + } else if (replicationQueue != null && + getQosLevel(headerAndParam) == HConstants.REPLICATION_QOS) { + replicationQueue.put(call); + } else { + callQueue.put(call); // queue the call; maybe blocked here + } + } + + private boolean authorizeConnection() throws IOException { + try { + // If auth method is DIGEST, the token was obtained by the + // real user for the effective user, therefore not required to + // authorize real user. 
doAs is allowed only for simple or kerberos + // authentication + if (user != null && user.getRealUser() != null + && (authMethod != AuthMethod.DIGEST)) { + ProxyUsers.authorize(user, this.getHostAddress(), conf); + } + authorize(user, connectionHeader, getHostInetAddress()); + if (LOG.isDebugEnabled()) { + LOG.debug("Authorized " + TextFormat.shortDebugString(connectionHeader)); + } + metrics.authorizationSuccess(); + } catch (AuthorizationException ae) { + LOG.debug("Connection authorization failed: " + ae.getMessage(), ae); + metrics.authorizationFailure(); + setupResponse(authFailedResponse, authFailedCall, ae, ae.getMessage()); + responder.doRespond(authFailedCall); + return false; + } + return true; + } + + protected synchronized void close() { + disposeSasl(); + data = null; + this.dataLengthBuffer = null; + if (!channel.isOpen()) + return; + try {socket.shutdownOutput();} catch(Exception ignored) {} // FindBugs DE_MIGHT_IGNORE + if (channel.isOpen()) { + try {channel.close();} catch(Exception ignored) {} + } + try {socket.close();} catch(Exception ignored) {} + } + + private UserGroupInformation createUser(ConnectionHeader head) { + UserGroupInformation ugi = null; + + if (!head.hasUserInfo()) { + return null; + } + UserInformation userInfoProto = head.getUserInfo(); + String effectiveUser = null; + if (userInfoProto.hasEffectiveUser()) { + effectiveUser = userInfoProto.getEffectiveUser(); + } + String realUser = null; + if (userInfoProto.hasRealUser()) { + realUser = userInfoProto.getRealUser(); + } + if (effectiveUser != null) { + if (realUser != null) { + UserGroupInformation realUserUgi = + UserGroupInformation.createRemoteUser(realUser); + ugi = UserGroupInformation.createProxyUser(effectiveUser, realUserUgi); + } else { + ugi = UserGroupInformation.createRemoteUser(effectiveUser); + } + } + return ugi; + } + } + + /** Handles queued calls . */ + private class Handler extends Thread { + private final BlockingQueue myCallQueue; + private MonitoredRPCHandler status; + + public Handler(final BlockingQueue cq, int instanceNumber) { + this.myCallQueue = cq; + this.setDaemon(true); + + String threadName = "IPC Server handler " + instanceNumber + " on " + port; + if (cq == priorityCallQueue) { + // this is just an amazing hack, but it works. + threadName = "PRI " + threadName; + } else if (cq == replicationQueue) { + threadName = "REPL " + threadName; + } + this.setName(threadName); + this.status = TaskMonitor.get().createRPCStatus(threadName); + } + + @Override + public void run() { + LOG.info(getName() + ": starting"); + status.setStatus("starting"); + SERVER.set(RpcServer.this); + while (running) { + try { + status.pause("Waiting for a call"); + Call call = myCallQueue.take(); // pop the queue; maybe blocked here + status.setStatus("Setting up call"); + status.setConnection(call.connection.getHostAddress(), call.connection.getRemotePort()); + if (LOG.isDebugEnabled()) { + UserGroupInformation remoteUser = call.connection.user; + LOG.debug(call.toString() + " executing as " + + ((remoteUser == null)? 
"NULL principal": remoteUser.getUserName())); + } + Throwable errorThrowable = null; + String error = null; + Pair resultPair = null; + CurCall.set(call); + Span currentRequestSpan = NullSpan.getInstance(); + try { + if (!started) { + throw new ServerNotRunningYetException("Server is not running yet"); + } + if (call.tinfo != null) { + currentRequestSpan = Trace.startSpan( + "handling " + call.toString(), call.tinfo, Sampler.ALWAYS); + } + RequestContext.set(User.create(call.connection.user), getRemoteIp(), + call.connection.service); + + // make the call + resultPair = call(call.service, call.md, call.param, call.cellScanner, call.timestamp, + status); + } catch (Throwable e) { + LOG.debug(getName() + ": " + call.toString(), e); + errorThrowable = e; + error = StringUtils.stringifyException(e); + } finally { + currentRequestSpan.stop(); + // Must always clear the request context to avoid leaking + // credentials between requests. + RequestContext.clear(); + } + CurCall.set(null); + callQueueSize.add(call.getSize() * -1); + // Set the response for undelayed calls and delayed calls with + // undelayed responses. + if (!call.isDelayed() || !call.isReturnValueDelayed()) { + Message param = resultPair != null? resultPair.getFirst(): null; + CellScanner cells = resultPair != null? resultPair.getSecond(): null; + call.setResponse(param, cells, errorThrowable, error); + } + call.sendResponseIfReady(); + status.markComplete("Sent response"); + } catch (InterruptedException e) { + if (running) { // unexpected -- log it + LOG.info(getName() + ": caught: " + StringUtils.stringifyException(e)); + } + } catch (OutOfMemoryError e) { + if (errorHandler != null) { + if (errorHandler.checkOOME(e)) { + LOG.info(getName() + ": exiting on OutOfMemoryError"); + return; + } + } else { + // rethrow if no handler + throw e; + } + } catch (ClosedChannelException cce) { + LOG.warn(getName() + ": caught a ClosedChannelException, " + + "this means that the server was processing a " + + "request but the client went away. The error message was: " + + cce.getMessage()); + } catch (Exception e) { + LOG.warn(getName() + ": caught: " + StringUtils.stringifyException(e)); + } + } + LOG.info(getName() + ": exiting"); + } + } + + /** + * Datastructure for passing a {@link BlockingService} and its associated class of + * protobuf service interface. For example, a server that fielded what is defined + * in the client protobuf service would pass in an implementation of the client blocking service + * and then its ClientService.BlockingInterface.class. Used checking connection setup. + */ + public static class BlockingServiceAndInterface { + private final BlockingService service; + private final Class serviceInterface; + public BlockingServiceAndInterface(final BlockingService service, + final Class serviceInterface) { + this.service = service; + this.serviceInterface = serviceInterface; + } + public Class getServiceInterface() { + return this.serviceInterface; + } + public BlockingService getBlockingService() { + return this.service; + } + } + + + /** + * Minimal setup. Used by tests mostly. + * @param service + * @param isa + * @param conf + * @throws IOException + */ + public RpcServer(final BlockingService service, final InetSocketAddress isa, + final Configuration conf) + throws IOException { + this(null, "generic", Lists.newArrayList(new BlockingServiceAndInterface(service, null)), + isa, 3, 3, conf, + HConstants.QOS_THRESHOLD); + } + + /** + * Constructs a server listening on the named port and address. 
+ * @param serverInstance hosting instance of {@link Server}. We will do authentication checks if + * an instance is passed; pass null for no authentication check. + * @param name Used in keying this rpc server's metrics and for naming the Listener thread. + * @param services A list of services. + * @param isa Where to listen + * @param handlerCount the number of handler threads that will be used to process calls + * @param priorityHandlerCount How many threads for priority handling. + * @param conf + * @param highPriorityLevel + * @throws IOException + */ + public RpcServer(final Server serverInstance, final String name, + final List<BlockingServiceAndInterface> services, + final InetSocketAddress isa, int handlerCount, int priorityHandlerCount, Configuration conf, + int highPriorityLevel) + throws IOException { + this.serverInstance = serverInstance; + this.services = services; + this.isa = isa; + this.conf = conf; + this.handlerCount = handlerCount; + this.priorityHandlerCount = priorityHandlerCount; + this.socketSendBufferSize = 0; + this.maxQueueLength = this.conf.getInt("ipc.server.max.callqueue.length", + handlerCount * DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER); + this.maxQueueSize = + this.conf.getInt("ipc.server.max.callqueue.size", DEFAULT_MAX_CALLQUEUE_SIZE); + this.readThreads = conf.getInt("ipc.server.read.threadpool.size", 10); + this.callQueue = new LinkedBlockingQueue<Call>(maxQueueLength); + if (priorityHandlerCount > 0) { + this.priorityCallQueue = new LinkedBlockingQueue<Call>(maxQueueLength); // TODO hack on size + } else { + this.priorityCallQueue = null; + } + this.highPriorityLevel = highPriorityLevel; + this.maxIdleTime = 2*conf.getInt("ipc.client.connection.maxidletime", 1000); + this.maxConnectionsToNuke = conf.getInt("ipc.client.kill.max", 10); + this.thresholdIdleConnections = conf.getInt("ipc.client.idlethreshold", 4000); + this.purgeTimeout = conf.getLong("ipc.client.call.purge.timeout", + 2 * HConstants.DEFAULT_HBASE_RPC_TIMEOUT); + this.numOfReplicationHandlers = conf.getInt("hbase.regionserver.replication.handler.count", 3); + if (numOfReplicationHandlers > 0) { + this.replicationQueue = new LinkedBlockingQueue<Call>(maxQueueSize); + } + + this.warnResponseTime = conf.getInt(WARN_RESPONSE_TIME, DEFAULT_WARN_RESPONSE_TIME); + this.warnResponseSize = conf.getInt(WARN_RESPONSE_SIZE, DEFAULT_WARN_RESPONSE_SIZE); + + // Start the listener here and let it bind to the port + listener = new Listener(name); + this.port = listener.getAddress().getPort(); + + this.metrics = new MetricsHBaseServer(name, new MetricsHBaseServerWrapperImpl(this)); + this.tcpNoDelay = conf.getBoolean("ipc.server.tcpnodelay", true); + this.tcpKeepAlive = conf.getBoolean("ipc.server.tcpkeepalive", true); + + this.warnDelayedCalls = conf.getInt(WARN_DELAYED_CALLS, DEFAULT_WARN_DELAYED_CALLS); + this.delayedCalls = new AtomicInteger(0); + this.ipcUtil = new IPCUtil(conf); + + + // Create the responder here + responder = new Responder(); + this.authorize = conf.getBoolean(HADOOP_SECURITY_AUTHORIZATION, false); + this.isSecurityEnabled = User.isHBaseSecurityEnabled(this.conf); + if (isSecurityEnabled) { + HBaseSaslRpcServer.init(conf); + } + } + + /** + * Subclasses of RpcServer can override this to provide their own + * Connection implementations. + */ + protected Connection getConnection(SocketChannel channel, long time) { + return new Connection(channel, time); + } + + /** + * Setup response for the IPC Call.
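The constructor above takes all of its tuning from Configuration; a sketch of the knobs it reads (key names are from the code above, the values here are purely illustrative):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

class RpcServerTuningSketch {
  static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Queue bounds; defaults are handlerCount * DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER
    // entries and DEFAULT_MAX_CALLQUEUE_SIZE bytes respectively.
    conf.setInt("ipc.server.max.callqueue.length", 300);
    conf.setInt("ipc.server.max.callqueue.size", 100 * 1024 * 1024);
    conf.setInt("ipc.server.read.threadpool.size", 10); // Listener reader threads
    conf.setInt("hbase.regionserver.replication.handler.count", 3);
    conf.setBoolean("ipc.server.tcpnodelay", true);
    conf.setBoolean("ipc.server.tcpkeepalive", true);
    return conf;
  }
}
```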
+ * + * @param response buffer to serialize the response into + * @param call {@link Call} to which we are setting up the response + * @param t {@link Throwable} that caused the call to fail, or null if it succeeded + * @param error error message, if the call failed + * @throws IOException + */ + private void setupResponse(ByteArrayOutputStream response, Call call, Throwable t, String error) + throws IOException { + if (response != null) response.reset(); + call.setResponse(null, null, t, error); + } + + protected void closeConnection(Connection connection) { + synchronized (connectionList) { + if (connectionList.remove(connection)) { + numConnections--; + } + } + connection.close(); + } + + Configuration getConf() { + return conf; + } + + /** Sets the socket buffer size used for responding to RPCs. + * @param size send size + */ + @Override + public void setSocketSendBufSize(int size) { this.socketSendBufferSize = size; } + + /** Starts the service. Must be called before any calls will be handled. */ + @Override + public void start() { + startThreads(); + openServer(); + } + + /** + * Open a previously started server. + */ + @Override + public void openServer() { + started = true; + } + + /** + * Starts the service threads but does not allow requests to be responded to yet. + * Clients will get {@link ServerNotRunningYetException} instead. + */ + @Override + public synchronized void startThreads() { + AuthenticationTokenSecretManager mgr = createSecretManager(); + if (mgr != null) { + setSecretManager(mgr); + mgr.start(); + } + this.authManager = new ServiceAuthorizationManager(); + HBasePolicyProvider.init(conf, authManager); + responder.start(); + listener.start(); + handlers = startHandlers(callQueue, handlerCount); + priorityHandlers = startHandlers(priorityCallQueue, priorityHandlerCount); + replicationHandlers = startHandlers(replicationQueue, numOfReplicationHandlers); + } + + @Override + public void refreshAuthManager(PolicyProvider pp) { + this.authManager.refresh(this.conf, pp); + } + + private Handler[] startHandlers(BlockingQueue<Call> queue, int numOfHandlers) { + if (numOfHandlers <= 0) { + return null; + } + Handler[] handlers = new Handler[numOfHandlers]; + for (int i = 0; i < numOfHandlers; i++) { + handlers[i] = new Handler(queue, i); + handlers[i].start(); + } + return handlers; + } + + private AuthenticationTokenSecretManager createSecretManager() { + if (!isSecurityEnabled) return null; + if (serverInstance == null) return null; + if (!(serverInstance instanceof org.apache.hadoop.hbase.Server)) return null; + org.apache.hadoop.hbase.Server server = (org.apache.hadoop.hbase.Server)serverInstance; + Configuration conf = server.getConfiguration(); + long keyUpdateInterval = + conf.getLong("hbase.auth.key.update.interval", 24*60*60*1000); + long maxAge = + conf.getLong("hbase.auth.token.max.lifetime", 7*24*60*60*1000); + return new AuthenticationTokenSecretManager(conf, server.getZooKeeper(), + server.getServerName().toString(), keyUpdateInterval, maxAge); + } + + public SecretManager<? extends TokenIdentifier> getSecretManager() { + return this.secretManager; + } + + @SuppressWarnings("unchecked") + public void setSecretManager(SecretManager<? extends TokenIdentifier> secretManager) { + this.secretManager = (SecretManager<TokenIdentifier>) secretManager; + } + + /** + * This is a server side method, which is invoked over RPC. On success + * the response has a protobuf payload. On failure, the + * exception name and the stack trace are returned in the protobuf response.
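The call() implementation that follows is the generic protobuf service-reflection pattern; condensed to its essence as a hypothetical standalone helper (names mirror the surrounding code, metrics and tracing stripped):

```java
import java.io.IOException;
import com.google.protobuf.BlockingService;
import com.google.protobuf.Descriptors.MethodDescriptor;
import com.google.protobuf.Message;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;

class DispatchSketch {
  // Hypothetical condensation of the reflective dispatch done by call() below.
  static Message dispatch(BlockingService service, String methodName,
      byte[] paramBytes, RpcController controller)
      throws IOException, ServiceException {
    MethodDescriptor md =
        service.getDescriptorForType().findMethodByName(methodName); // look up by name
    Message param = service.getRequestPrototype(md)
        .newBuilderForType().mergeFrom(paramBytes).build();          // decode request
    return service.callBlockingMethod(md, controller, param);        // invoke impl
  }
}
```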
+ */ + public Pair<Message, CellScanner> call(BlockingService service, MethodDescriptor md, + Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status) + throws IOException { + try { + status.setRPC(md.getName(), new Object[]{param}, receiveTime); + // TODO: Review after we add in encoded data blocks. + status.setRPCPacket(param); + status.resume("Servicing call"); + //get an instance of the method arg type + long startTime = System.currentTimeMillis(); + PayloadCarryingRpcController controller = new PayloadCarryingRpcController(cellScanner); + Message result = service.callBlockingMethod(md, controller, param); + int processingTime = (int) (System.currentTimeMillis() - startTime); + int qTime = (int) (startTime - receiveTime); + if (LOG.isTraceEnabled()) { + LOG.trace(CurCall.get().toString() + + ", response " + TextFormat.shortDebugString(result) + + " queueTime: " + qTime + + " processingTime: " + processingTime); + } + metrics.dequeuedCall(qTime); + metrics.processedCall(processingTime); + long responseSize = result.getSerializedSize(); + // log any RPC responses that are slower than the configured warn + // response time or larger than configured warning size + boolean tooSlow = (processingTime > warnResponseTime && warnResponseTime > -1); + boolean tooLarge = (responseSize > warnResponseSize && warnResponseSize > -1); + if (tooSlow || tooLarge) { + // when tagging, we let TooLarge trump TooSlow to keep output simple + // note that large responses will often also be slow. + StringBuilder buffer = new StringBuilder(256); + buffer.append(md.getName()); + buffer.append("("); + buffer.append(param.getClass().getName()); + buffer.append(")"); + logResponse(new Object[]{param}, + md.getName(), buffer.toString(), (tooLarge ? "TooLarge" : "TooSlow"), + status.getClient(), startTime, processingTime, qTime, + responseSize); + } + return new Pair<Message, CellScanner>(result, + controller != null? controller.cellScanner(): null); + } catch (Throwable e) { + // The above callBlockingMethod will always wrap any thrown exception in a SE. Strip the + // SE wrapper before putting it on the wire. It's needed to adhere to the pb Service + // Interface but we don't need to pass it over the wire. + if (e instanceof ServiceException) e = e.getCause(); + if (e instanceof IOException) throw (IOException)e; + LOG.error("Unexpected throwable object ", e); + throw new IOException(e.getMessage(), e); + } + } + + /** + * Logs an RPC response to the LOG file, producing valid JSON objects for + * client Operations. + * @param params The parameters received in the call. + * @param methodName The name of the method invoked + * @param call The string representation of the call + * @param tag The tag that will be used to indicate this event in the log. + * @param clientAddress The address of the client who made this call. + * @param startTime The time that the call was initiated, in ms. + * @param processingTime The duration that the call took to run, in ms. + * @param qTime The duration that the call spent on the queue + * prior to being initiated, in ms. + * @param responseSize The size in bytes of the response buffer.
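The slow/large-response record that logResponse() below emits is just a Jackson-serialized Map. The same pattern in isolation, with illustrative values, assuming the pre-2.x Jackson (org.codehaus.jackson) that HBase shipped at the time:

```java
import java.util.HashMap;
import java.util.Map;
import org.codehaus.jackson.map.ObjectMapper;

class SlowLogSketch {
  static String toJson() throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    Map<String, Object> responseInfo = new HashMap<String, Object>();
    responseInfo.put("starttimems", 1364000000000L); // illustrative values only
    responseInfo.put("processingtimems", 1250);
    responseInfo.put("queuetimems", 30);
    responseInfo.put("responsesize", 8L * 1024 * 1024);
    responseInfo.put("client", "10.0.0.1:54321");
    responseInfo.put("method", "Scan");
    return mapper.writeValueAsString(responseInfo);  // one JSON object per event
  }
}
```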
+ */ + void logResponse(Object[] params, String methodName, String call, String tag, + String clientAddress, long startTime, int processingTime, int qTime, + long responseSize) + throws IOException { + // for JSON encoding + ObjectMapper mapper = new ObjectMapper(); + // base information that is reported regardless of type of call + Map<String, Object> responseInfo = new HashMap<String, Object>(); + responseInfo.put("starttimems", startTime); + responseInfo.put("processingtimems", processingTime); + responseInfo.put("queuetimems", qTime); + responseInfo.put("responsesize", responseSize); + responseInfo.put("client", clientAddress); + responseInfo.put("class", serverInstance == null? "": serverInstance.getClass().getSimpleName()); + responseInfo.put("method", methodName); + if (params.length == 2 && serverInstance instanceof HRegionServer && + params[0] instanceof byte[] && + params[1] instanceof Operation) { + // if the slow process is a query, we want to log its table as well + // as its own fingerprint + byte [] tableName = + HRegionInfo.parseRegionName((byte[]) params[0])[0]; + responseInfo.put("table", Bytes.toStringBinary(tableName)); + // annotate the response map with operation details + responseInfo.putAll(((Operation) params[1]).toMap()); + // report to the log file + LOG.warn("(operation" + tag + "): " + + mapper.writeValueAsString(responseInfo)); + } else if (params.length == 1 && serverInstance instanceof HRegionServer && + params[0] instanceof Operation) { + // annotate the response map with operation details + responseInfo.putAll(((Operation) params[0]).toMap()); + // report to the log file + LOG.warn("(operation" + tag + "): " + + mapper.writeValueAsString(responseInfo)); + } else { + // can't get JSON details, so just report call.toString() along with + // a more generic tag. + responseInfo.put("call", call); + LOG.warn("(response" + tag + "): " + mapper.writeValueAsString(responseInfo)); + } + } + + /** Stops the service. No new calls will be handled after this is called. */ + @Override + public synchronized void stop() { + LOG.info("Stopping server on " + port); + running = false; + stopHandlers(handlers); + stopHandlers(priorityHandlers); + stopHandlers(replicationHandlers); + listener.interrupt(); + listener.doStop(); + responder.interrupt(); + notifyAll(); + } + + private void stopHandlers(Handler[] handlers) { + if (handlers != null) { + for (Handler handler : handlers) { + if (handler != null) { + handler.interrupt(); + } + } + } + } + + /** Wait for the server to be stopped. + * Does not wait for all subthreads to finish. + * See {@link #stop()}. + * @throws InterruptedException e + */ + @Override + public synchronized void join() throws InterruptedException { + while (running) { + wait(); + } + } + + /** + * Return the socket (ip+port) on which the RPC server is listening. + * @return the socket (ip+port) on which the RPC server is listening. + */ + @Override + public synchronized InetSocketAddress getListenerAddress() { + return listener.getAddress(); + } + + /** + * Set the handler for calling out of RPC for error conditions.
+ * @param handler the handler implementation + */ + @Override + public void setErrorHandler(HBaseRPCErrorHandler handler) { + this.errorHandler = handler; + } /** * Returns the metrics instance for reporting RPC call statistics */ - MetricsHBaseServer getMetrics(); + public MetricsHBaseServer getMetrics() { + return metrics; + } - public void setQosFunction(Function<Pair<RequestHeader, Message>, Integer> newFunc); -} \ No newline at end of file + /** + * Authorize the incoming client connection. + * + * @param user client user + * @param connection incoming connection + * @param addr InetAddress of incoming connection + * @throws org.apache.hadoop.security.authorize.AuthorizationException when the client isn't authorized to talk to the protocol + */ + @SuppressWarnings("static-access") + public void authorize(UserGroupInformation user, ConnectionHeader connection, InetAddress addr) + throws AuthorizationException { + if (authorize) { + Class<?> c = getServiceInterface(services, connection.getServiceName()); + this.authManager.authorize(user != null ? user : null, c, getConf(), addr); + } + } + + /** + * When the read or write buffer size is larger than this limit, i/o will be + * done in chunks of this size. Most RPC requests and responses would + * be smaller. + */ + private static int NIO_BUFFER_LIMIT = 64 * 1024; //should not be more than 64KB. + + /** + * This is a wrapper around {@link java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer)}. + * If the amount of data is large, it writes to the channel in smaller chunks. + * This is to avoid the JDK creating many direct buffers as the size of + * the buffer increases. This also minimizes extra copies in the NIO layer + * as a result of multiple write operations required to write a large + * buffer. + * + * @param channel writable byte channel to write to + * @param buffer buffer to write + * @return number of bytes written + * @throws java.io.IOException e + * @see java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer) + */ + protected int channelWrite(WritableByteChannel channel, + ByteBuffer buffer) throws IOException { + + int count = (buffer.remaining() <= NIO_BUFFER_LIMIT) ? + channel.write(buffer) : channelIO(null, channel, buffer); + if (count > 0) { + metrics.sentBytes(count); + } + return count; + } + + /** + * This is a wrapper around {@link java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer)}. + * If the amount of data is large, it reads from the channel in smaller chunks. + * This is to avoid the JDK creating many direct buffers as the size of + * the ByteBuffer increases. There should not be any performance degradation. + * + * @param channel readable byte channel to read from + * @param buffer buffer to read into + * @return number of bytes read + * @throws java.io.IOException e + * @see java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer) + */ + protected int channelRead(ReadableByteChannel channel, + ByteBuffer buffer) throws IOException { + + int count = (buffer.remaining() <= NIO_BUFFER_LIMIT) ? + channel.read(buffer) : channelIO(channel, null, buffer); + if (count > 0) { + metrics.receivedBytes(count); + } + return count; + } + + /** + * Helper for {@link #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer)} + * and {@link #channelWrite(java.nio.channels.WritableByteChannel, java.nio.ByteBuffer)}. Only + * one of readCh or writeCh should be non-null.
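channelIO(), reproduced next, is the whole trick behind these wrappers: temporarily lower the buffer's limit so the JDK never sees more than NIO_BUFFER_LIMIT bytes per call. Distilled to a single iteration, for the write side (a sketch, not the helper itself):

```java
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;

class ChunkedWriteSketch {
  static int writeChunk(WritableByteChannel ch, ByteBuffer buf) throws IOException {
    final int NIO_BUFFER_LIMIT = 64 * 1024;
    int originalLimit = buf.limit();
    try {
      int ioSize = Math.min(buf.remaining(), NIO_BUFFER_LIMIT);
      buf.limit(buf.position() + ioSize); // expose at most 64KB to the JDK
      return ch.write(buf);               // may still write less; caller loops
    } finally {
      buf.limit(originalLimit);           // restore the caller's view of the buffer
    }
  }
}
```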
+ * + * @param readCh read channel + * @param writeCh write channel + * @param buf buffer to read or write into/out of + * @return bytes read or written + * @throws java.io.IOException e + * @see #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer) + * @see #channelWrite(java.nio.channels.WritableByteChannel, java.nio.ByteBuffer) + */ + private static int channelIO(ReadableByteChannel readCh, + WritableByteChannel writeCh, + ByteBuffer buf) throws IOException { + + int originalLimit = buf.limit(); + int initialRemaining = buf.remaining(); + int ret = 0; + + while (buf.remaining() > 0) { + try { + int ioSize = Math.min(buf.remaining(), NIO_BUFFER_LIMIT); + buf.limit(buf.position() + ioSize); + + ret = (readCh == null) ? writeCh.write(buf) : readCh.read(buf); + + if (ret < ioSize) { + break; + } + + } finally { + buf.limit(originalLimit); + } + } + + int nBytes = initialRemaining - buf.remaining(); + return (nBytes > 0) ? nBytes : ret; + } + + /** + * Needed for delayed calls. We need to be able to store the current call + * so that we can complete it later. + * @return Call the server is currently handling. + */ + public static RpcCallContext getCurrentCall() { + return CurCall.get(); + } + + /** + * @param serviceName Some arbitrary string that represents a 'service'. + * @param services Available service instances + * @return Matching BlockingServiceAndInterface pair + */ + static BlockingServiceAndInterface getServiceAndInterface( + final List<BlockingServiceAndInterface> services, final String serviceName) { + for (BlockingServiceAndInterface bs : services) { + if (bs.getBlockingService().getDescriptorForType().getName().equals(serviceName)) { + return bs; + } + } + return null; + } + + /** + * @param serviceName Some arbitrary string that represents a 'service'. + * @param services Available services and their service interfaces. + * @return Service interface class for serviceName + */ + static Class<?> getServiceInterface( + final List<BlockingServiceAndInterface> services, + final String serviceName) { + BlockingServiceAndInterface bsasi = + getServiceAndInterface(services, serviceName); + return bsasi == null? null: bsasi.getServiceInterface(); + } + + /** + * @param serviceName Some arbitrary string that represents a 'service'. + * @param services Available services and their service interfaces. + * @return BlockingService that goes with the passed serviceName + */ + static BlockingService getService( + final List<BlockingServiceAndInterface> services, + final String serviceName) { + BlockingServiceAndInterface bsasi = + getServiceAndInterface(services, serviceName); + return bsasi == null? null: bsasi.getBlockingService(); + } + + /** Returns the remote side ip address when invoked inside an RPC. + * Returns null in case of an error. + * @return InetAddress + */ + public static InetAddress getRemoteIp() { + Call call = CurCall.get(); + if (call != null) { + return call.connection.socket.getInetAddress(); + } + return null; + } + + /** Returns remote address as a string when invoked inside an RPC. + * Returns null in case of an error. + * @return String + */ + public static String getRemoteAddress() { + Call call = CurCall.get(); + if (call != null) { + return call.connection.getHostAddress(); + } + return null; + } + + /** + * May be called under + * {@code #call(Class, RpcRequestBody, long, MonitoredRPCHandler)} implementations, + * and under protobuf methods of parameters and return values. + * Permits applications to access the server context.
+ * @return the server instance called under or null + */ + public static RpcServerInterface get() { + return SERVER.get(); + } + + /** + * A convenience method to bind to a given address and report + * better exceptions if the address is not a valid host. + * @param socket the socket to bind + * @param address the address to bind to + * @param backlog the number of connections allowed in the queue + * @throws BindException if the address can't be bound + * @throws UnknownHostException if the address isn't a valid host name + * @throws IOException other random errors from bind + */ + public static void bind(ServerSocket socket, InetSocketAddress address, + int backlog) throws IOException { + try { + socket.bind(address, backlog); + } catch (BindException e) { + BindException bindException = + new BindException("Problem binding to " + address + " : " + + e.getMessage()); + bindException.initCause(e); + throw bindException; + } catch (SocketException e) { + // If they try to bind to a different host's address, give a better + // error message. + if ("Unresolved address".equals(e.getMessage())) { + throw new UnknownHostException("Invalid hostname for server: " + + address.getHostName()); + } + throw e; + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java new file mode 100644 index 00000000000..91f09e21571 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java @@ -0,0 +1,76 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.ipc; + +import java.io.IOException; +import java.net.InetSocketAddress; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.security.authorize.PolicyProvider; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Function; +import com.google.protobuf.BlockingService; +import com.google.protobuf.Descriptors.MethodDescriptor; +import com.google.protobuf.Message; +import com.google.protobuf.ServiceException; + +@InterfaceAudience.Private +public interface RpcServerInterface { + // TODO: Needs cleanup. Why a 'start', and then a 'startThreads' and an 'openServer'? 
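Note the lifecycle split that the TODO above questions: startThreads() brings up the listener, responder and handler threads while calls still fail with ServerNotRunningYetException, openServer() is what finally lets requests through, and start() just does both. A hypothetical usage sketch, mirroring how HMaster stages its startup:

```java
void bringUp(RpcServerInterface rpcServer) {
  rpcServer.startThreads(); // threads running; calls rejected until opened
  // ... finish server-side initialization here (e.g. ZooKeeper registration) ...
  rpcServer.openServer();   // now incoming calls are serviced
  // rpcServer.start() is the one-shot equivalent of the two calls above
}
```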
+ + void setSocketSendBufSize(int size); + + void start(); + + void stop(); + + void join() throws InterruptedException; + + InetSocketAddress getListenerAddress(); + + Pair<Message, CellScanner> call(BlockingService service, MethodDescriptor md, + Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status) + throws IOException, ServiceException; + + void setErrorHandler(HBaseRPCErrorHandler handler); + + void openServer(); + + void startThreads(); + + /** + * Returns the metrics instance for reporting RPC call statistics + */ + MetricsHBaseServer getMetrics(); + + public void setQosFunction(Function<Pair<RequestHeader, Message>, Integer> newFunc); + + /** + * Refresh authentication manager policy. + * @param pp + */ + @VisibleForTesting + void refreshAuthManager(PolicyProvider pp); +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/UnknownServiceException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/UnknownServiceException.java new file mode 100644 index 00000000000..53f3e554a2f --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/UnknownServiceException.java @@ -0,0 +1,25 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.hadoop.hbase.ipc; + +@SuppressWarnings("serial") +public class UnknownServiceException extends FatalConnectionException { + UnknownServiceException(final String msg) { + super(msg); + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java index aefe3e387fa..50cc4d3b171 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java @@ -543,7 +543,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { + Bytes.toStringBinary(row)); byte[] regionName = location.getRegionInfo().getRegionName(); if(!useSecure) { - success = ProtobufUtil.bulkLoadHFile(server, famPaths, regionName, assignSeqIds); + success = ProtobufUtil.bulkLoadHFile(stub, famPaths, regionName, assignSeqIds); } else { HTable table = new HTable(conn.getConfiguration(), tableName); secureClient = new SecureBulkLoadClient(table); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java index 11c1dcd2f46..aaf5ec19574 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java @@ -24,9 +24,9 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.client.HConnectable; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; -import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java index e3a3ee3a3e7..3a3e8d33552 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java @@ -156,7 +156,7 @@ class ActiveMasterManager extends ZooKeeperListener { // We are the master, return startupStatus.setStatus("Successfully registered as active master."); this.clusterHasActiveMaster.set(true); - LOG.info("Master=" + this.sn); + LOG.info("Registered Active Master=" + this.sn); return true; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 2beac775897..d2a53820094 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -53,9 +53,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HealthCheckChore; -import org.apache.hadoop.hbase.MasterAdminProtocol; -import org.apache.hadoop.hbase.MasterMonitorProtocol; -import 
org.apache.hadoop.hbase.RegionServerStatusProtocol; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; @@ -79,9 +76,9 @@ import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; import org.apache.hadoop.hbase.exceptions.UnknownRegionException; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.executor.ExecutorType; -import org.apache.hadoop.hbase.ipc.HBaseServer; -import org.apache.hadoop.hbase.ipc.HBaseServerRPC; +import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; import org.apache.hadoop.hbase.ipc.ServerRpcController; import org.apache.hadoop.hbase.master.balancer.BalancerChore; import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore; @@ -109,6 +106,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest; @@ -161,6 +159,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshot import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos; import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest; @@ -169,6 +168,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDe import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; @@ -227,16 +227,15 @@ import com.google.protobuf.ServiceException; * *

You can also shutdown just this master. Call {@link #stopMaster()}. * - * @see MasterMonitorProtocol - * @see MasterAdminProtocol - * @see RegionServerStatusProtocol * @see Watcher */ @InterfaceAudience.Private @SuppressWarnings("deprecation") public class HMaster extends HasThread -implements MasterMonitorProtocol, MasterAdminProtocol, RegionServerStatusProtocol, MasterServices, -Server { +implements MasterMonitorProtos.MasterMonitorService.BlockingInterface, +MasterAdminProtos.MasterAdminService.BlockingInterface, +RegionServerStatusProtos.RegionServerStatusService.BlockingInterface, +MasterServices, Server { private static final Log LOG = LogFactory.getLog(HMaster.class.getName()); // MASTER is name of the webapp and the attribute name used stuffing this @@ -260,7 +259,7 @@ Server { private LoadBalancerTracker loadBalancerTracker; // RPC server for the HMaster - private final RpcServer rpcServer; + private final RpcServerInterface rpcServer; // Set after we've called HBaseServer#openServer and ready to receive RPCs. // Set back to false after we stop rpcServer. Used by tests. private volatile boolean rpcServerOpen = false; @@ -367,8 +366,6 @@ Server { this.conf = new Configuration(conf); // Disable the block cache on the master this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f); - // Set how many times to retry talking to another server over HConnection. - HConnectionManager.setServerSideHConnectionRetries(this.conf, LOG); // Server to handle client requests. String hostname = Strings.domainNamePointerToHostName(DNS.getDefaultHost( conf.get("hbase.master.dns.interface", "default"), @@ -387,23 +384,22 @@ Server { throw new IllegalArgumentException("Failed resolve of bind address " + initialIsa); } } + String name = "master/" + initialIsa.toString(); + // Set how many times to retry talking to another server over HConnection. + HConnectionManager.setServerSideHConnectionRetries(this.conf, name, LOG); int numHandlers = conf.getInt("hbase.master.handler.count", conf.getInt("hbase.regionserver.handler.count", 25)); - this.rpcServer = HBaseServerRPC.getServer(MasterMonitorProtocol.class, this, - new Class[]{MasterMonitorProtocol.class, - MasterAdminProtocol.class, RegionServerStatusProtocol.class}, - initialIsa.getHostName(), // This is bindAddress if set else it's hostname - initialIsa.getPort(), - numHandlers, - 0, // we dont use high priority handlers in master - conf.getBoolean("hbase.rpc.verbose", false), conf, - 0); // this is a DNC w/o high priority handlers + this.rpcServer = new RpcServer(this, name, getServices(), + initialIsa, // BindAddress is IP we got for this server. + numHandlers, + 0, // we dont use high priority handlers in master + conf, + 0); // this is a DNC w/o high priority handlers // Set our address. 
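With the protocol marker interfaces gone, a server now hands RpcServer an explicit list of protobuf services, pairing each BlockingService with its BlockingInterface class (the class is used when checking connection setup). A pared-down sketch of the registration the getServices() helper below performs; 'impl', 'bindAddress', and the name string are placeholders:

```java
RpcServerInterface buildRpcServer(Server server,
    MasterMonitorService.BlockingInterface impl,
    InetSocketAddress bindAddress, Configuration conf) throws IOException {
  List<BlockingServiceAndInterface> bssi = new ArrayList<BlockingServiceAndInterface>();
  bssi.add(new BlockingServiceAndInterface(
      MasterMonitorService.newReflectiveBlockingService(impl), // impl fields calls
      MasterMonitorService.BlockingInterface.class));          // used in authorize()
  return new RpcServer(server, "master/example:60000", bssi,
      bindAddress, 25, 0 /* no priority handlers */, conf, 0);
}
```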
this.isa = this.rpcServer.getListenerAddress(); - this.serverName = new ServerName(hostname, - this.isa.getPort(), System.currentTimeMillis()); + this.serverName = new ServerName(hostname, this.isa.getPort(), System.currentTimeMillis()); this.rsFatals = new MemoryBoundedLogMessageBuffer( - conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024)); + conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024)); // login the zookeeper client principal (if using security) ZKUtil.loginClient(this.conf, "hbase.zookeeper.client.keytab.file", @@ -457,6 +453,23 @@ Server { } } + /** + * @return list of blocking services and their security info classes that this server supports + */ + private List getServices() { + List bssi = new ArrayList(3); + bssi.add(new BlockingServiceAndInterface( + MasterMonitorProtos.MasterMonitorService.newReflectiveBlockingService(this), + MasterMonitorProtos.MasterMonitorService.BlockingInterface.class)); + bssi.add(new BlockingServiceAndInterface( + MasterAdminProtos.MasterAdminService.newReflectiveBlockingService(this), + MasterAdminProtos.MasterAdminService.BlockingInterface.class)); + bssi.add(new BlockingServiceAndInterface( + RegionServerStatusProtos.RegionServerStatusService.newReflectiveBlockingService(this), + RegionServerStatusProtos.RegionServerStatusService.BlockingInterface.class)); + return bssi; + } + /** * Stall startup if we are designated a backup master; i.e. we want someone * else to become the master before proceeding. @@ -612,10 +625,10 @@ Server { boolean wasUp = this.clusterStatusTracker.isClusterUp(); if (!wasUp) this.clusterStatusTracker.setClusterUp(); - LOG.info("Server active/primary master; " + this.serverName + + LOG.info("Server active/primary master=" + this.serverName + ", sessionid=0x" + Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) + - ", cluster-up flag was=" + wasUp); + ", setting cluster-up flag (Was=" + wasUp + ")"); // create the snapshot manager this.snapshotManager = new SnapshotManager(this, this.metricsMaster); @@ -765,7 +778,7 @@ Server { enableServerShutdownHandler(); // Update meta with new PB serialization if required. i.e migrate all HRI to PB serialization - // in meta. This must happen before we assign all user regions or else the assignment will + // in meta. This must happen before we assign all user regions or else the assignment will // fail. // TODO: Remove this after 0.96, when we do 0.98. org.apache.hadoop.hbase.catalog.MetaMigrationConvertingToPB @@ -995,7 +1008,6 @@ Server { * need to install an unexpected exception handler. */ void startServiceThreads() throws IOException{ - // Start the executor service pools this.executorService.startExecutorService(ExecutorType.MASTER_OPEN_REGION, conf.getInt("hbase.master.executor.openregion.threads", 5)); @@ -1037,22 +1049,22 @@ Server { this.infoServer.start(); } - // Start the health checker - if (this.healthCheckChore != null) { - Threads.setDaemonThreadRunning(this.healthCheckChore.getThread(), n + ".healthChecker"); - } + // Start the health checker + if (this.healthCheckChore != null) { + Threads.setDaemonThreadRunning(this.healthCheckChore.getThread(), n + ".healthChecker"); + } // Start allowing requests to happen. this.rpcServer.openServer(); this.rpcServerOpen = true; - if (LOG.isDebugEnabled()) { - LOG.debug("Started service threads"); + if (LOG.isTraceEnabled()) { + LOG.trace("Started service threads"); } } /** * Use this when trying to figure when its ok to send in rpcs. Used by tests. 
- * @return True if we have successfully run {@link HBaseServer#openServer()} + * @return True if we have successfully run {@link RpcServer#openServer()} */ boolean isRpcServerOpen() { return this.rpcServerOpen; @@ -1141,7 +1153,7 @@ Server { throws UnknownHostException { // Do it out here in its own little method so can fake an address when // mocking up in tests. - return HBaseServer.getRemoteIp(); + return RpcServer.getRemoteIp(); } /** @@ -2354,9 +2366,9 @@ Server { /** * Offline specified region from master's in-memory state. It will not attempt to * reassign the region as in unassign. - * + * * This is a special method that should be used by experts or hbck. - * + * */ @Override public OfflineRegionResponse offlineRegion(RpcController controller, OfflineRegionRequest request) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java index 3c58a350319..5e50046db2b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java @@ -477,7 +477,7 @@ public class MasterFileSystem { private static void bootstrap(final Path rd, final Configuration c) throws IOException { - LOG.info("BOOTSTRAP: creating first META region"); + LOG.info("BOOTSTRAP: creating META region"); try { // Bootstrapping, make sure blockcache is off. Else, one will be // created here in bootstap and it'll need to be cleaned up. Better to diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index f20456628e2..39acd807c94 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -155,10 +155,7 @@ public interface MasterServices extends Server { public boolean isServerShutdownHandlerEnabled(); /** - * Registers a new protocol buffer {@link Service} subclass as a master coprocessor endpoint to - * be available for handling - * {@link org.apache.hadoop.hbase.MasterAdminProtocol#execMasterService(com.google.protobuf.RpcController, - * org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)} calls. + * Registers a new protocol buffer {@link Service} subclass as a master coprocessor endpoint. * *

* Only a single instance may be registered for a given {@link Service} subclass (the diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index 2f5d02b8b6f..3f6d03992a3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -27,9 +27,9 @@ import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Set; import java.util.SortedMap; -import java.util.Map.Entry; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentSkipListMap; @@ -37,25 +37,25 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.exceptions.ClockOutOfSyncException; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.exceptions.PleaseHoldException; import org.apache.hadoop.hbase.RegionLoad; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.exceptions.YouAreDeadException; -import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.client.AdminProtocol; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.RetriesExhaustedException; +import org.apache.hadoop.hbase.exceptions.ClockOutOfSyncException; +import org.apache.hadoop.hbase.exceptions.PleaseHoldException; +import org.apache.hadoop.hbase.exceptions.YouAreDeadException; +import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException; import org.apache.hadoop.hbase.master.handler.MetaServerShutdownHandler; import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.protobuf.ResponseConverter; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse; import org.apache.hadoop.hbase.regionserver.RegionOpeningState; @@ -112,12 +112,12 @@ public class ServerManager { private final Map onlineServers = new ConcurrentHashMap(); - // TODO: This is strange to have two maps but HSI above is used on both sides /** - * Map from full server-instance name to the RPC connection for this server. 
+ * Map of admin interfaces per registered regionserver; these interfaces we use to control + * regionservers out on the cluster */ - private final Map serverConnections = - new HashMap(); + private final Map rsAdmins = + new HashMap(); /** * List of region servers that should not get any more new @@ -351,7 +351,7 @@ public class ServerManager { void recordNewServer(final ServerName serverName, final ServerLoad sl) { LOG.info("Registering server=" + serverName); this.onlineServers.put(serverName, sl); - this.serverConnections.remove(serverName); + this.rsAdmins.remove(serverName); } public long getLastFlushedSequenceId(byte[] regionName) { @@ -472,7 +472,7 @@ public class ServerManager { synchronized (onlineServers) { onlineServers.notifyAll(); } - this.serverConnections.remove(serverName); + this.rsAdmins.remove(serverName); // If cluster is going down, yes, servers are going to be expiring; don't // process as a dead server if (this.clusterShutdown) { @@ -591,7 +591,7 @@ public class ServerManager { public RegionOpeningState sendRegionOpen(final ServerName server, HRegionInfo region, int versionOfOfflineNode) throws IOException { - AdminProtocol admin = getServerConnection(server); + AdminService.BlockingInterface admin = getRsAdmin(server); if (admin == null) { LOG.warn("Attempting to send OPEN RPC to server " + server.toString() + " failed because no RPC connection found to this server"); @@ -619,7 +619,7 @@ public class ServerManager { public List sendRegionOpen(ServerName server, List> regionOpenInfos) throws IOException { - AdminProtocol admin = getServerConnection(server); + AdminService.BlockingInterface admin = getRsAdmin(server); if (admin == null) { LOG.warn("Attempting to send OPEN RPC to server " + server.toString() + " failed because no RPC connection found to this server"); @@ -653,7 +653,7 @@ public class ServerManager { public boolean sendRegionClose(ServerName server, HRegionInfo region, int versionOfClosingNode, ServerName dest, boolean transitionInZK) throws IOException { if (server == null) throw new NullPointerException("Passed server is null"); - AdminProtocol admin = getServerConnection(server); + AdminService.BlockingInterface admin = getRsAdmin(server); if (admin == null) { throw new IOException("Attempting to send CLOSE RPC to server " + server.toString() + " for region " + @@ -688,7 +688,7 @@ public class ServerManager { throw new NullPointerException("Passed server is null"); if (region_a == null || region_b == null) throw new NullPointerException("Passed region is null"); - AdminProtocol admin = getServerConnection(server); + AdminService.BlockingInterface admin = getRsAdmin(server); if (admin == null) { throw new IOException("Attempting to send MERGE REGIONS RPC to server " + server.toString() + " for region " @@ -701,18 +701,17 @@ public class ServerManager { /** * @param sn - * @return + * @return Admin interface for the remote regionserver named sn * @throws IOException * @throws RetriesExhaustedException wrapping a ConnectException if failed - * putting up proxy. 
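The rsAdmins map replacing serverConnections here is a lazy per-server stub cache: build the AdminService.BlockingInterface on first use, and drop the entry in recordNewServer()/expireServer() so a restarted regionserver gets a fresh stub. The idiom in isolation (field and method names mirror the patch; 'connection' is the shared HConnection):

```java
private final Map<ServerName, AdminService.BlockingInterface> rsAdmins =
    new HashMap<ServerName, AdminService.BlockingInterface>();

private AdminService.BlockingInterface getRsAdmin(ServerName sn) throws IOException {
  AdminService.BlockingInterface admin = rsAdmins.get(sn);
  if (admin == null) {            // first RPC to this server since it registered
    admin = this.connection.getAdmin(sn);
    rsAdmins.put(sn, admin);      // reuse for subsequent open/close/merge RPCs
  }
  return admin;
}
```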
*/ - private AdminProtocol getServerConnection(final ServerName sn) + private AdminService.BlockingInterface getRsAdmin(final ServerName sn) throws IOException { - AdminProtocol admin = this.serverConnections.get(sn); + AdminService.BlockingInterface admin = this.rsAdmins.get(sn); if (admin == null) { - LOG.debug("New connection to " + sn.toString()); + LOG.debug("New admin connection to " + sn.toString()); admin = this.connection.getAdmin(sn); - this.serverConnections.put(sn, admin); + this.rsAdmins.put(sn, admin); } return admin; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java index aa0b5072f15..1cd3daf9d09 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java @@ -178,8 +178,7 @@ public class SplitLogManager extends ZooKeeperListener { this.timeout = conf.getInt("hbase.splitlog.manager.timeout", DEFAULT_TIMEOUT); this.unassignedTimeout = conf.getInt("hbase.splitlog.manager.unassigned.timeout", DEFAULT_UNASSIGNED_TIMEOUT); - LOG.info("timeout = " + timeout); - LOG.info("unassigned timeout = " + unassignedTimeout); + LOG.info("timeout=" + timeout + ", unassigned timeout=" + unassignedTimeout); this.serverName = serverName; this.timeoutMonitor = @@ -855,7 +854,7 @@ public class SplitLogManager extends ZooKeeperListener { } getDataSetWatch(nodepath, zkretries); } - LOG.info("found " + (orphans.size() - rescan_nodes) + " orphan tasks and " + + LOG.info("Found " + (orphans.size() - rescan_nodes) + " orphan tasks and " + rescan_nodes + " rescan nodes"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java index ccc61ac427f..29f01541443 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java @@ -22,12 +22,12 @@ package org.apache.hadoop.hbase.protobuf; import com.google.protobuf.ByteString; import com.google.protobuf.ServiceException; -import org.apache.hadoop.hbase.HConstants; + import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.client.AdminProtocol; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.WALProtos; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; @@ -79,7 +79,7 @@ public class ReplicationProtbufUtil { * @param entries * @throws java.io.IOException */ - public static void replicateWALEntry(final AdminProtocol admin, + public static void replicateWALEntry(final AdminService.BlockingInterface admin, final HLog.Entry[] entries) throws IOException { AdminProtos.ReplicateWALEntryRequest request = buildReplicateWALEntryRequest(entries); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index bc1ef09d2df..d2cae1e541f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -108,7 +108,7 @@ import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.CacheConfig; -import org.apache.hadoop.hbase.ipc.HBaseServer; +import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.ipc.RpcCallContext; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; @@ -3576,7 +3576,7 @@ public class HRegion implements HeapSize { // , Writable{ if (!results.isEmpty()) { throw new IllegalArgumentException("First parameter should be an empty list"); } - RpcCallContext rpcCall = HBaseServer.getCurrentCall(); + RpcCallContext rpcCall = RpcServer.getCurrentCall(); // The loop here is used only when at some point during the next we determine // that due to effects of filters or otherwise, we have an empty row in the result. // Then we loop and try again. Otherwise, we must get out on the first iteration via return, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 8f3a16a57b9..22d539a4fdd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -65,7 +65,6 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HealthCheckChore; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.RegionServerStatusProtocol; import org.apache.hadoop.hbase.RemoteExceptionHandler; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Stoppable; @@ -74,9 +73,7 @@ import org.apache.hadoop.hbase.ZNodeClearer; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.catalog.MetaEditor; import org.apache.hadoop.hbase.catalog.MetaReader; -import org.apache.hadoop.hbase.client.AdminProtocol; import org.apache.hadoop.hbase.client.Append; -import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HConnectionManager; @@ -108,19 +105,19 @@ import org.apache.hadoop.hbase.filter.ByteArrayComparable; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.hfile.CacheConfig; -import org.apache.hadoop.hbase.ipc.HBaseClientRPC; import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler; -import org.apache.hadoop.hbase.ipc.HBaseServerRPC; -import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController; -import org.apache.hadoop.hbase.ipc.ProtobufRpcClientEngine; -import org.apache.hadoop.hbase.ipc.RpcClientEngine; +import org.apache.hadoop.hbase.ipc.RpcClient; import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController; +import org.apache.hadoop.hbase.ipc.RpcServerInterface; +import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; import org.apache.hadoop.hbase.ipc.ServerRpcController; import org.apache.hadoop.hbase.master.TableLockManager; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; import 
org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.protobuf.ResponseConverter; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest; @@ -179,6 +176,7 @@ import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLa import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest; import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException; import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress; @@ -218,6 +216,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.zookeeper.KeeperException; import org.cliffc.high_scale_lib.Counter; +import com.google.protobuf.BlockingRpcChannel; import com.google.protobuf.ByteString; import com.google.protobuf.Message; import com.google.protobuf.RpcController; @@ -230,8 +229,9 @@ import com.google.protobuf.TextFormat; */ @InterfaceAudience.Private @SuppressWarnings("deprecation") -public class HRegionServer implements ClientProtocol, - AdminProtocol, Runnable, RegionServerServices, HBaseRPCErrorHandler, LastSequenceId { +public class HRegionServer implements ClientProtos.ClientService.BlockingInterface, + AdminProtos.AdminService.BlockingInterface, Runnable, RegionServerServices, + HBaseRPCErrorHandler, LastSequenceId { public static final Log LOG = LogFactory.getLog(HRegionServer.class); @@ -326,15 +326,14 @@ public class HRegionServer implements ClientProtocol, protected final int numRegionsToReport; - // Remote HMaster - private RegionServerStatusProtocol hbaseMaster; + // Stub to do region server status calls against the master. + private RegionServerStatusService.BlockingInterface rssStub; + // RPC client. Used to make the stub above that does region server status checking. + RpcClient rpcClient; // Server to handle client requests. Default access so can be accessed by // unit tests. - RpcServer rpcServer; - - // RPC client for communicating with master - RpcClientEngine rpcClientEngine; + RpcServerInterface rpcServer; private final InetSocketAddress isa; private UncaughtExceptionHandler uncaughtExceptionHandler; @@ -460,15 +459,12 @@ public class HRegionServer implements ClientProtocol, throws IOException, InterruptedException { this.fsOk = true; this.conf = conf; - // Set how many times to retry talking to another server over HConnection. - HConnectionManager.setServerSideHConnectionRetries(this.conf, LOG); this.isOnline = false; checkCodecs(this.conf); // do we use checksum verification in the hbase? If hbase checksum verification // is enabled, then we automatically switch off hdfs checksum verification. 
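The class-declaration hunk above is the heart of this change for the regionserver: instead of implementing the hand-written ClientProtocol and AdminProtocol marker interfaces, HRegionServer now implements the protoc-generated ClientService.BlockingInterface and AdminService.BlockingInterface directly. A compilable sketch of the shape this produces, with hand-written stand-ins for the generated interfaces (the real ones take protobuf request/response messages):

interface ClientBlocking {
  String get(String row);
}

interface AdminBlocking {
  void openRegion(String regionName);
}

// The server object itself is the implementation of every service it exposes;
// there is no separate proxy or marker-protocol type in between.
class MiniServer implements ClientBlocking, AdminBlocking {
  @Override
  public String get(String row) {
    return "value-of-" + row; // placeholder for a real region read
  }

  @Override
  public void openRegion(String regionName) {
    // placeholder for real region-open handling
  }
}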
- this.useHBaseChecksum = conf.getBoolean( - HConstants.HBASE_CHECKSUM_VERIFICATION, false); + this.useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, false); // Config'ed params this.numRetries = conf.getInt("hbase.client.retries.number", 10); @@ -506,18 +502,17 @@ public class HRegionServer implements ClientProtocol, if (initialIsa.getAddress() == null) { throw new IllegalArgumentException("Failed resolve of " + initialIsa); } - this.rand = new Random(initialIsa.hashCode()); - this.rpcServer = HBaseServerRPC.getServer(AdminProtocol.class, this, - new Class[]{ClientProtocol.class, - AdminProtocol.class, HBaseRPCErrorHandler.class, - OnlineRegions.class}, - initialIsa.getHostName(), // BindAddress is IP we got for this server. - initialIsa.getPort(), - conf.getInt("hbase.regionserver.handler.count", 10), - conf.getInt("hbase.regionserver.metahandler.count", 10), - conf.getBoolean("hbase.rpc.verbose", false), - conf, HConstants.QOS_THRESHOLD); + String name = "regionserver/" + initialIsa.toString(); + // Set how many times to retry talking to another server over HConnection. + HConnectionManager.setServerSideHConnectionRetries(this.conf, name, LOG); + this.rpcServer = new RpcServer(this, name, getServices(), + /*HBaseRPCErrorHandler.class, OnlineRegions.class},*/ + initialIsa, // BindAddress is IP we got for this server. + conf.getInt("hbase.regionserver.handler.count", 10), + conf.getInt("hbase.regionserver.metahandler.count", 10), + conf, HConstants.QOS_THRESHOLD); + // Set our address. this.isa = this.rpcServer.getListenerAddress(); @@ -542,6 +537,20 @@ public class HRegionServer implements ClientProtocol, this.rsHost = new RegionServerCoprocessorHost(this, this.conf); } + /** + * @return list of blocking services and their security info classes that this server supports + */ + private List<BlockingServiceAndInterface> getServices() { + List<BlockingServiceAndInterface> bssi = new ArrayList<BlockingServiceAndInterface>(2); + bssi.add(new BlockingServiceAndInterface( + ClientProtos.ClientService.newReflectiveBlockingService(this), + ClientProtos.ClientService.BlockingInterface.class)); + bssi.add(new BlockingServiceAndInterface( + AdminProtos.AdminService.newReflectiveBlockingService(this), + AdminProtos.AdminService.BlockingInterface.class)); + return bssi; + } + /** * Run test on configured codecs to make sure supporting libs are in place. * @param c @@ -706,7 +715,7 @@ public class HRegionServer implements ClientProtocol, movedRegionsCleaner = MovedRegionsCleaner.createAndStart(this); // Setup RPC client for master communication - rpcClientEngine = new ProtobufRpcClientEngine(conf, clusterId); + rpcClient = new RpcClient(conf, clusterId); } /** @@ -870,10 +879,10 @@ public class HRegionServer implements ClientProtocol, } // Make sure the proxy is down.
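getServices(), added above, replaces the old Class[] array handed to HBaseServerRPC.getServer(): each exposed service is now registered as a pair of the reflective BlockingService plus its BlockingInterface class. A simplified stand-in for that pairing, where BlockingServiceAndIface and the marker interfaces are hypothetical and much reduced from the real RpcServer.BlockingServiceAndInterface:

import java.util.ArrayList;
import java.util.List;

// Marker stand-ins for the generated BlockingInterface classes.
interface ClientIface {}
interface AdminIface {}

// The RPC server needs both the dispatchable service object and its interface
// class; the interface class is what security metadata lookups are keyed on.
final class BlockingServiceAndIface {
  final Object service; // com.google.protobuf.BlockingService in the real code
  final Class<?> iface;

  BlockingServiceAndIface(Object service, Class<?> iface) {
    this.service = service;
    this.iface = iface;
  }
}

class Services {
  // Mirrors HRegionServer.getServices(): one entry per service this server exposes.
  static List<BlockingServiceAndIface> getServices(ClientIface client, AdminIface admin) {
    List<BlockingServiceAndIface> bssi = new ArrayList<BlockingServiceAndIface>(2);
    bssi.add(new BlockingServiceAndIface(client, ClientIface.class));
    bssi.add(new BlockingServiceAndIface(admin, AdminIface.class));
    return bssi;
  }
}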
- if (this.hbaseMaster != null) { - this.hbaseMaster = null; + if (this.rssStub != null) { + this.rssStub = null; } - this.rpcClientEngine.close(); + this.rpcClient.stop(); this.leases.close(); if (!killed) { @@ -920,7 +929,7 @@ public class HRegionServer implements ClientProtocol, this.serverNameFromMasterPOV.getVersionedBytes()); request.setServer(ProtobufUtil.toServerName(sn)); request.setLoad(sl); - this.hbaseMaster.regionServerReport(null, request.build()); + this.rssStub.regionServerReport(null, request.build()); } catch (ServiceException se) { IOException ioe = ProtobufUtil.getRemoteException(se); if (ioe instanceof YouAreDeadException) { @@ -929,7 +938,9 @@ public class HRegionServer implements ClientProtocol, } // Couldn't connect to the master, get location from zk and reconnect // Method blocks until new master is found or we are stopped - getMaster(); + Pair<ServerName, RegionServerStatusService.BlockingInterface> p = + createRegionServerStatusStub(); + this.rssStub = p.getSecond(); } } @@ -1078,9 +1089,11 @@ public class HRegionServer implements ClientProtocol, String hostnameFromMasterPOV = e.getValue(); this.serverNameFromMasterPOV = new ServerName(hostnameFromMasterPOV, this.isa.getPort(), this.startcode); - LOG.info("Master passed us hostname to use. Was=" + - this.isa.getHostName() + ", Now=" + - this.serverNameFromMasterPOV.getHostname()); + if (!this.serverNameFromMasterPOV.getHostname().equals(this.isa.getHostName())) { + LOG.info("Master passed us a different hostname to use; was=" + + this.isa.getHostName() + ", but now=" + + this.serverNameFromMasterPOV.getHostname()); + } continue; } String value = e.getValue(); @@ -1301,10 +1314,10 @@ public class HRegionServer implements ClientProtocol, FlushRequester requester = server.getFlushRequester(); if (requester != null) { long randomDelay = rand.nextInt(RANGE_OF_DELAY) + MIN_DELAY_TIME; - LOG.info(getName() + " requesting flush for region " + r.getRegionNameAsString() + + LOG.info(getName() + " requesting flush for region " + r.getRegionNameAsString() + " after a delay of " + randomDelay); //Throttle the flushes by putting a delay. If we don't throttle, and there - //is a balanced write-load on the regions in a table, we might end up + //is a balanced write-load on the regions in a table, we might end up //overwhelming the filesystem with too many flushes at once. requester.requestDelayedFlush(r, randomDelay); } @@ -1629,7 +1642,7 @@ public class HRegionServer implements ClientProtocol, } @Override - public RpcServer getRpcServer() { + public RpcServerInterface getRpcServer() { return rpcServer; } @@ -1662,14 +1675,14 @@ public class HRegionServer implements ClientProtocol, msg += "\nCause:\n" + StringUtils.stringifyException(cause); } // Report to the master but only if we have already registered with the master.
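tryRegionServerReport(), changed above, now treats the master stub as disposable: when a report fails, the stale rssStub is replaced by whatever createRegionServerStatusStub() finds at the current master address. A small sketch of that report-and-reconnect loop, with a hypothetical StatusStub standing in for RegionServerStatusService.BlockingInterface:

import java.io.IOException;

// Hypothetical stand-in for RegionServerStatusService.BlockingInterface.
interface StatusStub {
  void regionServerReport(String load) throws IOException;
}

abstract class ReportingServer {
  private StatusStub rssStub;

  // If the report fails, assume the master moved: drop the stale stub and
  // rebuild it from the current master address before the next report cycle.
  void tryRegionServerReport(String load) throws IOException {
    if (rssStub == null) {
      rssStub = createStatusStub();
    }
    try {
      rssStub.regionServerReport(load);
    } catch (IOException e) {
      rssStub = createStatusStub(); // reconnect; blocks until a master is found
    }
  }

  abstract StatusStub createStatusStub() throws IOException;
}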
- if (hbaseMaster != null && this.serverNameFromMasterPOV != null) { + if (rssStub != null && this.serverNameFromMasterPOV != null) { ReportRSFatalErrorRequest.Builder builder = ReportRSFatalErrorRequest.newBuilder(); ServerName sn = ServerName.parseVersionedServerName(this.serverNameFromMasterPOV.getVersionedBytes()); builder.setServer(ProtobufUtil.toServerName(sn)); builder.setErrorMessage(msg); - hbaseMaster.reportRSFatalError(null, builder.build()); + rssStub.reportRSFatalError(null, builder.build()); } } catch (Throwable t) { LOG.warn("Unable to report fatal error to master", t); @@ -1753,14 +1766,16 @@ public class HRegionServer implements ClientProtocol, * * @return master + port, or null if server has been stopped */ - private ServerName getMaster() { - ServerName masterServerName = null; + private Pair<ServerName, RegionServerStatusService.BlockingInterface> + createRegionServerStatusStub() { + ServerName sn = null; long previousLogTime = 0; - RegionServerStatusProtocol master = null; + RegionServerStatusService.BlockingInterface master = null; boolean refresh = false; // for the first time, use cached data + RegionServerStatusService.BlockingInterface intf = null; while (keepLooping() && master == null) { - masterServerName = this.masterAddressManager.getMasterAddress(refresh); - if (masterServerName == null) { + sn = this.masterAddressManager.getMasterAddress(refresh); + if (sn == null) { if (!keepLooping()) { // give up with no connection. LOG.debug("No master found and cluster is stopped; bailing out"); @@ -1769,22 +1784,20 @@ LOG.debug("No master found; retry"); previousLogTime = System.currentTimeMillis(); refresh = true; // let's try pull it from ZK directly - sleeper.sleep(); continue; } InetSocketAddress isa = - new InetSocketAddress(masterServerName.getHostname(), masterServerName.getPort()); + new InetSocketAddress(sn.getHostname(), sn.getPort()); LOG.info("Attempting connect to Master server at " + this.masterAddressManager.getMasterAddress()); try { - // Do initial RPC setup. The final argument indicates that the RPC - // should retry indefinitely. - master = HBaseClientRPC.waitForProxy(rpcClientEngine, RegionServerStatusProtocol.class, - isa, this.conf, -1, this.rpcTimeout, this.rpcTimeout); - LOG.info("Connected to master at " + isa); + BlockingRpcChannel channel = this.rpcClient.createBlockingRpcChannel(sn, + User.getCurrent(), this.rpcTimeout); + intf = RegionServerStatusService.newBlockingStub(channel); + break; } catch (IOException e) { e = e instanceof RemoteException ?
((RemoteException)e).unwrapRemoteException() : e; @@ -1805,8 +1818,7 @@ } } } - this.hbaseMaster = master; - return masterServerName; + return new Pair<ServerName, RegionServerStatusService.BlockingInterface>(sn, intf); } /** @@ -1826,7 +1838,10 @@ */ private RegionServerStartupResponse reportForDuty() throws IOException { RegionServerStartupResponse result = null; - ServerName masterServerName = getMaster(); + Pair<ServerName, RegionServerStatusService.BlockingInterface> p = + createRegionServerStatusStub(); + this.rssStub = p.getSecond(); + ServerName masterServerName = p.getFirst(); if (masterServerName == null) return result; try { this.requestCount.set(0); @@ -1838,7 +1853,7 @@ request.setPort(port); request.setServerStartCode(this.startcode); request.setServerCurrentTime(now); - result = this.hbaseMaster.regionServerStartup(null, request.build()); + result = this.rssStub.regionServerStartup(null, request.build()); } catch (ServiceException se) { IOException ioe = ProtobufUtil.getRemoteException(se); if (ioe instanceof ClockOutOfSyncException) { @@ -1858,7 +1873,7 @@ try { GetLastFlushedSequenceIdRequest req = RequestConverter.buildGetLastFlushedSequenceIdRequest(region); - lastFlushedSequenceId = hbaseMaster.getLastFlushedSequenceId(null, req) + lastFlushedSequenceId = rssStub.getLastFlushedSequenceId(null, req) .getLastFlushedSequenceId(); } catch (ServiceException e) { lastFlushedSequenceId = -1L; @@ -3062,8 +3077,7 @@ public class HRegionServer implements ClientProtocol, builder.setMoreResults(moreResults); return builder.build(); } catch (Throwable t) { - if (scannerName != null && - t instanceof NotServingRegionException) { + if (scannerName != null && t instanceof NotServingRegionException) { scanners.remove(scannerName); } throw convertThrowableToIOE(cleanup(t)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java index cdcd7f3abb2..2acc93e2fdd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java @@ -24,9 +24,10 @@ import java.util.concurrent.ConcurrentMap; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.executor.ExecutorService; -import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.master.TableLockManager; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.zookeeper.KeeperException; @@ -80,7 +81,7 @@ public interface RegionServerServices extends OnlineRegions { /** * Returns a reference to the region server's RPC server */ - public RpcServer getRpcServer(); + public RpcServerInterface getRpcServer(); /** * Get the regions that are currently being opened or closed in the RS diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index 00e2590f624..3fecd8098dd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -441,7 +441,7 @@ class FSHLog implements HLog, Syncable { } } if (m != null) { - LOG.info("Using getNumCurrentReplicas--HDFS-826"); + if (LOG.isTraceEnabled()) LOG.trace("Using getNumCurrentReplicas--HDFS-826"); } return m; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index cb605711809..d55f39b36db 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -49,12 +49,12 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Stoppable; -import org.apache.hadoop.hbase.client.AdminProtocol; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.exceptions.TableNotFoundException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; @@ -718,7 +718,7 @@ public class ReplicationSource extends Thread continue; } try { - AdminProtocol rrs = getRS(); + AdminService.BlockingInterface rrs = getRS(); ReplicationProtbufUtil.replicateWALEntry(rrs, Arrays.copyOf(this.entriesArray, currentNbEntries)); if (this.lastLoggedPosition != this.repLogReader.getPosition()) { @@ -848,7 +848,7 @@ public class ReplicationSource extends Thread * @return * @throws IOException */ - private AdminProtocol getRS() throws IOException { + private AdminService.BlockingInterface getRS() throws IOException { if (this.currentPeers.size() == 0) { throw new IOException(this.peerClusterZnode + " has 0 region servers"); } @@ -867,7 +867,7 @@ public class ReplicationSource extends Thread Thread pingThread = new Thread() { public void run() { try { - AdminProtocol rrs = getRS(); + AdminService.BlockingInterface rrs = getRS(); // Dummy call which should fail ProtobufUtil.getServerInfo(rrs); latch.countDown(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBasePolicyProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBasePolicyProvider.java index efeedcead87..045a8b35e01 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBasePolicyProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBasePolicyProvider.java @@ -18,11 +18,11 @@ package org.apache.hadoop.hbase.security; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.client.AdminProtocol; -import org.apache.hadoop.hbase.client.ClientProtocol; -import org.apache.hadoop.hbase.MasterMonitorProtocol; -import org.apache.hadoop.hbase.MasterAdminProtocol; -import org.apache.hadoop.hbase.RegionServerStatusProtocol; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService; +import 
org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.MasterMonitorService; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.security.authorize.Service; import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; @@ -33,11 +33,11 @@ import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; */ public class HBasePolicyProvider extends PolicyProvider { protected final static Service[] services = { - new Service("security.client.protocol.acl", ClientProtocol.class), - new Service("security.client.protocol.acl", AdminProtocol.class), - new Service("security.admin.protocol.acl", MasterMonitorProtocol.class), - new Service("security.admin.protocol.acl", MasterAdminProtocol.class), - new Service("security.masterregion.protocol.acl", RegionServerStatusProtocol.class) + new Service("security.client.protocol.acl", ClientService.BlockingInterface.class), + new Service("security.client.protocol.acl", AdminService.BlockingInterface.class), + new Service("security.admin.protocol.acl", MasterMonitorService.BlockingInterface.class), + new Service("security.admin.protocol.acl", MasterAdminService.BlockingInterface.class), + new Service("security.masterregion.protocol.acl", RegionServerStatusService.BlockingInterface.class) }; @Override @@ -45,13 +45,11 @@ public class HBasePolicyProvider { return services; } - public static void init(Configuration conf, - ServiceAuthorizationManager authManager) { + public static void init(Configuration conf, ServiceAuthorizationManager authManager) { // set service-level authorization security policy System.setProperty("hadoop.policy.file", "hbase-policy.xml"); - if (conf.getBoolean( - ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) { + if (conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) { authManager.refresh(conf, new HBasePolicyProvider()); } } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java index 2ba2b455b08..514fee31d22 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java @@ -34,7 +34,7 @@ import javax.security.sasl.Sasl; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.ipc.HBaseServer; +import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.TokenIdentifier; @@ -96,11 +96,11 @@ public class HBaseSaslRpcServer { /** CallbackHandler for SASL DIGEST-MD5 mechanism */ public static class SaslDigestCallbackHandler implements CallbackHandler { private SecretManager<TokenIdentifier> secretManager; - private HBaseServer.Connection connection; + private RpcServer.Connection connection; public SaslDigestCallbackHandler( SecretManager<TokenIdentifier> secretManager, - HBaseServer.Connection connection) { + RpcServer.Connection connection) { this.secretManager = secretManager; this.connection = connection; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java index cb649d9fcad..30bb8af0e46 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java @@ -29,9 +29,9 @@ import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.CoprocessorService; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; -import org.apache.hadoop.hbase.ipc.HBaseServer; -import org.apache.hadoop.hbase.ipc.RequestContext; import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.ipc.RequestContext; +import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.ResponseConverter; import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos; @@ -60,8 +60,8 @@ public class TokenProvider implements AuthenticationProtos.AuthenticationService if (env instanceof RegionCoprocessorEnvironment) { RegionCoprocessorEnvironment regionEnv = (RegionCoprocessorEnvironment)env; - RpcServer server = regionEnv.getRegionServerServices().getRpcServer(); - SecretManager<? extends TokenIdentifier> mgr = ((HBaseServer)server).getSecretManager(); + RpcServerInterface server = regionEnv.getRegionServerServices().getRpcServer(); + SecretManager<? extends TokenIdentifier> mgr = ((RpcServer)server).getSecretManager(); if (mgr instanceof AuthenticationTokenSecretManager) { secretManager = (AuthenticationTokenSecretManager)mgr; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index e1768cb6fff..920b66d5029 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -66,13 +66,12 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.catalog.MetaEditor; -import org.apache.hadoop.hbase.client.AdminProtocol; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HConnectable; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; -import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.MetaScanner; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; @@ -86,6 +85,7 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; @@ -3063,8 +3063,7 @@ public class HBaseFsck extends Configured implements Tool { public synchronized Void call() throws IOException { errors.progress(); try { - AdminProtocol server = - connection.getAdmin(rsinfo); + BlockingInterface server = connection.getAdmin(rsinfo); // list all
online regions from this region server List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(server); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java index 373e51e11a8..2006035a93d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java @@ -32,14 +32,14 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException; import org.apache.hadoop.hbase.catalog.MetaEditor; -import org.apache.hadoop.hbase.client.AdminProtocol; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.zookeeper.KeeperException; @@ -149,7 +149,7 @@ public class HBaseFsckRepair { public static void closeRegionSilentlyAndWait(HBaseAdmin admin, ServerName server, HRegionInfo region) throws IOException, InterruptedException { HConnection connection = admin.getConnection(); - AdminProtocol rs = connection.getAdmin(server); + AdminService.BlockingInterface rs = connection.getAdmin(server); ProtobufUtil.closeRegion(rs, region.getRegionName(), false); long timeout = admin.getConfiguration() .getLong("hbase.hbck.close.timeout", 120000); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java index eef900c4265..14ab22bde09 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java @@ -39,9 +39,9 @@ import org.apache.hadoop.hbase.exceptions.TableNotDisabledException; import org.apache.hadoop.hbase.catalog.MetaEditor; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HConnectable; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; -import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java index f20c2e54b21..b5d53f67dbe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java @@ -25,8 +25,10 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.client.AdminProtocol; -import org.apache.hadoop.hbase.client.ClientProtocol; +import
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.MasterMonitorService; import org.apache.hadoop.hbase.util.Threads; /** @@ -94,26 +96,28 @@ public abstract class HBaseCluster implements Closeable, Configurable { } /** - * Returns an {@link MasterAdminProtocol} to the active master + * Returns a {@link MasterAdminService.BlockingInterface} to the active master */ - public abstract MasterAdminProtocol getMasterAdmin() + public abstract MasterAdminService.BlockingInterface getMasterAdmin() throws IOException; /** - * Returns an {@link MasterMonitorProtocol} to the active master + * Returns a {@link MasterMonitorService.BlockingInterface} to the active master */ - public abstract MasterMonitorProtocol getMasterMonitor() - throws IOException; + public abstract MasterMonitorService.BlockingInterface getMasterMonitor() + throws IOException; /** * Returns an {@link AdminService.BlockingInterface} to the regionserver */ - public abstract AdminProtocol getAdminProtocol(ServerName serverName) throws IOException; + public abstract AdminService.BlockingInterface getAdminProtocol(ServerName serverName) + throws IOException; /** * Returns a {@link ClientService.BlockingInterface} to the regionserver */ - public abstract ClientProtocol getClientProtocol(ServerName serverName) throws IOException; + public abstract ClientService.BlockingInterface getClientProtocol(ServerName serverName) + throws IOException; /** * Starts a new region server on the given hostname or if this is a mini/local cluster, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java index 64f8e60ee67..477cc9cd7b8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java @@ -29,10 +29,12 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.hbase.client.AdminProtocol; -import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.MasterMonitorService; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; @@ -372,12 +374,12 @@ public class MiniHBaseCluster extends HBaseCluster { } @Override - public MasterAdminProtocol getMasterAdmin() { + public MasterAdminService.BlockingInterface getMasterAdmin() { return this.hbaseCluster.getActiveMaster(); } @Override - public MasterMonitorProtocol getMasterMonitor() { + public MasterMonitorService.BlockingInterface getMasterMonitor() { return this.hbaseCluster.getActiveMaster(); } @@ -712,12 +714,13 @@ public class
MiniHBaseCluster extends HBaseCluster { } @Override - public AdminProtocol getAdminProtocol(ServerName serverName) throws IOException { + public AdminService.BlockingInterface getAdminProtocol(ServerName serverName) throws IOException { return getRegionServer(getRegionServerIndex(serverName)); } @Override - public ClientProtocol getClientProtocol(ServerName serverName) throws IOException { + public ClientService.BlockingInterface getClientProtocol(ServerName serverName) + throws IOException { return getRegionServer(getRegionServerIndex(serverName)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java index 2c3592958c5..ac80b374861 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java @@ -22,8 +22,6 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; import java.net.ConnectException; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import junit.framework.Assert; @@ -37,25 +35,20 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MediumTests; -import org.apache.hadoop.hbase.exceptions.NotAllMetaRegionsOnlineException; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.AdminProtocol; -import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.RetriesExhaustedException; -import org.apache.hadoop.hbase.client.ServerCallable; +import org.apache.hadoop.hbase.exceptions.NotAllMetaRegionsOnlineException; import org.apache.hadoop.hbase.exceptions.ServerNotRunningYetException; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.util.Progressable; import org.apache.zookeeper.KeeperException; @@ -63,7 +56,6 @@ import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; -import org.junit.Ignore; import org.junit.Test; import org.junit.experimental.categories.Category; import org.mockito.Mockito; @@ -149,7 +141,8 @@ public class TestCatalogTracker { */ @Test public void testInterruptWaitOnMeta() throws IOException, InterruptedException, ServiceException { - final ClientProtocol client = Mockito.mock(ClientProtocol.class); + final ClientProtos.ClientService.BlockingInterface client = + 
Mockito.mock(ClientProtos.ClientService.BlockingInterface.class); HConnection connection = mockConnection(null, client); try { Mockito.when(client.get((RpcController)Mockito.any(), (GetRequest)Mockito.any())). @@ -183,7 +176,8 @@ public class TestCatalogTracker { private void testVerifyMetaRegionLocationWithException(Exception ex) throws IOException, InterruptedException, KeeperException, ServiceException { // Mock a ClientProtocol. - final ClientProtocol implementation = Mockito.mock(ClientProtocol.class); + final ClientProtos.ClientService.BlockingInterface implementation = + Mockito.mock(ClientProtos.ClientService.BlockingInterface.class); HConnection connection = mockConnection(null, implementation); try { // If a 'get' is called on mocked interface, throw connection refused. @@ -253,8 +247,8 @@ public class TestCatalogTracker { HConnection connection = Mockito.mock(HConnection.class); ServiceException connectException = new ServiceException(new ConnectException("Connection refused")); - final AdminProtocol implementation = - Mockito.mock(AdminProtocol.class); + final AdminProtos.AdminService.BlockingInterface implementation = + Mockito.mock(AdminProtos.AdminService.BlockingInterface.class); Mockito.when(implementation.getRegionInfo((RpcController)Mockito.any(), (GetRegionInfoRequest)Mockito.any())).thenThrow(connectException); Mockito.when(connection.getAdmin(Mockito.any(ServerName.class), Mockito.anyBoolean())). @@ -309,22 +303,23 @@ public class TestCatalogTracker { } /** - * @param admin An {@link AdminProtocol} instance; you'll likely * want to pass a mocked HRS; can be null. * @param client A mocked ClientProtocol instance, can be null * @return Mock up a connection that returns a {@link Configuration} when * {@link HConnection#getConfiguration()} is called, a 'location' when * {@link HConnection#getRegionLocation(byte[], byte[], boolean)} is called, - * and that returns the passed {@link AdminProtocol} instance when + * and that returns the passed {@link AdminProtos.AdminService.BlockingInterface} instance when * {@link HConnection#getAdmin(ServerName)} is called, returns the passed - * {@link ClientProtocol} instance when {@link HConnection#getClient(ServerName)} - * is called (Be sure call + * {@link ClientProtos.ClientService.BlockingInterface} instance when + * {@link HConnection#getClient(ServerName)} is called (Be sure to call * {@link HConnectionManager#deleteConnection(org.apache.hadoop.conf.Configuration)} * when done with this mocked Connection.
* @throws IOException */ - private HConnection mockConnection(final AdminProtocol admin, - final ClientProtocol client) throws IOException { + private HConnection mockConnection(final AdminProtos.AdminService.BlockingInterface admin, + final ClientProtos.ClientService.BlockingInterface client) + throws IOException { HConnection connection = HConnectionTestingUtility.getMockedConnection(UTIL.getConfiguration()); Mockito.doNothing().when(connection).close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java index 64c92543f44..4c9499669c9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java @@ -30,12 +30,14 @@ import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.ScannerCallable; -import org.apache.hadoop.hbase.ipc.HBaseClient; -import org.apache.hadoop.hbase.ipc.HBaseServer; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -43,8 +45,6 @@ import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.apache.commons.logging.impl.Log4JLogger; -import org.apache.log4j.Level; /** * Test {@link MetaReader}, {@link MetaEditor}. 
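A side effect of the protocol-to-BlockingInterface move that these test hunks rely on: the generated stubs are ordinary Java interfaces, so Mockito can mock them without any RPC machinery. An illustrative test, using a hypothetical GetService in place of the real ClientService.BlockingInterface:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.junit.Test;

public class BlockingStubMockingExample {
  // Stand-in for a generated BlockingInterface; the real one takes protobuf types.
  interface GetService {
    String get(String request) throws Exception;
  }

  @Test
  public void mockedStubThrowsLikeAFailingServer() throws Exception {
    GetService client = mock(GetService.class);
    // Simulate a refused connection without standing up a server.
    when(client.get("row")).thenThrow(new Exception("Connection refused"));
    try {
      client.get("row");
    } catch (Exception expected) {
      // a real test would assert on the caller's retry/failure handling here
    }
  }
}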
@@ -71,9 +71,6 @@ public class TestMetaReaderEditor { }; @BeforeClass public static void beforeClass() throws Exception { - ((Log4JLogger)HBaseServer.LOG).getLogger().setLevel(Level.ALL); - ((Log4JLogger)HBaseClient.LOG).getLogger().setLevel(Level.ALL); - ((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL); UTIL.startMiniCluster(3); Configuration c = new Configuration(UTIL.getConfiguration()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java index 616ffc2fd70..eb2439dcd20 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java @@ -37,12 +37,12 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; import org.apache.hadoop.hbase.util.Bytes; @@ -138,7 +138,8 @@ public class TestMetaReaderEditorNoCluster { try { // Mock a ClientProtocol. Our mock implementation will fail a few // times when we go to open a scanner. - final ClientProtocol implementation = Mockito.mock(ClientProtocol.class); + final ClientProtos.ClientService.BlockingInterface implementation = + Mockito.mock(ClientProtos.ClientService.BlockingInterface.class); // When scan is called, throw IOE 'Server not running' a few times // before we return a scanner id.
What's WEIRD is that these exceptions do not show in the log because they are caught and only diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java index 8ff8919110c..7982423668b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java @@ -20,13 +20,13 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation; -import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionKey; import org.mockito.Mockito; /** @@ -52,13 +52,13 @@ public class HConnectionTestingUtility { public static HConnection getMockedConnection(final Configuration conf) throws ZooKeeperConnectionException { HConnectionKey connectionKey = new HConnectionKey(conf); - synchronized (HConnectionManager.HBASE_INSTANCES) { + synchronized (HConnectionManager.CONNECTION_INSTANCES) { HConnectionImplementation connection = - HConnectionManager.HBASE_INSTANCES.get(connectionKey); + HConnectionManager.CONNECTION_INSTANCES.get(connectionKey); if (connection == null) { connection = Mockito.mock(HConnectionImplementation.class); Mockito.when(connection.getConfiguration()).thenReturn(conf); - HConnectionManager.HBASE_INSTANCES.put(connectionKey, connection); + HConnectionManager.CONNECTION_INSTANCES.put(connectionKey, connection); } return connection; } @@ -84,16 +84,17 @@ public class HConnectionTestingUtility { * @return Mock up a connection that returns a {@link Configuration} when * {@link HConnection#getConfiguration()} is called, a 'location' when * {@link HConnection#getRegionLocation(byte[], byte[], boolean)} is called, - * and that returns the passed {@link AdminProtocol} instance when + * and that returns the passed {@link AdminProtos.AdminService.BlockingInterface} instance when * {@link HConnection#getAdmin(ServerName)} is called, returns the passed - * {@link ClientProtocol} instance when {@link HConnection#getClient(ServerName)} - * is called (Be sure call + * {@link ClientProtos.ClientService.BlockingInterface} instance when + * {@link HConnection#getClient(ServerName)} is called (Be sure to call * {@link HConnectionManager#deleteConnection(HConnectionKey, boolean)} * when done with this mocked Connection.
* @throws IOException */ public static HConnection getMockedConnectionAndDecorate(final Configuration conf, - final AdminProtocol admin, final ClientProtocol client, + final AdminProtos.AdminService.BlockingInterface admin, + final ClientProtos.ClientService.BlockingInterface client, final ServerName sn, final HRegionInfo hri) throws IOException { HConnection c = HConnectionTestingUtility.getMockedConnection(conf); @@ -133,12 +134,12 @@ public class HConnectionTestingUtility { public static HConnection getSpiedConnection(final Configuration conf) throws IOException { HConnectionKey connectionKey = new HConnectionKey(conf); - synchronized (HConnectionManager.HBASE_INSTANCES) { + synchronized (HConnectionManager.CONNECTION_INSTANCES) { HConnectionImplementation connection = - HConnectionManager.HBASE_INSTANCES.get(connectionKey); + HConnectionManager.CONNECTION_INSTANCES.get(connectionKey); if (connection == null) { connection = Mockito.spy(new HConnectionImplementation(conf, true)); - HConnectionManager.HBASE_INSTANCES.put(connectionKey, connection); + HConnectionManager.CONNECTION_INSTANCES.put(connectionKey, connection); } return connection; } @@ -148,8 +149,8 @@ public class HConnectionTestingUtility { * @return Count of extant connection instances */ public static int getConnectionCount() { - synchronized (HConnectionManager.HBASE_INSTANCES) { - return HConnectionManager.HBASE_INSTANCES.size(); + synchronized (HConnectionManager.CONNECTION_INSTANCES) { + return HConnectionManager.CONNECTION_INSTANCES.size(); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java index b604df9ecc5..567045141f2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java @@ -52,8 +52,8 @@ import org.apache.hadoop.hbase.exceptions.TableNotEnabledException; import org.apache.hadoop.hbase.exceptions.TableNotFoundException; import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException; import org.apache.hadoop.hbase.executor.EventHandler; -import org.apache.hadoop.hbase.ipc.HBaseClient; -import org.apache.hadoop.hbase.ipc.HBaseServer; +import org.apache.hadoop.hbase.ipc.RpcClient; +import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java index ea66ae98758..a5a05a1741c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java @@ -29,8 +29,8 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.MiniHBaseCluster.MiniHBaseClusterRegionServer; -import org.apache.hadoop.hbase.ipc.HBaseClient; -import org.apache.hadoop.hbase.ipc.HBaseServer; +import org.apache.hadoop.hbase.ipc.RpcClient; +import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; import 
org.apache.hadoop.hbase.util.Bytes; @@ -59,10 +59,12 @@ public class TestClientScannerRPCTimeout { @BeforeClass public static void setUpBeforeClass() throws Exception { - ((Log4JLogger)HBaseServer.LOG).getLogger().setLevel(Level.ALL); - ((Log4JLogger)HBaseClient.LOG).getLogger().setLevel(Level.ALL); + ((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL); + ((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL); ((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL); Configuration conf = TEST_UTIL.getConfiguration(); + // Don't report so often, so it's easier to see other rpcs + conf.setInt("hbase.regionserver.msginterval", 3 * 10000); conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, rpcTimeout); conf.setStrings(HConstants.REGION_SERVER_IMPL, RegionServerWithScanTimeout.class.getName()); conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, CLIENT_RETRIES_NUMBER); @@ -84,12 +86,14 @@ public class TestClientScannerRPCTimeout { putToTable(ht, r1); putToTable(ht, r2); putToTable(ht, r3); + LOG.info("Wrote our three values"); RegionServerWithScanTimeout.seqNoToSleepOn = 1; Scan scan = new Scan(); scan.setCaching(1); ResultScanner scanner = ht.getScanner(scan); Result result = scanner.next(); assertTrue("Expected row: row-1", Bytes.equals(r1, result.getRow())); + LOG.info("Got expected first row"); long t1 = System.currentTimeMillis(); result = scanner.next(); assertTrue((System.currentTimeMillis() - t1) > rpcTimeout); @@ -127,7 +131,8 @@ public class TestClientScannerRPCTimeout { private static boolean sleepAlways = false; private static int tryNumber = 0; - public RegionServerWithScanTimeout(Configuration conf) throws IOException, InterruptedException { + public RegionServerWithScanTimeout(Configuration conf) + throws IOException, InterruptedException { super(conf); } @@ -139,6 +144,7 @@ public class TestClientScannerRPCTimeout { if (this.tableScannerId == request.getScannerId() && (sleepAlways || (!slept && seqNoToSleepOn == request.getNextCallSeq()))) { try { + LOG.info("SLEEPING " + (rpcTimeout + 500)); Thread.sleep(rpcTimeout + 500); } catch (InterruptedException e) { } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java index 26d10d1b6e4..15b1dc6c6eb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java @@ -19,7 +19,12 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.*; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.net.SocketTimeoutException; +import java.util.Random; +import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -27,14 +32,22 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.exceptions.MasterNotRunningException; import org.apache.hadoop.hbase.MediumTests; -import org.apache.hadoop.hbase.ipc.RandomTimeoutRpcEngine; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.exceptions.MasterNotRunningException; +import org.apache.hadoop.hbase.ipc.RpcClient; +import org.apache.hadoop.hbase.security.User; import org.junit.AfterClass; import
org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; +import com.google.protobuf.BlockingRpcChannel; +import com.google.protobuf.Descriptors.MethodDescriptor; +import com.google.protobuf.Message; +import com.google.protobuf.RpcController; +import com.google.protobuf.ServiceException; + @Category(MediumTests.class) public class TestClientTimeouts { final Log LOG = LogFactory.getLog(getClass()); @@ -46,7 +59,6 @@ public class TestClientTimeouts { */ @BeforeClass public static void setUpBeforeClass() throws Exception { - Configuration conf = TEST_UTIL.getConfiguration(); TEST_UTIL.startMiniCluster(SLAVES); } @@ -68,22 +80,29 @@ public class TestClientTimeouts { long lastLimit = HConstants.DEFAULT_HBASE_CLIENT_PREFETCH_LIMIT; HConnection lastConnection = null; boolean lastFailed = false; - int initialInvocations = RandomTimeoutRpcEngine.getNumberOfInvocations(); - - RandomTimeoutRpcEngine engine = new RandomTimeoutRpcEngine(TEST_UTIL.getConfiguration()); + int initialInvocations = RandomTimeoutBlockingRpcChannel.invokations.get(); + RpcClient rpcClient = new RpcClient(TEST_UTIL.getConfiguration(), TEST_UTIL.getClusterKey()) { + // Return my own instance, one that does random timeouts + @Override + public BlockingRpcChannel createBlockingRpcChannel(ServerName sn, + User ticket, int rpcTimeout) { + return new RandomTimeoutBlockingRpcChannel(this, sn, ticket, rpcTimeout); + } + }; try { for (int i = 0; i < 5 || (lastFailed && i < 100); ++i) { lastFailed = false; // Ensure the HBaseAdmin uses a new connection by changing Configuration. Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration()); conf.setLong(HConstants.HBASE_CLIENT_PREFETCH_LIMIT, ++lastLimit); + HBaseAdmin admin = null; try { - HBaseAdmin admin = new HBaseAdmin(conf); + admin = new HBaseAdmin(conf); HConnection connection = admin.getConnection(); assertFalse(connection == lastConnection); lastConnection = connection; - // override the connection's rpc engine for timeout testing - ((HConnectionManager.HConnectionImplementation)connection).setRpcEngine(engine); + // Override the connection's rpc client for timeout testing + ((HConnectionManager.HConnectionImplementation)connection).setRpcClient(rpcClient); // run some admin commands HBaseAdmin.checkHBaseAvailable(conf); admin.setBalancerRunning(false, false); @@ -91,13 +110,43 @@ public class TestClientTimeouts { // Since we are randomly throwing SocketTimeoutExceptions, it is possible to get // a MasterNotRunningException. It's a bug if we get other exceptions. lastFailed = true; + } finally { + admin.close(); } } // Ensure the RandomTimeoutRpcEngine is actually being used. assertFalse(lastFailed); - assertTrue(RandomTimeoutRpcEngine.getNumberOfInvocations() > initialInvocations); + assertTrue(RandomTimeoutBlockingRpcChannel.invokations.get() > initialInvocations); } finally { - engine.close(); + rpcClient.stop(); } } -} + + /** + * Blocking rpc channel that goes via hbase rpc. 
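+ * A fraction of calls (CHANCE_OF_TIMEOUT) throws a SocketTimeoutException wrapped in a + * ServiceException, so client-side timeout handling gets exercised without a real network fault.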
+ */ + static class RandomTimeoutBlockingRpcChannel extends RpcClient.BlockingRpcChannelImplementation { + private static final Random RANDOM = new Random(System.currentTimeMillis()); + public static final double CHANCE_OF_TIMEOUT = 0.3; + private static AtomicInteger invokations = new AtomicInteger(); + + RandomTimeoutBlockingRpcChannel(final RpcClient rpcClient, final ServerName sn, + final User ticket, final int rpcTimeout) { + super(rpcClient, sn, ticket, rpcTimeout); + } + + @Override + public Message callBlockingMethod(MethodDescriptor md, + RpcController controller, Message param, Message returnType) + throws ServiceException { + invokations.getAndIncrement(); + if (RANDOM.nextFloat() < CHANCE_OF_TIMEOUT) { + // Throw a ServiceException, because that is the only exception type a protobuf + // blocking rpc channel can surface to its caller; wrapping a SocketTimeoutException + // this way mimics what a real rpc timeout looks like to the client under test. + throw new ServiceException(new SocketTimeoutException("fake timeout")); + } + return super.callBlockingMethod(md, controller, param, returnType); + } + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index 4f44915c7b1..a6dd7463c47 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -78,8 +78,8 @@ import org.apache.hadoop.hbase.filter.WhileMatchFilter; import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; -import org.apache.hadoop.hbase.ipc.HBaseClient; -import org.apache.hadoop.hbase.ipc.HBaseServer; +import org.apache.hadoop.hbase.ipc.RpcClient; +import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; @@ -124,8 +124,8 @@ public class TestFromClientSide { */ @BeforeClass public static void setUpBeforeClass() throws Exception { - ((Log4JLogger)HBaseServer.LOG).getLogger().setLevel(Level.ALL); - ((Log4JLogger)HBaseClient.LOG).getLogger().setLevel(Level.ALL); + ((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL); + ((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL); ((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL); Configuration conf = TEST_UTIL.getConfiguration(); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java index c235309b730..1d9e59da2a9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java @@ -36,8 +36,8 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.LargeTests; -import org.apache.hadoop.hbase.client.AdminProtocol; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; import
org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.junit.After; @@ -116,7 +116,7 @@ public class TestFromClientSide3 { HConnection conn = HConnectionManager.getConnection(TEST_UTIL .getConfiguration()); HRegionLocation loc = table.getRegionLocation(row, true); - AdminProtocol server = conn.getAdmin(loc.getServerName()); + AdminProtos.AdminService.BlockingInterface server = conn.getAdmin(loc.getServerName()); byte[] regName = loc.getRegionInfo().getRegionName(); for (int i = 0; i < nFlushes; i++) { @@ -163,7 +163,8 @@ public class TestFromClientSide3 { // Verify we have multiple store files. HRegionLocation loc = hTable.getRegionLocation(row, true); byte[] regionName = loc.getRegionInfo().getRegionName(); - AdminProtocol server = connection.getAdmin(loc.getServerName()); + AdminProtos.AdminService.BlockingInterface server = + connection.getAdmin(loc.getServerName()); assertTrue(ProtobufUtil.getStoreFiles( server, regionName, FAMILY).size() > 1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java index d683ec17807..4f5671dfc6d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java @@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation; -import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionKey; import org.apache.hadoop.hbase.exceptions.RegionServerStoppedException; import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException; import org.apache.hadoop.hbase.master.ClusterStatusPublisher; @@ -99,7 +98,7 @@ public class TestHCM { IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException, ZooKeeperConnectionException, IOException { HConnection last = null; - for (int i = 0; i <= (HConnectionManager.MAX_CACHED_HBASE_INSTANCES * 2); i++) { + for (int i = 0; i <= (HConnectionManager.MAX_CACHED_CONNECTION_INSTANCES * 2); i++) { // set random key to differentiate the connection from previous ones Configuration configuration = HBaseConfiguration.create(); configuration.set("somekey", String.valueOf(_randy.nextInt())); @@ -186,9 +185,9 @@ public class TestHCM { // Save off current HConnections Map oldHBaseInstances = new HashMap(); - oldHBaseInstances.putAll(HConnectionManager.HBASE_INSTANCES); + oldHBaseInstances.putAll(HConnectionManager.CONNECTION_INSTANCES); - HConnectionManager.HBASE_INSTANCES.clear(); + HConnectionManager.CONNECTION_INSTANCES.clear(); try { HConnection connection = HConnectionManager.getConnection(TEST_UTIL.getConfiguration()); @@ -198,8 +197,8 @@ public class TestHCM { HConnectionManager.getConnection(TEST_UTIL.getConfiguration())); } finally { // Put original HConnections back - HConnectionManager.HBASE_INSTANCES.clear(); - HConnectionManager.HBASE_INSTANCES.putAll(oldHBaseInstances); + HConnectionManager.CONNECTION_INSTANCES.clear(); + HConnectionManager.CONNECTION_INSTANCES.putAll(oldHBaseInstances); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java index 041afa45637..8ec146909e7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java @@ -36,8 +36,8 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.Waiter; -import org.apache.hadoop.hbase.ipc.HBaseClient; -import org.apache.hadoop.hbase.ipc.HBaseServer; +import org.apache.hadoop.hbase.ipc.RpcClient; +import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil; @@ -53,8 +53,8 @@ import org.junit.experimental.categories.Category; public class TestMultiParallel { private static final Log LOG = LogFactory.getLog(TestMultiParallel.class); { - ((Log4JLogger)HBaseServer.LOG).getLogger().setLevel(Level.ALL); - ((Log4JLogger)HBaseClient.LOG).getLogger().setLevel(Level.ALL); + ((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL); + ((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL); } private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); private static final byte[] VALUE = Bytes.toBytes("value"); @@ -68,8 +68,8 @@ public class TestMultiParallel { private static final int slaves = 2; // also used for testing HTable pool size @BeforeClass public static void beforeClass() throws Exception { - ((Log4JLogger)HBaseServer.LOG).getLogger().setLevel(Level.ALL); - ((Log4JLogger)HBaseClient.LOG).getLogger().setLevel(Level.ALL); + ((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL); + ((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL); ((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL); UTIL.startMiniCluster(slaves); HTable t = UTIL.createTable(Bytes.toBytes(TEST_TABLE), Bytes.toBytes(FAMILY)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWithScanLimits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWithScanLimits.java index b1246acc516..21db8b7e949 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWithScanLimits.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWithScanLimits.java @@ -34,8 +34,8 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.exceptions.MasterNotRunningException; import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.ipc.HBaseClient; -import org.apache.hadoop.hbase.ipc.HBaseServer; +import org.apache.hadoop.hbase.ipc.RpcClient; +import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; @@ -177,8 +177,8 @@ public class TestFilterWithScanLimits { @BeforeClass public static void setUp() throws Exception { - ((Log4JLogger)HBaseServer.LOG).getLogger().setLevel(Level.ALL); - ((Log4JLogger)HBaseClient.LOG).getLogger().setLevel(Level.ALL); + ((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL); + ((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL); ((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL); TEST_UTIL.startMiniCluster(1); initialize(TEST_UTIL.getConfiguration()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java index 
4c59da78661..a95aed3d9c4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java @@ -40,32 +40,33 @@ import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.ipc.RpcClient; +import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; +import org.apache.log4j.Level; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.commons.logging.impl.Log4JLogger; /** * Tests changing data block encoding settings of a column family. */ @Category(LargeTests.class) public class TestChangingEncoding { - private static final Log LOG = LogFactory.getLog(TestChangingEncoding.class); - static final String CF = "EncodingTestCF"; static final byte[] CF_BYTES = Bytes.toBytes(CF); private static final int NUM_ROWS_PER_BATCH = 100; private static final int NUM_COLS_PER_ROW = 20; - private static final HBaseTestingUtility TEST_UTIL = - new HBaseTestingUtility(); + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static final Configuration conf = TEST_UTIL.getConfiguration(); private static final int TIMEOUT_MS = 240000; @@ -100,6 +101,8 @@ public class TestChangingEncoding { public static void setUpBeforeClass() throws Exception { // Use a small flush size to create more HFiles. conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 1024); + // ((Log4JLogger)RpcServerImplementation.LOG).getLogger().setLevel(Level.TRACE); + // ((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.TRACE); TEST_UTIL.startMiniCluster(); } @@ -190,6 +193,7 @@ public class TestChangingEncoding { prepareTest("ChangingEncoding"); for (boolean encodeOnDisk : new boolean[]{false, true}) { for (DataBlockEncoding encoding : ENCODINGS_TO_ITERATE) { + LOG.info("encoding=" + encoding + ", encodeOnDisk=" + encodeOnDisk); setEncodingConf(encoding, encodeOnDisk); writeSomeNewData(); verifyAllData(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/RandomTimeoutRpcEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/RandomTimeoutRpcEngine.java deleted file mode 100644 index 3cf9e4fc6df..00000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/RandomTimeoutRpcEngine.java +++ /dev/null @@ -1,96 +0,0 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.ipc; - -import java.io.IOException; -import java.lang.reflect.InvocationHandler; -import java.lang.reflect.Method; -import java.lang.reflect.Proxy; -import java.net.InetSocketAddress; -import java.net.SocketTimeoutException; -import java.util.Random; -import java.util.concurrent.atomic.AtomicInteger; - -import javax.net.SocketFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.IpcProtocol; -import org.apache.hadoop.hbase.security.User; - -import com.google.protobuf.ServiceException; - -/** - * RpcEngine that random throws a SocketTimeoutEngine for testing. - * Make sure to call setProtocolEngine to have the client actually use the RpcEngine - * for a specific protocol - */ -public class RandomTimeoutRpcEngine extends ProtobufRpcClientEngine { - - private static final Random RANDOM = new Random(System.currentTimeMillis()); - public static double chanceOfTimeout = 0.3; - private static AtomicInteger invokations = new AtomicInteger(); - - public RandomTimeoutRpcEngine(Configuration conf) { - super(conf, HConstants.CLUSTER_ID_DEFAULT); - } - - @Override - public T getProxy( - Class protocol, InetSocketAddress addr, Configuration conf, int rpcTimeout) - throws IOException { - // Start up the requested-for proxy so we can pass-through calls to the underlying - // RpcEngine. Also instantiate and return our own proxy (RandomTimeoutInvocationHandler) - // that will either throw exceptions or pass through to the underlying proxy. - T actualProxy = super.getProxy(protocol, addr, conf, rpcTimeout); - RandomTimeoutInvocationHandler invoker = - new RandomTimeoutInvocationHandler(actualProxy); - T wrapperProxy = (T)Proxy.newProxyInstance( - protocol.getClassLoader(), new Class[]{protocol}, invoker); - return wrapperProxy; - } - - /** - * @return the number of times the invoker has been invoked - */ - public static int getNumberOfInvocations() { - return invokations.get(); - } - - static class RandomTimeoutInvocationHandler implements InvocationHandler { - private IpcProtocol actual = null; - - public RandomTimeoutInvocationHandler(IpcProtocol actual) { - this.actual = actual; - } - - public Object invoke(Object proxy, Method method, Object[] args) - throws Throwable { - RandomTimeoutRpcEngine.invokations.getAndIncrement(); - if (RANDOM.nextFloat() < chanceOfTimeout) { - // throw a ServiceException, becuase that is the only exception type that - // {@link ProtobufRpcEngine} throws. If this RpcEngine is used with a different - // "actual" type, this may not properly mimic the underlying RpcEngine. 
- throw new ServiceException(new SocketTimeoutException("fake timeout")); - } - return Proxy.getInvocationHandler(actual).invoke(proxy, method, args); - } - } -} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestDelayedRpc.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestDelayedRpc.java index 20d347d45c7..c3f1665e916 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestDelayedRpc.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestDelayedRpc.java @@ -28,15 +28,18 @@ import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.List; +import com.google.common.collect.Lists; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.IpcProtocol; import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos; import org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos.TestArg; import org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos.TestResponse; +import org.apache.hadoop.hbase.security.User; import org.apache.log4j.AppenderSkeleton; import org.apache.log4j.Level; import org.apache.log4j.Logger; @@ -44,6 +47,9 @@ import org.apache.log4j.spi.LoggingEvent; import org.junit.Test; import org.junit.experimental.categories.Category; +import com.google.protobuf.BlockingRpcChannel; +import com.google.protobuf.BlockingService; +import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; /** @@ -55,7 +61,7 @@ import com.google.protobuf.ServiceException; public class TestDelayedRpc { private static final Log LOG = LogFactory.getLog(TestDelayedRpc.class); - public static RpcServer rpcServer; + public static RpcServerInterface rpcServer; public static final int UNDELAYED = 0; public static final int DELAYED = 1; @@ -73,23 +79,25 @@ public class TestDelayedRpc { private void testDelayedRpc(boolean delayReturnValue) throws Exception { Configuration conf = HBaseConfiguration.create(); InetSocketAddress isa = new InetSocketAddress("localhost", 0); - TestRpcImpl instance = new TestRpcImpl(delayReturnValue); - rpcServer = HBaseServerRPC.getServer(instance.getClass(), instance, - new Class[]{ TestRpcImpl.class }, - isa.getHostName(), isa.getPort(), 1, 0, true, conf, 0); + TestDelayedImplementation instance = new TestDelayedImplementation(delayReturnValue); + BlockingService service = + TestDelayedRpcProtos.TestDelayedService.newReflectiveBlockingService(instance); + rpcServer = new RpcServer(null, "testDelayedRpc", + Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(service, null)), + isa, 1, 0, conf, 0); rpcServer.start(); - - ProtobufRpcClientEngine clientEngine = - new ProtobufRpcClientEngine(conf, HConstants.CLUSTER_ID_DEFAULT); + RpcClient rpcClient = new RpcClient(conf, HConstants.DEFAULT_CLUSTER_ID.toString()); try { - TestRpc client = clientEngine.getProxy(TestRpc.class, - rpcServer.getListenerAddress(), conf, 1000); - + BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel( + new ServerName(rpcServer.getListenerAddress().getHostName(), + rpcServer.getListenerAddress().getPort(), System.currentTimeMillis()), + User.getCurrent(), 1000); + TestDelayedRpcProtos.TestDelayedService.BlockingInterface stub = + 
TestDelayedRpcProtos.TestDelayedService.newBlockingStub(channel); List results = new ArrayList(); - - TestThread th1 = new TestThread(client, true, results); - TestThread th2 = new TestThread(client, false, results); - TestThread th3 = new TestThread(client, false, results); + TestThread th1 = new TestThread(stub, true, results); + TestThread th2 = new TestThread(stub, false, results); + TestThread th3 = new TestThread(stub, false, results); th1.start(); Thread.sleep(100); th2.start(); @@ -104,7 +112,7 @@ public class TestDelayedRpc { assertEquals(UNDELAYED, results.get(1).intValue()); assertEquals(results.get(2).intValue(), delayReturnValue ? DELAYED : 0xDEADBEEF); } finally { - clientEngine.close(); + rpcClient.stop(); } } @@ -130,34 +138,41 @@ public class TestDelayedRpc { } } + /** + * Tests that we see a WARN message in the logs. + * @throws Exception + */ @Test public void testTooManyDelayedRpcs() throws Exception { Configuration conf = HBaseConfiguration.create(); final int MAX_DELAYED_RPC = 10; conf.setInt("hbase.ipc.warn.delayedrpc.number", MAX_DELAYED_RPC); - + // Set up an appender to catch the "Too many delayed calls" that we expect. ListAppender listAppender = new ListAppender(); - Logger log = Logger.getLogger("org.apache.hadoop.ipc.HBaseServer"); + Logger log = Logger.getLogger("org.apache.hadoop.ipc.RpcServer"); log.addAppender(listAppender); log.setLevel(Level.WARN); + InetSocketAddress isa = new InetSocketAddress("localhost", 0); - TestRpcImpl instance = new TestRpcImpl(true); - rpcServer = HBaseServerRPC.getServer(instance.getClass(), instance, - new Class[]{ TestRpcImpl.class }, - isa.getHostName(), isa.getPort(), 1, 0, true, conf, 0); + TestDelayedImplementation instance = new TestDelayedImplementation(true); + BlockingService service = + TestDelayedRpcProtos.TestDelayedService.newReflectiveBlockingService(instance); + rpcServer = new RpcServer(null, "testTooManyDelayedRpcs", + Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(service, null)), + isa, 1, 0, conf, 0); rpcServer.start(); - - ProtobufRpcClientEngine clientEngine = - new ProtobufRpcClientEngine(conf, HConstants.CLUSTER_ID_DEFAULT); + RpcClient rpcClient = new RpcClient(conf, HConstants.DEFAULT_CLUSTER_ID.toString()); try { - TestRpc client = clientEngine.getProxy(TestRpc.class, - rpcServer.getListenerAddress(), conf, 1000); - + BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel( + new ServerName(rpcServer.getListenerAddress().getHostName(), + rpcServer.getListenerAddress().getPort(), System.currentTimeMillis()), + User.getCurrent(), 1000); + TestDelayedRpcProtos.TestDelayedService.BlockingInterface stub = + TestDelayedRpcProtos.TestDelayedService.newBlockingStub(channel); Thread threads[] = new Thread[MAX_DELAYED_RPC + 1]; - for (int i = 0; i < MAX_DELAYED_RPC; i++) { - threads[i] = new TestThread(client, true, null); + threads[i] = new TestThread(stub, true, null); threads[i].start(); } @@ -165,7 +180,7 @@ public class TestDelayedRpc { assertTrue(listAppender.getMessages().isEmpty()); /* This should give a warning. 
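It starts one more delayed call than the hbase.ipc.warn.delayedrpc.number limit configured above allows, so the server should log "Too many delayed calls".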
*/ - threads[MAX_DELAYED_RPC] = new TestThread(client, true, null); + threads[MAX_DELAYED_RPC] = new TestThread(stub, true, null); threads[MAX_DELAYED_RPC].start(); for (int i = 0; i < MAX_DELAYED_RPC; i++) { @@ -173,20 +188,16 @@ public class TestDelayedRpc { } assertFalse(listAppender.getMessages().isEmpty()); - assertTrue(listAppender.getMessages().get(0).startsWith( - "Too many delayed calls")); + assertTrue(listAppender.getMessages().get(0).startsWith("Too many delayed calls")); log.removeAppender(listAppender); } finally { - clientEngine.close(); + rpcClient.stop(); } } - public interface TestRpc extends IpcProtocol { - TestResponse test(final Object rpcController, TestArg delay) throws ServiceException; - } - - private static class TestRpcImpl implements TestRpc { + static class TestDelayedImplementation + implements TestDelayedRpcProtos.TestDelayedService.BlockingInterface { /** * Should the return value of delayed call be set at the end of the delay * or at call return. @@ -197,12 +208,12 @@ public class TestDelayedRpc { * @param delayReturnValue Should the response to the delayed call be set * at the start or the end of the delay. */ - public TestRpcImpl(boolean delayReturnValue) { + public TestDelayedImplementation(boolean delayReturnValue) { this.delayReturnValue = delayReturnValue; } @Override - public TestResponse test(final Object rpcController, final TestArg testArg) + public TestResponse test(final RpcController rpcController, final TestArg testArg) throws ServiceException { boolean delay = testArg.getDelay(); TestResponse.Builder responseBuilder = TestResponse.newBuilder(); @@ -210,7 +221,7 @@ public class TestDelayedRpc { responseBuilder.setResponse(UNDELAYED); return responseBuilder.build(); } - final Delayable call = HBaseServer.getCurrentCall(); + final Delayable call = RpcServer.getCurrentCall(); call.startDelay(delayReturnValue); new Thread() { public void run() { @@ -232,28 +243,30 @@ public class TestDelayedRpc { } private static class TestThread extends Thread { - private TestRpc server; + private TestDelayedRpcProtos.TestDelayedService.BlockingInterface stub; private boolean delay; private List results; - public TestThread(TestRpc server, boolean delay, List results) { - this.server = server; + public TestThread(TestDelayedRpcProtos.TestDelayedService.BlockingInterface stub, + boolean delay, List results) { + this.stub = stub; this.delay = delay; this.results = results; } @Override public void run() { + Integer result; try { - Integer result = new Integer(server.test(null, TestArg.newBuilder().setDelay(delay). + result = new Integer(stub.test(null, TestArg.newBuilder().setDelay(delay). 
build()).getResponse()); - if (results != null) { - synchronized (results) { - results.add(result); - } + } catch (ServiceException e) { + throw new RuntimeException(e); + } + if (results != null) { + synchronized (results) { + results.add(result); } - } catch (Exception e) { - fail("Unexpected exception: "+e.getMessage()); } } } @@ -262,22 +275,26 @@ public class TestDelayedRpc { public void testEndDelayThrowing() throws IOException { Configuration conf = HBaseConfiguration.create(); InetSocketAddress isa = new InetSocketAddress("localhost", 0); - FaultyTestRpc instance = new FaultyTestRpc(); - rpcServer = HBaseServerRPC.getServer(instance.getClass(), instance, - new Class[]{ TestRpcImpl.class }, - isa.getHostName(), isa.getPort(), 1, 0, true, conf, 0); + FaultyTestDelayedImplementation instance = new FaultyTestDelayedImplementation(); + BlockingService service = + TestDelayedRpcProtos.TestDelayedService.newReflectiveBlockingService(instance); + rpcServer = new RpcServer(null, "testEndDelayThrowing", + Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(service, null)), + isa, 1, 0, conf, 0); rpcServer.start(); - - ProtobufRpcClientEngine clientEngine = - new ProtobufRpcClientEngine(conf, HConstants.CLUSTER_ID_DEFAULT); + RpcClient rpcClient = new RpcClient(conf, HConstants.DEFAULT_CLUSTER_ID.toString()); try { - TestRpc client = clientEngine.getProxy(TestRpc.class, - rpcServer.getListenerAddress(), conf, 1000); + BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel( + new ServerName(rpcServer.getListenerAddress().getHostName(), + rpcServer.getListenerAddress().getPort(), System.currentTimeMillis()), + User.getCurrent(), 1000); + TestDelayedRpcProtos.TestDelayedService.BlockingInterface stub = + TestDelayedRpcProtos.TestDelayedService.newBlockingStub(channel); int result = 0xDEADBEEF; try { - result = client.test(null, TestArg.newBuilder().setDelay(false).build()).getResponse(); + result = stub.test(null, TestArg.newBuilder().setDelay(false).build()).getResponse(); } catch (Exception e) { fail("No exception should have been thrown."); } @@ -285,30 +302,36 @@ public class TestDelayedRpc { boolean caughtException = false; try { - result = client.test(null, TestArg.newBuilder().setDelay(true).build()).getResponse(); + result = stub.test(null, TestArg.newBuilder().setDelay(true).build()).getResponse(); } catch(Exception e) { // Exception thrown by server is enclosed in a RemoteException. - if (e.getCause().getMessage().contains( - "java.lang.Exception: Something went wrong")) + if (e.getCause().getMessage().contains("java.lang.Exception: Something went wrong")) { caughtException = true; - LOG.warn(e); + } + LOG.warn("Caught exception, expected=" + caughtException); } assertTrue(caughtException); } finally { - clientEngine.close(); + rpcClient.stop(); } } /** * Delayed calls to this class throw an exception. 
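* The delay is ended via endDelayThrowing, so the waiting client gets the error back instead of a normal response; testEndDelayThrowing above relies on this.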
*/ - private static class FaultyTestRpc implements TestRpc { + private static class FaultyTestDelayedImplementation extends TestDelayedImplementation { + public FaultyTestDelayedImplementation() { + super(false); + } + @Override - public TestResponse test(Object rpcController, TestArg arg) { - if (!arg.getDelay()) - return TestResponse.newBuilder().setResponse(UNDELAYED).build(); - Delayable call = HBaseServer.getCurrentCall(); + public TestResponse test(RpcController rpcController, TestArg arg) + throws ServiceException { + LOG.info("In faulty test, delay=" + arg.getDelay()); + if (!arg.getDelay()) return TestResponse.newBuilder().setResponse(UNDELAYED).build(); + Delayable call = RpcServer.getCurrentCall(); call.startDelay(true); + LOG.info("In faulty test, delaying"); try { call.endDelayThrowing(new Exception("Something went wrong")); } catch (IOException e) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseClient.java index 235f829e31f..246e9b7fad5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseClient.java @@ -37,7 +37,7 @@ public class TestHBaseClient { public void testFailedServer(){ ManualEnvironmentEdge ee = new ManualEnvironmentEdge(); EnvironmentEdgeManager.injectEdge( ee ); - HBaseClient.FailedServers fs = new HBaseClient.FailedServers(new Configuration()); + RpcClient.FailedServers fs = new RpcClient.FailedServers(new Configuration()); InetSocketAddress ia = InetSocketAddress.createUnresolved("bad", 12); InetSocketAddress ia2 = InetSocketAddress.createUnresolved("bad", 12); // same server as ia @@ -55,7 +55,7 @@ public class TestHBaseClient { Assert.assertTrue( fs.isFailedServer(ia) ); Assert.assertTrue( fs.isFailedServer(ia2) ); - ee.incValue( HBaseClient.FAILED_SERVER_EXPIRY_DEFAULT + 1 ); + ee.incValue( RpcClient.FAILED_SERVER_EXPIRY_DEFAULT + 1 ); Assert.assertFalse( fs.isFailedServer(ia) ); Assert.assertFalse( fs.isFailedServer(ia2) ); @@ -68,7 +68,7 @@ public class TestHBaseClient { Assert.assertTrue( fs.isFailedServer(ia3) ); Assert.assertTrue( fs.isFailedServer(ia4) ); - ee.incValue( HBaseClient.FAILED_SERVER_EXPIRY_DEFAULT + 1 ); + ee.incValue( RpcClient.FAILED_SERVER_EXPIRY_DEFAULT + 1 ); Assert.assertFalse( fs.isFailedServer(ia) ); Assert.assertFalse( fs.isFailedServer(ia2) ); Assert.assertFalse( fs.isFailedServer(ia3) ); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestIPC.java index 40ee14cffbd..cabc8435ff2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestIPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestIPC.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.hbase.ipc; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Matchers.anyInt; @@ -25,7 +26,6 @@ import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.spy; import java.io.IOException; -import java.lang.reflect.Method; import java.net.InetSocketAddress; import java.net.Socket; import java.util.ArrayList; @@ -33,6 +33,7 @@ import java.util.List; import javax.net.SocketFactory; +import com.google.common.collect.Lists; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import 
org.apache.hadoop.conf.Configuration; @@ -42,12 +43,16 @@ import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.IpcProtocol; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.SmallTests; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RowMutations; +import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EchoRequestProto; +import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EchoResponseProto; +import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EmptyRequestProto; +import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EmptyResponseProto; +import org.apache.hadoop.hbase.ipc.protobuf.generated.TestRpcServiceProtos; import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.security.User; @@ -62,64 +67,115 @@ import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; +import com.google.protobuf.BlockingService; +import com.google.protobuf.Descriptors.MethodDescriptor; import com.google.protobuf.Message; +import com.google.protobuf.RpcController; +import com.google.protobuf.ServiceException; +/** + * Some basic ipc tests. + */ @Category(SmallTests.class) public class TestIPC { public static final Log LOG = LogFactory.getLog(TestIPC.class); static byte [] CELL_BYTES = Bytes.toBytes("xyz"); static Cell CELL = new KeyValue(CELL_BYTES, CELL_BYTES, CELL_BYTES, CELL_BYTES); + // We are using the test TestRpcServiceProtos generated classes and Service because they are + // available and basic with methods like 'echo' and 'ping'. Below we make a blocking service + // by passing in an implementation of the blocking interface. We use this service in all tests + // that follow.
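+ // The RpcServer dispatches on protobuf MethodDescriptors, so the tests below can look a + // method up by name, e.g. findMethodByName("echo"), and invoke it through the client.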
+ private static final BlockingService SERVICE = + TestRpcServiceProtos.TestProtobufRpcProto.newReflectiveBlockingService( + new TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface() { - private static class TestRpcServer extends HBaseServer { - TestRpcServer() throws IOException { - super("0.0.0.0", 0, 1, 1, HBaseConfiguration.create(), "TestRpcServer", 0); + @Override + public EmptyResponseProto ping(RpcController controller, + EmptyRequestProto request) throws ServiceException { + // TODO Auto-generated method stub + return null; } @Override - public Pair call(Class protocol, Method method, - Message param, final CellScanner cells, long receiveTime, MonitoredRPCHandler status) - throws IOException { - /* - List cellsOut = new ArrayList(); - while (cells.advance()) { - Cell cell = cells.current(); - Bytes.equals(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), - CELL_BYTES, 0, CELL_BYTES.length); - cellsOut.add(cell); + public EmptyResponseProto error(RpcController controller, + EmptyRequestProto request) throws ServiceException { + // TODO Auto-generated method stub + return null; + } + + @Override + public EchoResponseProto echo(RpcController controller, EchoRequestProto request) + throws ServiceException { + if (controller instanceof PayloadCarryingRpcController) { + PayloadCarryingRpcController pcrc = (PayloadCarryingRpcController)controller; + // If there are cells, scan them to check we are able to iterate what we were given; + // since this is an echo, just put them back on the controller, creating a new block. + // Tests our block building. + CellScanner cellScanner = pcrc.cellScanner(); + List list = new ArrayList(); + while(cellScanner.advance()) { + list.add(cellScanner.current()); + } + cellScanner = CellUtil.createCellScanner(list); + ((PayloadCarryingRpcController)controller).setCellScanner(cellScanner); } - return new Pair(param, CellUtil.createCellScanner(cellsOut)); - */ - return new Pair(param, null); + return EchoResponseProto.newBuilder().setMessage(request.getMessage()).build(); + } + }); + + /** + * Instance of server. We actually don't do anything special in here, so we could just use + * RpcServer directly. + */ + private static class TestRpcServer extends RpcServer { + TestRpcServer() throws IOException { + super(null, "testRpcServer", + Lists.newArrayList(new BlockingServiceAndInterface(SERVICE, null)), + new InetSocketAddress("0.0.0.0", 0), 1, 1, + HBaseConfiguration.create(), 0); + } + + @Override + public Pair call(BlockingService service, + MethodDescriptor md, Message param, CellScanner cellScanner, + long receiveTime, MonitoredRPCHandler status) throws IOException { + return super.call(service, md, param, cellScanner, receiveTime, status); } } /** - * A nothing protocol used in test below. + * It is hard to verify that the compression is actually happening under the wraps. Hope that, + * if unsupported, we get an exception out at some point (meantime, it has to be traced manually + * to confirm that compression is happening down in the client and server).
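+ * The test round-trips a few cells through the echo service with GzipCodec configured as + * hbase.client.rpc.compressor and asserts that every cell comes back intact.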
+ * @throws IOException + * @throws InterruptedException + * @throws SecurityException + * @throws NoSuchMethodException */ - interface NothingProtocol extends IpcProtocol { - void doNothing(); - } - - public static class DoNothing implements NothingProtocol { - public void doNothing() {} - } - @Test public void testCompressCellBlock() throws IOException, InterruptedException, SecurityException, NoSuchMethodException { + // Currently, you set the compressor to use via configuration, as done just below. Configuration conf = HBaseConfiguration.create(); conf.set("hbase.client.rpc.compressor", GzipCodec.class.getCanonicalName()); TestRpcServer rpcServer = new TestRpcServer(); - HBaseClient client = new HBaseClient(conf, HConstants.CLUSTER_ID_DEFAULT); + RpcClient client = new RpcClient(conf, HConstants.CLUSTER_ID_DEFAULT); List cells = new ArrayList(); - cells.add(CELL); + int count = 3; + for (int i = 0; i < count; i++) cells.add(CELL); try { rpcServer.start(); InetSocketAddress address = rpcServer.getListenerAddress(); - // Get any method name... just so it is not null - Method m = NothingProtocol.class.getMethod("doNothing"); - client.call(m, null, CellUtil.createCellScanner(cells), address, NothingProtocol.class, - User.getCurrent(), 0); + MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo"); + EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build(); + Pair r = client.call(md, param, CellUtil.createCellScanner(cells), + md.getOutputType().toProto(), User.getCurrent(), address, 0); + int index = 0; + while (r.getSecond().advance()) { + assertTrue(CELL.equals(r.getSecond().current())); + index++; + } + assertEquals(count, index); } finally { client.stop(); rpcServer.stop(); @@ -140,11 +196,13 @@ public class TestIPC { }).when(spyFactory).createSocket(); TestRpcServer rpcServer = new TestRpcServer(); - HBaseClient client = new HBaseClient(conf, HConstants.CLUSTER_ID_DEFAULT, spyFactory); + RpcClient client = new RpcClient(conf, HConstants.CLUSTER_ID_DEFAULT, spyFactory); try { rpcServer.start(); InetSocketAddress address = rpcServer.getListenerAddress(); - client.call(null, null, null, address, null, User.getCurrent(), 0); + MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo"); + EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build(); + client.call(md, param, null, null, User.getCurrent(), address, 0); fail("Expected an exception to have been thrown!"); } catch (Exception e) { LOG.info("Caught expected exception: " + e.toString()); @@ -167,7 +225,7 @@ public class TestIPC { int cellcount = Integer.parseInt(args[1]); Configuration conf = HBaseConfiguration.create(); TestRpcServer rpcServer = new TestRpcServer(); - HBaseClient client = new HBaseClient(conf, HConstants.CLUSTER_ID_DEFAULT); + RpcClient client = new RpcClient(conf, HConstants.CLUSTER_ID_DEFAULT); KeyValue kv = KeyValueUtil.ensureKeyValue(CELL); Put p = new Put(kv.getRow()); for (int i = 0; i < cellcount; i++) { @@ -178,8 +236,6 @@ public class TestIPC { try { rpcServer.start(); InetSocketAddress address = rpcServer.getListenerAddress(); - // Get any method name...
just so it is not null - Method m = NothingProtocol.class.getMethod("doNothing"); long startTime = System.currentTimeMillis(); User user = User.getCurrent(); for (int i = 0; i < cycles; i++) { @@ -194,7 +250,7 @@ public class TestIPC { // "Thread dump " + Thread.currentThread().getName()); } Pair response = - client.call(m, param, cellScanner, address, NothingProtocol.class, user, 0); + client.call(null, param, cellScanner, null, user, address, 0); /* int count = 0; while (p.getSecond().advance()) { @@ -209,4 +265,4 @@ public class TestIPC { rpcServer.stop(); } } -} +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtoBufRpc.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtoBufRpc.java index f45bce47cd0..02bbd9715da 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtoBufRpc.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtoBufRpc.java @@ -20,15 +20,19 @@ package org.apache.hadoop.hbase.ipc; import java.io.IOException; import java.net.InetSocketAddress; +import com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.IpcProtocol; import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos; +import org.apache.hadoop.hbase.ipc.protobuf.generated.TestRpcServiceProtos; import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EchoRequestProto; import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EchoResponseProto; import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EmptyRequestProto; import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EmptyResponseProto; -import org.apache.hadoop.hbase.ipc.protobuf.generated.TestRpcServiceProtos.TestProtobufRpcProto; +import org.apache.hadoop.hbase.security.User; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.junit.Assert; @@ -37,30 +41,29 @@ import org.junit.Before; import org.junit.After; import org.junit.experimental.categories.Category; +import com.google.protobuf.BlockingRpcChannel; +import com.google.protobuf.BlockingService; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; /** * Test for testing protocol buffer based RPC mechanism. 
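* Exercises the ping, echo, and error methods end to end against the new RpcClient and RpcServer.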
- * This test depends on test.proto definition of types in - * hbase-server/src/test/protobuf/test.proto - * and protobuf service definition from - * hbase-server/src/test/protobuf/test_rpc_service.proto + * This test depends on test.proto definition of types in src/test/protobuf/test.proto + * and protobuf service definition from src/test/protobuf/test_rpc_service.proto */ @Category(MediumTests.class) public class TestProtoBufRpc { public final static String ADDRESS = "0.0.0.0"; - public final static int PORT = 0; - private static InetSocketAddress addr; - private static Configuration conf; - private static RpcServer server; - - public interface TestRpcService extends TestProtobufRpcProto.BlockingInterface, IpcProtocol { - public long VERSION = 1; - } - - public static class PBServerImpl implements TestRpcService { + public static int PORT = 0; + private InetSocketAddress isa; + private Configuration conf; + private RpcServerInterface server; + /** + * Implementation of the test service defined out in TestRpcServiceProtos + */ + static class PBServerImpl + implements TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface { @Override public EmptyResponseProto ping(RpcController unused, EmptyRequestProto request) throws ServiceException { @@ -83,19 +86,21 @@ public class TestProtoBufRpc { @Before public void setUp() throws IOException { // Setup server for both protocols - conf = new Configuration(); + this.conf = HBaseConfiguration.create(); Logger log = Logger.getLogger("org.apache.hadoop.ipc.HBaseServer"); log.setLevel(Level.DEBUG); log = Logger.getLogger("org.apache.hadoop.ipc.HBaseServer.trace"); log.setLevel(Level.TRACE); // Create server side implementation PBServerImpl serverImpl = new PBServerImpl(); + BlockingService service = + TestRpcServiceProtos.TestProtobufRpcProto.newReflectiveBlockingService(serverImpl); // Get RPC server for server side implementation - server = HBaseServerRPC.getServer(TestRpcService.class,serverImpl, - new Class[]{TestRpcService.class}, - ADDRESS, PORT, 10, 10, true, conf, 0); - addr = server.getListenerAddress(); - server.start(); + this.server = new RpcServer(null, "testrpc", + Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(service, null)), + new InetSocketAddress(ADDRESS, PORT), 10, 10, conf, 0); + this.isa = server.getListenerAddress(); + this.server.start(); } @After @@ -105,27 +110,31 @@ public class TestProtoBufRpc { @Test public void testProtoBufRpc() throws Exception { - ProtobufRpcClientEngine clientEngine = - new ProtobufRpcClientEngine(conf, HConstants.CLUSTER_ID_DEFAULT); + RpcClient rpcClient = new RpcClient(conf, HConstants.CLUSTER_ID_DEFAULT); try { - TestRpcService client = clientEngine.getProxy(TestRpcService.class, addr, conf, 100000); + BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel( + new ServerName(this.isa.getHostName(), this.isa.getPort(), System.currentTimeMillis()), + User.getCurrent(), 0); + TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface stub = + TestRpcServiceProtos.TestProtobufRpcProto.newBlockingStub(channel); // Test ping method - EmptyRequestProto emptyRequest = EmptyRequestProto.newBuilder().build(); - client.ping(null, emptyRequest); + TestProtos.EmptyRequestProto emptyRequest = + TestProtos.EmptyRequestProto.newBuilder().build(); + stub.ping(null, emptyRequest); // Test echo method EchoRequestProto echoRequest = EchoRequestProto.newBuilder().setMessage("hello").build(); - EchoResponseProto echoResponse = client.echo(null, echoRequest); + EchoResponseProto echoResponse = 
stub.echo(null, echoRequest); Assert.assertEquals(echoResponse.getMessage(), "hello"); // Test error method - error should be thrown as RemoteException try { - client.error(null, emptyRequest); + stub.error(null, emptyRequest); Assert.fail("Expected exception is not thrown"); } catch (ServiceException e) { } } finally { - clientEngine.close(); + rpcClient.stop(); } } -} +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/protobuf/generated/TestDelayedRpcProtos.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/protobuf/generated/TestDelayedRpcProtos.java index 22bed840f5e..7b587ef702e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/protobuf/generated/TestDelayedRpcProtos.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/protobuf/generated/TestDelayedRpcProtos.java @@ -766,6 +766,227 @@ public final class TestDelayedRpcProtos { // @@protoc_insertion_point(class_scope:TestResponse) } + public static abstract class TestDelayedService + implements com.google.protobuf.Service { + protected TestDelayedService() {} + + public interface Interface { + public abstract void test( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos.TestArg request, + com.google.protobuf.RpcCallback done); + + } + + public static com.google.protobuf.Service newReflectiveService( + final Interface impl) { + return new TestDelayedService() { + @java.lang.Override + public void test( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos.TestArg request, + com.google.protobuf.RpcCallback done) { + impl.test(controller, request, done); + } + + }; + } + + public static com.google.protobuf.BlockingService + newReflectiveBlockingService(final BlockingInterface impl) { + return new com.google.protobuf.BlockingService() { + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final com.google.protobuf.Message callBlockingMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request) + throws com.google.protobuf.ServiceException { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callBlockingMethod() given method descriptor for " + + "wrong service type."); + } + switch(method.getIndex()) { + case 0: + return impl.test(controller, (org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos.TestArg)request); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos.TestArg.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given 
method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos.TestResponse.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + }; + } + + public abstract void test( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos.TestArg request, + com.google.protobuf.RpcCallback done); + + public static final + com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos.getDescriptor().getServices().get(0); + } + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final void callMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request, + com.google.protobuf.RpcCallback< + com.google.protobuf.Message> done) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callMethod() given method descriptor for wrong " + + "service type."); + } + switch(method.getIndex()) { + case 0: + this.test(controller, (org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos.TestArg)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos.TestArg.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos.TestResponse.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public static Stub newStub( + com.google.protobuf.RpcChannel channel) { + return new Stub(channel); + } + + public static final class Stub extends org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos.TestDelayedService implements Interface { + private Stub(com.google.protobuf.RpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.RpcChannel channel; + + public com.google.protobuf.RpcChannel getChannel() { + return channel; + } + + public void test( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos.TestArg request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos.TestResponse.getDefaultInstance(), + 
com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos.TestResponse.class, + org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos.TestResponse.getDefaultInstance())); + } + } + + public static BlockingInterface newBlockingStub( + com.google.protobuf.BlockingRpcChannel channel) { + return new BlockingStub(channel); + } + + public interface BlockingInterface { + public org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos.TestResponse test( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos.TestArg request) + throws com.google.protobuf.ServiceException; + } + + private static final class BlockingStub implements BlockingInterface { + private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.BlockingRpcChannel channel; + + public org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos.TestResponse test( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos.TestArg request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos.TestResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos.TestResponse.getDefaultInstance()); + } + + } + } + private static com.google.protobuf.Descriptors.Descriptor internal_static_TestArg_descriptor; private static @@ -787,9 +1008,10 @@ public final class TestDelayedRpcProtos { java.lang.String[] descriptorData = { "\n\026test_delayed_rpc.proto\"\030\n\007TestArg\022\r\n\005d" + "elay\030\001 \002(\010\" \n\014TestResponse\022\020\n\010response\030\001" + - " \002(\005BL\n.org.apache.hadoop.hbase.ipc.prot" + - "obuf.generatedB\024TestDelayedRpcProtos\210\001\001\240" + - "\001\001" + " \002(\00525\n\022TestDelayedService\022\037\n\004test\022\010.Tes" + + "tArg\032\r.TestResponseBL\n.org.apache.hadoop" + + ".hbase.ipc.protobuf.generatedB\024TestDelay" + + "edRpcProtos\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java index 46e60620fa9..40656052759 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java @@ -37,20 +37,19 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.LargeTests; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.exceptions.TableExistsException; -import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.client.HConnection; 
import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.exceptions.TableExistsException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad; @@ -199,12 +198,13 @@ public class TestLoadIncrementalHFilesSplitRecovery { /** * Checks that all columns have the expected value and that there is the * expected number of rows. + * @throws IOException */ - void assertExpectedTable(String table, int count, int value) { + void assertExpectedTable(String table, int count, int value) throws IOException { + HTable t = null; try { assertEquals(util.getHBaseAdmin().listTables(table).length, 1); - - HTable t = new HTable(util.getConfiguration(), table); + t = new HTable(util.getConfiguration(), table); Scan s = new Scan(); ResultScanner sr = t.getScanner(s); int i = 0; @@ -219,6 +219,8 @@ public class TestLoadIncrementalHFilesSplitRecovery { assertEquals(count, i); } catch (IOException e) { fail("Failed due to exception"); + } finally { + if (t != null) t.close(); } } @@ -277,7 +279,8 @@ public class TestLoadIncrementalHFilesSplitRecovery { thenReturn(loc); Mockito.when(c.locateRegion((byte[]) Mockito.any(), (byte[]) Mockito.any())). thenReturn(loc); - ClientProtocol hri = Mockito.mock(ClientProtocol.class); + ClientProtos.ClientService.BlockingInterface hri = + Mockito.mock(ClientProtos.ClientService.BlockingInterface.class); Mockito.when(hri.bulkLoadHFile((RpcController)Mockito.any(), (BulkLoadHFileRequest)Mockito.any())). thenThrow(new ServiceException(new IOException("injecting bulk load error"))); Mockito.when(c.getClient(Mockito.any(ServerName.class))). 
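The hunk above is the template for most of the test changes in this patch: with the hand-written ClientProtocol/AdminProtocol marker interfaces deleted, tests mock the protobuf-generated blocking stub directly and script its RPC methods. A minimal self-contained sketch of that pattern, assembled from the lines above (the helper name newFailingClientStub is hypothetical; it assumes Mockito plus the generated ClientProtos classes this test already imports):

  // Mock the generated blocking stub in place of the removed ClientProtocol
  // marker interface, then script one RPC to fail with a wrapped IOException.
  static ClientProtos.ClientService.BlockingInterface newFailingClientStub()
      throws ServiceException {
    ClientProtos.ClientService.BlockingInterface client =
        Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
    Mockito.when(client.bulkLoadHFile((RpcController) Mockito.any(),
        (BulkLoadHFileRequest) Mockito.any()))
      .thenThrow(new ServiceException(new IOException("injecting bulk load error")));
    return client;
  }

The resulting mock is then handed to a mocked HConnection via getClient(...), exactly as the hunk above does with Mockito.when(c.getClient(...)).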
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index e0b8548b552..a60babe06d1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -30,16 +30,15 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.catalog.CatalogTracker; -import org.apache.hadoop.hbase.client.AdminProtocol; -import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException; import org.apache.hadoop.hbase.executor.ExecutorService; -import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.master.TableLockManager.NullTableLockManager; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest; @@ -100,7 +99,9 @@ import com.google.protobuf.ServiceException; * {@link #setGetResult(byte[], byte[], Result)} for how to fill the backing data * store that the get pulls from. */ -class MockRegionServer implements AdminProtocol, ClientProtocol, RegionServerServices { +class MockRegionServer +implements AdminProtos.AdminService.BlockingInterface, +ClientProtos.ClientService.BlockingInterface, RegionServerServices { private final ServerName sn; private final ZooKeeperWatcher zkw; private final Configuration conf; @@ -304,7 +305,7 @@ class MockRegionServer implements AdminProtocol, ClientProtocol, RegionServerSer } @Override - public RpcServer getRpcServer() { + public RpcServerInterface getRpcServer() { // TODO Auto-generated method stub return null; } @@ -323,7 +324,7 @@ class MockRegionServer implements AdminProtocol, ClientProtocol, RegionServerSer @Override public GetResponse get(RpcController controller, GetRequest request) - throws ServiceException { + throws ServiceException { byte[] regionName = request.getRegion().getValue().toByteArray(); Map<byte[], Result> m = this.gets.get(regionName); GetResponse.Builder builder = GetResponse.newBuilder(); @@ -336,7 +337,7 @@ class MockRegionServer implements AdminProtocol, ClientProtocol, RegionServerSer @Override public MultiGetResponse multiGet(RpcController controller, MultiGetRequest requests) - throws ServiceException { + throws ServiceException { byte[] regionName = requests.getRegion().getValue().toByteArray(); Map<byte[], Result> m = this.gets.get(regionName); MultiGetResponse.Builder builder = MultiGetResponse.newBuilder(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java index a8d35a3dd61..2df08370181 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java @@ -30,24 +30,23 @@ import java.util.List; import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean; -import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.MediumTests; -import org.apache.hadoop.hbase.exceptions.RegionException; import org.apache.hadoop.hbase.RegionTransition; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.catalog.MetaMockingUtil; -import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.exceptions.RegionException; +import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.executor.ExecutorType; @@ -58,6 +57,7 @@ import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; import org.apache.hadoop.hbase.master.handler.EnableTableHandler; import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; @@ -583,7 +583,8 @@ public class TestAssignmentManager { // Need to set up a fake scan of meta for the servershutdown handler // Make an RS Interface implementation. Make it so a scanner can go against it. - ClientProtocol implementation = Mockito.mock(ClientProtocol.class); + ClientProtos.ClientService.BlockingInterface implementation = + Mockito.mock(ClientProtos.ClientService.BlockingInterface.class); // Get a meta row result that has region up on SERVERNAME_A Result r; @@ -1047,7 +1048,8 @@ public class TestAssignmentManager { // messing with. Needed when "new master" joins cluster. AM will try and // rebuild its list of user regions and it will also get the HRI that goes // with an encoded name by doing a Get on .META. 
- ClientProtocol ri = Mockito.mock(ClientProtocol.class); + ClientProtos.ClientService.BlockingInterface ri = + Mockito.mock(ClientProtos.ClientService.BlockingInterface.class); // Get a meta row result that has region up on SERVERNAME_A for REGIONINFO Result r = MetaMockingUtil.getMetaTableRowResult(REGIONINFO, SERVERNAME_A); ScanResponse.Builder builder = ScanResponse.newBuilder(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index e8cccd85260..91ae3db73b3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -39,26 +39,25 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.exceptions.NotAllMetaRegionsOnlineException; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SmallTests; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.catalog.MetaMockingUtil; -import org.apache.hadoop.hbase.client.AdminProtocol; -import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.exceptions.NotAllMetaRegionsOnlineException; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse; import org.apache.hadoop.hbase.regionserver.HStore; @@ -91,7 +90,8 @@ public class TestCatalogJanitor { MockServer(final HBaseTestingUtility htu) throws NotAllMetaRegionsOnlineException, IOException, InterruptedException { this.c = htu.getConfiguration(); - ClientProtocol ri = Mockito.mock(ClientProtocol.class); + ClientProtos.ClientService.BlockingInterface ri = + Mockito.mock(ClientProtos.ClientService.BlockingInterface.class); MutateResponse.Builder builder = MutateResponse.newBuilder(); builder.setProcessed(true); try { @@ -106,7 +106,7 @@ public class TestCatalogJanitor { // to make our test work. this.connection = HConnectionTestingUtility.getMockedConnectionAndDecorate(this.c, - Mockito.mock(AdminProtocol.class), ri, + Mockito.mock(AdminProtos.AdminService.BlockingInterface.class), ri, new ServerName("example.org,12345,6789"), HRegionInfo.FIRST_META_REGIONINFO); // Set hbase.rootdir into test dir. 
@@ -114,7 +114,8 @@ public class TestCatalogJanitor { Path rootdir = FSUtils.getRootDir(this.c); FSUtils.setRootDir(this.c, rootdir); this.ct = Mockito.mock(CatalogTracker.class); - AdminProtocol hri = Mockito.mock(AdminProtocol.class); + AdminProtos.AdminService.BlockingInterface hri = + Mockito.mock(AdminProtos.AdminService.BlockingInterface.class); Mockito.when(this.ct.getConnection()).thenReturn(this.connection); Mockito.when(ct.waitForMetaServerConnection(Mockito.anyLong())).thenReturn(hri); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java index f347d53e288..14b81044e47 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java @@ -22,19 +22,19 @@ package org.apache.hadoop.hbase.master; import static org.junit.Assert.fail; import java.io.IOException; -import java.net.InetSocketAddress; import java.net.SocketTimeoutException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.*; -import org.apache.hadoop.hbase.ipc.HBaseClientRPC; -import org.apache.hadoop.hbase.MasterMonitorProtocol; -import org.apache.hadoop.hbase.ipc.ProtobufRpcClientEngine; +import org.apache.hadoop.hbase.ipc.RpcClient; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest; +import org.apache.hadoop.hbase.security.User; import org.junit.Test; import org.junit.experimental.categories.Category; +import com.google.protobuf.BlockingRpcChannel; import com.google.protobuf.ServiceException; @Category(MediumTests.class) @@ -46,29 +46,31 @@ public class TestHMasterRPCException { TEST_UTIL.startMiniZKCluster(); Configuration conf = TEST_UTIL.getConfiguration(); conf.set(HConstants.MASTER_PORT, "0"); HMaster hm = new HMaster(conf); ServerName sm = hm.getServerName(); - InetSocketAddress isa = new InetSocketAddress(sm.getHostname(), sm.getPort()); - ProtobufRpcClientEngine engine = - new ProtobufRpcClientEngine(conf, HConstants.CLUSTER_ID_DEFAULT); + RpcClient rpcClient = new RpcClient(conf, HConstants.CLUSTER_ID_DEFAULT); try { int i = 0; //retry the RPC a few times; we have seen SocketTimeoutExceptions if we //try to connect too soon. Retry on SocketTimeoutException. while (i < 20) { try { - MasterMonitorProtocol inf = engine.getProxy( - MasterMonitorProtocol.class, isa, conf, 100 * 10); - inf.isMasterRunning(null, IsMasterRunningRequest.getDefaultInstance()); + BlockingRpcChannel channel = + rpcClient.createBlockingRpcChannel(sm, User.getCurrent(), 0); + MasterMonitorProtos.MasterMonitorService.BlockingInterface stub = + MasterMonitorProtos.MasterMonitorService.newBlockingStub(channel); + stub.isMasterRunning(null, IsMasterRunningRequest.getDefaultInstance()); fail(); } catch (ServiceException ex) { IOException ie = ProtobufUtil.getRemoteException(ex); if (!(ie instanceof SocketTimeoutException)) { - if(ie.getMessage().startsWith( - "org.apache.hadoop.hbase.exceptions.ServerNotRunningYetException: Server is not running yet")) { + if (ie.getMessage().startsWith("org.apache.hadoop.hbase.exceptions." + + "ServerNotRunningYetException: Server is not running yet")) { + // Done. Got the exception we wanted.
+ System.out.println("Expected exception: " + ie.getMessage()); return; + } else { + throw ex; } } else { System.err.println("Got SocketTimeoutException. Will retry. "); @@ -81,7 +83,7 @@ public class TestHMasterRPCException { } fail(); } finally { - engine.close(); + rpcClient.stop(); } } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java index c2e24ac9351..0ebb78ff3ef 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java @@ -132,6 +132,4 @@ public class TestRestartCluster { UTIL.waitTableAvailable(TABLE); } } - -} - +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java index 626d22886a9..f61dd8ed212 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java @@ -30,7 +30,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread; import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext; -import org.apache.hadoop.hbase.client.AdminProtocol; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Result; @@ -42,6 +41,7 @@ import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.protobuf.RequestConverter; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; import org.apache.hadoop.hbase.util.Bytes; @@ -152,7 +152,7 @@ public class TestHRegionServerBulkLoad { byte[] regionName = location.getRegionInfo().getRegionName(); BulkLoadHFileRequest request = RequestConverter.buildBulkLoadHFileRequest(famPaths, regionName, true); - server.bulkLoadHFile(null, request); + stub.bulkLoadHFile(null, request); return null; } }.withRetries(); @@ -166,7 +166,8 @@ public class TestHRegionServerBulkLoad { public Void call() throws Exception { LOG.debug("compacting " + location + " for row " + Bytes.toStringBinary(row)); - AdminProtocol server = connection.getAdmin(location.getServerName()); + AdminProtos.AdminService.BlockingInterface server = + connection.getAdmin(location.getServerName()); CompactRegionRequest request = RequestConverter.buildCompactRegionRequest( location.getRegionInfo().getRegionName(), true, null); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java index 721b87c11f5..61a358a1198 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java @@ -148,11 +148,11 @@ public class SequenceFileLogWriter implements 
HLog.Writer { null, createMetadata(conf, compress)); } else { - LOG.debug("using new createWriter -- HADOOP-6840"); + if (LOG.isTraceEnabled()) LOG.trace("Using new createWriter -- HADOOP-6840"); } this.writer_out = getSequenceFilePrivateFSDataOutputStreamAccessible(); - LOG.debug("Path=" + path + ", compression=" + compress); + if (LOG.isTraceEnabled()) LOG.trace("Path=" + path + ", compression=" + compress); } // Get at the private FSDataOutputStream inside in SequenceFile so we can diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java index 2ccbe39a0fd..1546735f105 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java @@ -18,21 +18,25 @@ package org.apache.hadoop.hbase.security.token; +import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import java.io.IOException; import java.net.InetSocketAddress; import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.ConcurrentMap; +import com.google.protobuf.BlockingRpcChannel; +import com.google.protobuf.BlockingService; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterId; -import org.apache.hadoop.hbase.IpcProtocol; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; @@ -43,18 +47,16 @@ import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; -import org.apache.hadoop.hbase.ipc.HBaseClientRPC; -import org.apache.hadoop.hbase.ipc.HBaseServer; -import org.apache.hadoop.hbase.ipc.HBaseServerRPC; -import org.apache.hadoop.hbase.ipc.ProtobufRpcClientEngine; -import org.apache.hadoop.hbase.ipc.RequestContext; +import org.apache.hadoop.hbase.ipc.RpcClient; import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.ipc.RequestContext; +import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; +import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.ipc.ServerRpcController; import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.RegionServerServices; -import org.apache.hadoop.hbase.security.KerberosInfo; -import org.apache.hadoop.hbase.security.TokenInfo; +import org.apache.hadoop.hbase.security.SecurityInfo; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -67,6 +69,8 @@ import org.apache.hadoop.hbase.zookeeper.ZKClusterId; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.net.DNS; import org.apache.hadoop.security.UserGroupInformation; +import 
org.apache.hadoop.security.authorize.PolicyProvider; +import org.apache.hadoop.security.authorize.Service; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; @@ -82,23 +86,16 @@ import org.junit.experimental.categories.Category; public class TestTokenAuthentication { private static Log LOG = LogFactory.getLog(TestTokenAuthentication.class); - @KerberosInfo( - serverPrincipal = "hbase.test.kerberos.principal") - @TokenInfo("HBASE_AUTH_TOKEN") - private static interface BlockingAuthenticationService - extends AuthenticationProtos.AuthenticationService.BlockingInterface, IpcProtocol { - } + public static interface AuthenticationServiceSecurityInfo {} /** * Basic server process for RPC authentication testing */ private static class TokenServer extends TokenProvider - implements BlockingAuthenticationService, Runnable, Server { - + implements AuthenticationProtos.AuthenticationService.BlockingInterface, Runnable, Server { private static Log LOG = LogFactory.getLog(TokenServer.class); private Configuration conf; - private RpcServer rpcServer; + private RpcServerInterface rpcServer; private InetSocketAddress isa; private ZooKeeperWatcher zookeeper; private Sleeper sleeper; @@ -106,31 +103,27 @@ public class TestTokenAuthentication { private boolean aborted = false; private boolean stopped = false; private long startcode; - private AuthenticationProtos.AuthenticationService.BlockingInterface blockingService; public TokenServer(Configuration conf) throws IOException { this.conf = conf; this.startcode = EnvironmentEdgeManager.currentTimeMillis(); - // Server to handle client requests. - String hostname = Strings.domainNamePointerToHostName( - DNS.getDefaultHost("default", "default")); + String hostname = + Strings.domainNamePointerToHostName(DNS.getDefaultHost("default", "default")); int port = 0; // Creation of an ISA will force a resolve. InetSocketAddress initialIsa = new InetSocketAddress(hostname, port); if (initialIsa.getAddress() == null) { throw new IllegalArgumentException("Failed resolve of " + initialIsa); } - - this.rpcServer = HBaseServerRPC.getServer(TokenServer.class, this, - new Class[]{AuthenticationProtos.AuthenticationService.Interface.class}, - initialIsa.getHostName(), // BindAddress is IP we got for this server. - initialIsa.getPort(), - 3, // handlers - 1, // meta handlers (not used) - true, - this.conf, HConstants.QOS_THRESHOLD); - // Set our address.
+ final List<BlockingServiceAndInterface> sai = + new ArrayList<BlockingServiceAndInterface>(1); + BlockingService service = + AuthenticationProtos.AuthenticationService.newReflectiveBlockingService(this); + sai.add(new BlockingServiceAndInterface(service, + AuthenticationProtos.AuthenticationService.BlockingInterface.class)); + this.rpcServer = + new RpcServer(this, "tokenServer", sai, initialIsa, 3, 1, conf, HConstants.QOS_THRESHOLD); this.isa = this.rpcServer.getListenerAddress(); this.sleeper = new Sleeper(1000, this); } @@ -179,7 +172,7 @@ public class TestTokenAuthentication { // mock RegionServerServices to provide to coprocessor environment final RegionServerServices mockServices = new MockRegionServerServices() { @Override - public RpcServer getRpcServer() { return rpcServer; } + public RpcServerInterface getRpcServer() { return rpcServer; } }; // mock up coprocessor environment @@ -253,7 +246,7 @@ public class TestTokenAuthentication { } public SecretManager getSecretManager() { - return ((HBaseServer)rpcServer).getSecretManager(); + return ((RpcServer)rpcServer).getSecretManager(); } @Override @@ -304,19 +297,30 @@ public static void setupBeforeClass() throws Exception { TEST_UTIL = new HBaseTestingUtility(); TEST_UTIL.startMiniZKCluster(); + // register token type for protocol + SecurityInfo.addInfo(AuthenticationProtos.AuthenticationService.getDescriptor().getName(), + new SecurityInfo("hbase.test.kerberos.principal", + AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN)); // security settings only added after startup so that ZK does not require SASL Configuration conf = TEST_UTIL.getConfiguration(); conf.set("hadoop.security.authentication", "kerberos"); conf.set("hbase.security.authentication", "kerberos"); + conf.setBoolean(HADOOP_SECURITY_AUTHORIZATION, true); server = new TokenServer(conf); serverThread = new Thread(server); - Threads.setDaemonThreadRunning(serverThread, - "TokenServer:"+server.getServerName().toString()); + Threads.setDaemonThreadRunning(serverThread, "TokenServer:"+server.getServerName().toString()); // wait for startup while (!server.isStarted() && !server.isStopped()) { Thread.sleep(10); } - + server.rpcServer.refreshAuthManager(new PolicyProvider() { + @Override + public Service[] getServices() { + return new Service [] { + new Service("security.client.protocol.acl", + AuthenticationProtos.AuthenticationService.BlockingInterface.class)}; + } + }); ZKClusterId.setClusterId(server.getZooKeeper(), clusterId); secretManager = (AuthenticationTokenSecretManager)server.getSecretManager(); while(secretManager.getCurrentKey() == null) { @@ -363,24 +367,23 @@ public class TestTokenAuthentication { testuser.doAs(new PrivilegedExceptionAction<Object>() { public Object run() throws Exception { Configuration c = server.getConfiguration(); - ProtobufRpcClientEngine rpcClient = - new ProtobufRpcClientEngine(c, clusterId.toString()); + RpcClient rpcClient = new RpcClient(c, clusterId.toString()); + ServerName sn = + new ServerName(server.getAddress().getHostName(), server.getAddress().getPort(), + System.currentTimeMillis()); try { - AuthenticationProtos.AuthenticationService.BlockingInterface proxy = - HBaseClientRPC.waitForProxy(rpcClient, BlockingAuthenticationService.class, - server.getAddress(), c, - HConstants.DEFAULT_HBASE_CLIENT_RPC_MAXATTEMPTS, - HConstants.DEFAULT_HBASE_RPC_TIMEOUT, - HConstants.DEFAULT_HBASE_RPC_TIMEOUT); - + BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn, + User.getCurrent(), HConstants.DEFAULT_HBASE_RPC_TIMEOUT); +
AuthenticationProtos.AuthenticationService.BlockingInterface stub = + AuthenticationProtos.AuthenticationService.newBlockingStub(channel); AuthenticationProtos.WhoAmIResponse response = - proxy.whoami(null, AuthenticationProtos.WhoAmIRequest.getDefaultInstance()); + stub.whoami(null, AuthenticationProtos.WhoAmIRequest.getDefaultInstance()); String myname = response.getUsername(); assertEquals("testuser", myname); String authMethod = response.getAuthMethod(); assertEquals("TOKEN", authMethod); } finally { - rpcClient.close(); + rpcClient.stop(); } return null; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java index 52cd489f351..7786847a94d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java @@ -40,8 +40,8 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.LargeTests; import org.apache.hadoop.hbase.exceptions.SnapshotCreationException; import org.apache.hadoop.hbase.exceptions.TableNotFoundException; -import org.apache.hadoop.hbase.ipc.HBaseClient; -import org.apache.hadoop.hbase.ipc.HBaseServer; +import org.apache.hadoop.hbase.ipc.RpcClient; +import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.ScannerCallable; @@ -87,8 +87,8 @@ public class TestFlushSnapshotFromClient { */ @BeforeClass public static void setupCluster() throws Exception { - ((Log4JLogger)HBaseServer.LOG).getLogger().setLevel(Level.ALL); - ((Log4JLogger)HBaseClient.LOG).getLogger().setLevel(Level.ALL); + ((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL); + ((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL); ((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL); setupConf(UTIL.getConfiguration()); UTIL.startMiniCluster(NUM_RS); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java index 32eb34663d4..087f1085d54 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java @@ -19,10 +19,8 @@ package org.apache.hadoop.hbase.snapshot; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; import java.io.IOException; -import java.util.Arrays; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -35,25 +33,23 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.SmallTests; import org.apache.hadoop.hbase.catalog.CatalogTracker; -import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import 
org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.hbase.util.MD5Hash; -import org.junit.*; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; import org.junit.experimental.categories.Category; import org.mockito.Mockito; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockRegionServerServices.java index 6bebc4aec0b..856575b7c29 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockRegionServerServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockRegionServerServices.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.fs.HFileSystem; -import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.master.TableLockManager; import org.apache.hadoop.hbase.master.TableLockManager.NullTableLockManager; import org.apache.hadoop.hbase.regionserver.CompactionRequestor; @@ -99,7 +99,7 @@ public class MockRegionServerServices implements RegionServerServices { } @Override - public RpcServer getRpcServer() { + public RpcServerInterface getRpcServer() { return null; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java index f901f50304c..872a45e4b6f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java @@ -59,7 +59,6 @@ import org.apache.hadoop.hbase.LargeTests; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.catalog.MetaEditor; -import org.apache.hadoop.hbase.client.AdminProtocol; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HBaseAdmin; @@ -77,6 +76,7 @@ import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.master.TableLockManager; import org.apache.hadoop.hbase.master.TableLockManager.TableLock; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.HRegionServer; @@ -524,8 +524,7 @@ public class TestHBaseFsck { new HashMap<ServerName, List<String>>(); HConnection connection = admin.getConnection(); for (ServerName hsi : regionServers) { - AdminProtocol server = - connection.getAdmin(hsi); + AdminProtos.AdminService.BlockingInterface server = connection.getAdmin(hsi); // list all online regions from this region server List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(server); diff --git
a/hbase-server/src/test/protobuf/test_delayed_rpc.proto b/hbase-server/src/test/protobuf/test_delayed_rpc.proto index c6aae66e9e0..cfab0fbe222 100644 --- a/hbase-server/src/test/protobuf/test_delayed_rpc.proto +++ b/hbase-server/src/test/protobuf/test_delayed_rpc.proto @@ -28,3 +28,7 @@ message TestArg { message TestResponse { required int32 response = 1; } + +service TestDelayedService { + rpc test(TestArg) returns (TestResponse); +}
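This four-line service declaration is the source from which protoc emits the TestDelayedService classes added to TestDelayedRpcProtos.java at the top of this section. For orientation, a minimal sketch of driving that generated blocking stub through the new RpcClient, mirroring the stub wiring this patch uses in TestHMasterRPCException and TestTokenAuthentication (the class and method names here are illustrative placeholders, not part of the patch; sn must point at a server actually running a TestDelayedService implementation):

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.HConstants;
  import org.apache.hadoop.hbase.ServerName;
  import org.apache.hadoop.hbase.ipc.RpcClient;
  import org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos;
  import org.apache.hadoop.hbase.security.User;
  import com.google.protobuf.BlockingRpcChannel;
  import com.google.protobuf.ServiceException;

  public class TestDelayedServiceClientSketch {
    // Invoke the test(TestArg) RPC once and return its TestResponse.
    static TestDelayedRpcProtos.TestResponse callTest(ServerName sn, boolean delay)
        throws IOException, ServiceException {
      Configuration conf = HBaseConfiguration.create();
      RpcClient rpcClient = new RpcClient(conf, HConstants.CLUSTER_ID_DEFAULT);
      try {
        // Blocking channel plus generated stub, as in the test diffs above.
        BlockingRpcChannel channel =
            rpcClient.createBlockingRpcChannel(sn, User.getCurrent(), 0);
        TestDelayedRpcProtos.TestDelayedService.BlockingInterface stub =
            TestDelayedRpcProtos.TestDelayedService.newBlockingStub(channel);
        return stub.test(null,
            TestDelayedRpcProtos.TestArg.newBuilder().setDelay(delay).build());
      } finally {
        rpcClient.stop();
      }
    }
  }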