diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
index 6ab1a99ae55..0060567ebbd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
@@ -63,7 +63,7 @@ public class FailoverController {
           "Can't failover to an active service");
     }
     try {
-      toSvc.monitorHealth();
+      HAServiceProtocolHelper.monitorHealth(toSvc);
     } catch (HealthCheckFailedException hce) {
       throw new FailoverFailedException(
           "Can't failover to an unhealthy service", hce);
@@ -91,7 +91,7 @@ public class FailoverController {
 
     // Try to make fromSvc standby
     try {
-      fromSvc.transitionToStandby();
+      HAServiceProtocolHelper.transitionToStandby(fromSvc);
     } catch (ServiceFailedException sfe) {
       LOG.warn("Unable to make " + fromSvcName + " standby (" +
           sfe.getMessage() + ")");
@@ -105,7 +105,7 @@ public class FailoverController {
     boolean failed = false;
     Throwable cause = null;
     try {
-      toSvc.transitionToActive();
+      HAServiceProtocolHelper.transitionToActive(toSvc);
     } catch (ServiceFailedException sfe) {
       LOG.error("Unable to make " + toSvcName + " active (" +
           sfe.getMessage() + "). Failing back");
@@ -122,7 +122,7 @@ public class FailoverController {
     if (failed) {
       String msg = "Unable to failover to " + toSvcName;
       try {
-        fromSvc.transitionToActive();
+        HAServiceProtocolHelper.transitionToActive(fromSvc);
       } catch (ServiceFailedException sfe) {
         msg = "Failback to " + fromSvcName + " failed (" +
             sfe.getMessage() + ")";
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
index 2dc5c1f39a3..7dbc17ed6e7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
@@ -92,7 +92,7 @@ public class HAAdmin extends Configured implements Tool {
     }
 
     HAServiceProtocol proto = getProtocol(argv[1]);
-    proto.transitionToActive();
+    HAServiceProtocolHelper.transitionToActive(proto);
     return 0;
   }
 
@@ -105,7 +105,7 @@ public class HAAdmin extends Configured implements Tool {
     }
 
     HAServiceProtocol proto = getProtocol(argv[1]);
-    proto.transitionToStandby();
+    HAServiceProtocolHelper.transitionToStandby(proto);
     return 0;
   }
 
@@ -139,7 +139,7 @@ public class HAAdmin extends Configured implements Tool {
 
     HAServiceProtocol proto = getProtocol(argv[1]);
     try {
-      proto.monitorHealth();
+      HAServiceProtocolHelper.monitorHealth(proto);
     } catch (HealthCheckFailedException e) {
       errOut.println("Health check failed: " + e.getLocalizedMessage());
       return 1;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocolHelper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocolHelper.java
new file mode 100644
index 00000000000..c8de74269e3
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocolHelper.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ha;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.ipc.RemoteException;
+
+/**
+ * Helper for making {@link HAServiceProtocol} RPC calls. This helper
+ * unwraps the {@link RemoteException} to specific exceptions.
+ *
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class HAServiceProtocolHelper {
+  public static void monitorHealth(HAServiceProtocol svc)
+      throws IOException {
+    try {
+      svc.monitorHealth();
+    } catch (RemoteException e) {
+      throw e.unwrapRemoteException(HealthCheckFailedException.class);
+    }
+  }
+
+  public static void transitionToActive(HAServiceProtocol svc)
+      throws IOException {
+    try {
+      svc.transitionToActive();
+    } catch (RemoteException e) {
+      throw e.unwrapRemoteException(ServiceFailedException.class);
+    }
+  }
+
+  public static void transitionToStandby(HAServiceProtocol svc)
+      throws IOException {
+    try {
+      svc.transitionToStandby();
+    } catch (RemoteException e) {
+      throw e.unwrapRemoteException(ServiceFailedException.class);
+    }
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index da9724ef1f7..0357c5d714f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.fs.Path;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 
 import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.ha.HAServiceProtocolHelper;
 import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -1590,12 +1591,12 @@ public class MiniDFSCluster {
 
   public void transitionToActive(int nnIndex) throws IOException,
       ServiceFailedException {
-    getHaServiceClient(nnIndex).transitionToActive();
+    HAServiceProtocolHelper.transitionToActive(getHaServiceClient(nnIndex));
   }
 
   public void transitionToStandby(int nnIndex) throws IOException,
       ServiceFailedException {
-    getHaServiceClient(nnIndex).transitionToStandby();
+    HAServiceProtocolHelper.transitionToStandby(getHaServiceClient(nnIndex));
   }
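
For illustration only, a minimal sketch of a caller using the new helper; the FailoverExample class and makeActive method below are hypothetical and not part of this patch. It assumes, as the helper's use of unwrapRemoteException suggests, that HealthCheckFailedException and ServiceFailedException extend IOException. Because the helper unwraps RemoteException into these specific exceptions, call sites can keep the simple catch blocks seen in FailoverController and HAAdmin above.

import java.io.IOException;

import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.ha.HAServiceProtocolHelper;
import org.apache.hadoop.ha.HealthCheckFailedException;
import org.apache.hadoop.ha.ServiceFailedException;

public class FailoverExample {
  // Hypothetical caller: checks health, then transitions the service to active.
  static void makeActive(HAServiceProtocol svc) throws IOException {
    try {
      // Both calls go through the helper so that a RemoteException raised by
      // the RPC layer is unwrapped into the specific HA exception.
      HAServiceProtocolHelper.monitorHealth(svc);
      HAServiceProtocolHelper.transitionToActive(svc);
    } catch (HealthCheckFailedException e) {
      System.err.println("Service is unhealthy: " + e.getMessage());
      throw e;
    } catch (ServiceFailedException e) {
      System.err.println("Transition to active failed: " + e.getMessage());
      throw e;
    }
  }
}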