From 8af96c7b22f92ab84c142c37252f85df7b9b98aa Mon Sep 17 00:00:00 2001
From: Eli Collins
Date: Fri, 10 Feb 2012 00:46:17 +0000
Subject: [PATCH] HDFS-2917. HA: haadmin should not work if run by regular
 user. Contributed by Eli Collins

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1242626 13f79535-47bb-0310-9956-ffa450edef68
---
 .../apache/hadoop/ha/FailoverController.java  |  4 +++
 .../java/org/apache/hadoop/ha/HAAdmin.java    |  5 +++-
 .../apache/hadoop/ha/HAServiceProtocol.java   | 18 ++++++++++++-
 .../hadoop/ha/TestFailoverController.java     | 26 +++++++++++++++++++
 .../hadoop-hdfs/CHANGES.HDFS-1623.txt         |  2 ++
 .../hadoop/hdfs/server/namenode/NameNode.java | 22 +++++++++++-----
 .../server/namenode/NameNodeRpcServer.java    | 15 +++++++----
 7 files changed, 79 insertions(+), 13 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
index 7205f9f53b5..0960fb7cbd1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
@@ -61,6 +61,7 @@ public class FailoverController {
                               boolean forceActive)
       throws FailoverFailedException {
     HAServiceState toSvcState;
+
     try {
       toSvcState = toSvc.getServiceState();
     } catch (IOException e) {
@@ -68,10 +69,12 @@
       LOG.error(msg, e);
       throw new FailoverFailedException(msg, e);
     }
+
     if (!toSvcState.equals(HAServiceState.STANDBY)) {
       throw new FailoverFailedException(
           "Can't failover to an active service");
     }
+
     try {
       HAServiceProtocolHelper.monitorHealth(toSvc);
     } catch (HealthCheckFailedException hce) {
@@ -81,6 +84,7 @@
       throw new FailoverFailedException(
           "Got an IO exception", e);
     }
+
     try {
       if (!toSvc.readyToBecomeActive()) {
         if (!forceActive) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
index 6ceafb9ea69..ccfa11f43dd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
@@ -249,7 +249,10 @@ public abstract class HAAdmin extends Configured implements Tool {
     try {
       return runCmd(argv);
     } catch (IllegalArgumentException iae) {
-      errOut.println("Illegal argument: " + iae.getMessage());
+      errOut.println("Illegal argument: " + iae.getLocalizedMessage());
+      return -1;
+    } catch (IOException ioe) {
+      errOut.println("Operation failed: " + ioe.getLocalizedMessage());
       return -1;
     }
   }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
index 9a7316db054..c0e0d2b389e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
@@ -21,6 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.KerberosInfo;
 
 import java.io.IOException;
@@ -75,10 +76,13 @@ public interface HAServiceProtocol extends VersionedProtocol {
    *
    * @throws HealthCheckFailedException
    *           if the health check of a service fails.
+   * @throws AccessControlException
+   *           if access is denied.
    * @throws IOException
    *           if other errors happen
    */
   public void monitorHealth() throws HealthCheckFailedException,
+                                     AccessControlException,
                                      IOException;
 
   /**
@@ -87,10 +91,13 @@
    *
    * @throws ServiceFailedException
    *           if transition from standby to active fails.
+   * @throws AccessControlException
+   *           if access is denied.
    * @throws IOException
    *           if other errors happen
    */
   public void transitionToActive() throws ServiceFailedException,
+                                          AccessControlException,
                                           IOException;
 
   /**
@@ -99,28 +106,37 @@
    *
    * @throws ServiceFailedException
    *           if transition from active to standby fails.
+   * @throws AccessControlException
+   *           if access is denied.
    * @throws IOException
    *           if other errors happen
    */
   public void transitionToStandby() throws ServiceFailedException,
+                                           AccessControlException,
                                            IOException;
 
   /**
    * Return the current state of the service.
    *
+   * @throws AccessControlException
+   *           if access is denied.
    * @throws IOException
    *           if other errors happen
    */
-  public HAServiceState getServiceState() throws IOException;
+  public HAServiceState getServiceState() throws AccessControlException,
+                                                 IOException;
 
   /**
    * Return true if the service is capable and ready to transition
    * from the standby state to the active state.
    *
   * @return true if the service is ready to become active, false otherwise.
+   * @throws AccessControlException
+   *           if access is denied.
    * @throws IOException
    *           if other errors happen
    */
   public boolean readyToBecomeActive() throws ServiceFailedException,
+                                              AccessControlException,
                                               IOException;
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java
index 7b5cc32b765..39fc47ef406 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java
@@ -32,6 +32,7 @@ import static org.apache.hadoop.ha.TestNodeFencer.setupFencer;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.AccessControlException;
 
 import org.junit.Test;
 import static org.junit.Assert.*;
@@ -133,6 +134,31 @@
     assertEquals(HAServiceState.ACTIVE, svc2.getServiceState());
   }
 
+  @Test
+  public void testFailoverWithoutPermission() throws Exception {
+    DummyService svc1 = new DummyService(HAServiceState.ACTIVE) {
+      @Override
+      public HAServiceState getServiceState() throws IOException {
+        throw new AccessControlException("Access denied");
+      }
+    };
+    DummyService svc2 = new DummyService(HAServiceState.STANDBY) {
+      @Override
+      public HAServiceState getServiceState() throws IOException {
+        throw new AccessControlException("Access denied");
+      }
+    };
+    NodeFencer fencer = setupFencer(AlwaysSucceedFencer.class.getName());
+
+    try {
+      FailoverController.failover(svc1, svc1Addr, svc2, svc2Addr, fencer, false, false);
+      fail("Can't failover when access is denied");
+    } catch (FailoverFailedException ffe) {
+      assertTrue(ffe.getCause().getMessage().contains("Access denied"));
+    }
+  }
+
+
   @Test
   public void testFailoverToUnreadyService() throws Exception {
     DummyService svc1 = new DummyService(HAServiceState.ACTIVE);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt
index 943cf86ec5a..23028003649 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt
@@ -194,3 +194,5 @@ HDFS-2915. HA: TestFailureOfSharedDir.testFailureOfSharedDir() has race conditio
 HDFS-2912. Namenode not shutting down when shared edits dir is inaccessible. (Bikas Saha via atm)
 
 HDFS-2922. HA: close out operation categories. (eli)
+
+HDFS-2917. HA: haadmin should not work if run by regular user (eli)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index eb7e3c667b6..eb7f4616909 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -900,36 +901,45 @@
     }
   }
 
-  synchronized void monitorHealth() throws HealthCheckFailedException {
+  synchronized void monitorHealth()
+      throws HealthCheckFailedException, AccessControlException {
+    namesystem.checkSuperuserPrivilege();
     if (!haEnabled) {
-      return; // no-op, if HA is not eanbled
+      return; // no-op, if HA is not enabled
     }
     // TODO:HA implement health check
     return;
   }
 
-  synchronized void transitionToActive() throws ServiceFailedException {
+  synchronized void transitionToActive()
+      throws ServiceFailedException, AccessControlException {
+    namesystem.checkSuperuserPrivilege();
     if (!haEnabled) {
       throw new ServiceFailedException("HA for namenode is not enabled");
     }
     state.setState(haContext, ACTIVE_STATE);
   }
 
-  synchronized void transitionToStandby() throws ServiceFailedException {
+  synchronized void transitionToStandby()
+      throws ServiceFailedException, AccessControlException {
+    namesystem.checkSuperuserPrivilege();
     if (!haEnabled) {
       throw new ServiceFailedException("HA for namenode is not enabled");
     }
     state.setState(haContext, STANDBY_STATE);
   }
 
-  synchronized HAServiceState getServiceState() {
+  synchronized HAServiceState getServiceState() throws AccessControlException {
+    namesystem.checkSuperuserPrivilege();
     if (state == null) {
       return HAServiceState.INITIALIZING;
     }
     return state.getServiceState();
   }
 
-  synchronized boolean readyToBecomeActive() throws ServiceFailedException {
+  synchronized boolean readyToBecomeActive()
+      throws ServiceFailedException, AccessControlException {
+    namesystem.checkSuperuserPrivilege();
     if (!haEnabled) {
       throw new ServiceFailedException("HA for namenode is not enabled");
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index d63dfae11a7..aa5e8134218 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -988,27 +988,32 @@ class NameNodeRpcServer implements NamenodeProtocols {
   }
 
   @Override // HAServiceProtocol
-  public synchronized void monitorHealth() throws HealthCheckFailedException {
+  public synchronized void monitorHealth()
+      throws HealthCheckFailedException, AccessControlException {
     nn.monitorHealth();
   }
 
   @Override // HAServiceProtocol
-  public synchronized void transitionToActive() throws ServiceFailedException {
+  public synchronized void transitionToActive()
+      throws ServiceFailedException, AccessControlException {
     nn.transitionToActive();
   }
 
   @Override // HAServiceProtocol
-  public synchronized void transitionToStandby() throws ServiceFailedException {
+  public synchronized void transitionToStandby()
+      throws ServiceFailedException, AccessControlException {
     nn.transitionToStandby();
   }
 
   @Override // HAServiceProtocol
-  public synchronized HAServiceState getServiceState() {
+  public synchronized HAServiceState getServiceState()
+      throws AccessControlException {
     return nn.getServiceState();
  }
 
   @Override // HAServiceProtocol
-  public synchronized boolean readyToBecomeActive() throws ServiceFailedException {
+  public synchronized boolean readyToBecomeActive()
+      throws ServiceFailedException, AccessControlException {
     return nn.readyToBecomeActive();
   }