HDFS-2917. HA: haadmin should not work if run by regular user. Contributed by Eli Collins

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1242626 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins 2012-02-10 00:46:17 +00:00
parent a63e12c4c8
commit 8af96c7b22
7 changed files with 79 additions and 13 deletions
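
The shape of the change: each HAServiceProtocol operation on the NameNode now calls namesystem.checkSuperuserPrivilege() before acting, the protocol methods declare AccessControlException, and HAAdmin catches the resulting IOException on the client. The server-side check itself is not part of this diff; below is a minimal sketch of that kind of gate, using a hypothetical SuperuserGate class and a single configured superuser name rather than the NameNode's real namesystem.checkSuperuserPrivilege() logic.

    // Illustrative sketch only -- not the NameNode implementation. SuperuserGate
    // and its constructor argument are hypothetical; the real check in this
    // commit is namesystem.checkSuperuserPrivilege().
    import java.io.IOException;

    import org.apache.hadoop.security.AccessControlException;
    import org.apache.hadoop.security.UserGroupInformation;

    class SuperuserGate {
      private final String superuser;

      SuperuserGate(String superuser) {
        this.superuser = superuser;
      }

      /** Throw AccessControlException if the caller is not the superuser. */
      void checkSuperuserPrivilege() throws IOException {
        UserGroupInformation caller = UserGroupInformation.getCurrentUser();
        if (!superuser.equals(caller.getShortUserName())) {
          throw new AccessControlException(
              "User " + caller.getShortUserName()
              + " is not authorized to perform this HA operation");
        }
      }
    }

Because org.apache.hadoop.security.AccessControlException is an IOException, a check like this can be added behind existing method signatures without breaking callers that already handle IOException.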

@@ -61,6 +61,7 @@ public class FailoverController {
                              boolean forceActive)
       throws FailoverFailedException {
     HAServiceState toSvcState;
+
     try {
       toSvcState = toSvc.getServiceState();
     } catch (IOException e) {
@@ -68,10 +69,12 @@ public class FailoverController {
       LOG.error(msg, e);
       throw new FailoverFailedException(msg, e);
     }
+
     if (!toSvcState.equals(HAServiceState.STANDBY)) {
       throw new FailoverFailedException(
           "Can't failover to an active service");
     }
+
     try {
       HAServiceProtocolHelper.monitorHealth(toSvc);
     } catch (HealthCheckFailedException hce) {
@@ -81,6 +84,7 @@ public class FailoverController {
       throw new FailoverFailedException(
           "Got an IO exception", e);
     }
+
     try {
       if (!toSvc.readyToBecomeActive()) {
         if (!forceActive) {

@@ -249,7 +249,10 @@ public abstract class HAAdmin extends Configured implements Tool {
     try {
       return runCmd(argv);
     } catch (IllegalArgumentException iae) {
-      errOut.println("Illegal argument: " + iae.getMessage());
+      errOut.println("Illegal argument: " + iae.getLocalizedMessage());
+      return -1;
+    } catch (IOException ioe) {
+      errOut.println("Operation failed: " + ioe.getLocalizedMessage());
       return -1;
     }
   }
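
With this change, an haadmin command issued by a non-superuser no longer dies with a stack trace: the server-side AccessControlException reaches the client as an IOException (typically wrapped in a RemoteException by the RPC layer), falls into the new catch block above, and the command prints the server's message prefixed with "Operation failed: " and returns -1. A hypothetical session, with the message wording left as a placeholder since it comes from the NameNode's check and is not part of this diff:

    $ hdfs haadmin -getServiceState <target>
    Operation failed: <access-denied message from the NameNode>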

@@ -21,6 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.KerberosInfo;

 import java.io.IOException;
@@ -75,10 +76,13 @@ public interface HAServiceProtocol extends VersionedProtocol {
    *
    * @throws HealthCheckFailedException
    *           if the health check of a service fails.
+   * @throws AccessControlException
+   *           if access is denied.
    * @throws IOException
    *           if other errors happen
    */
   public void monitorHealth() throws HealthCheckFailedException,
+                                     AccessControlException,
                                      IOException;

   /**
@@ -87,10 +91,13 @@ public interface HAServiceProtocol extends VersionedProtocol {
    *
    * @throws ServiceFailedException
    *           if transition from standby to active fails.
+   * @throws AccessControlException
+   *           if access is denied.
    * @throws IOException
    *           if other errors happen
    */
   public void transitionToActive() throws ServiceFailedException,
+                                          AccessControlException,
                                           IOException;

   /**
@@ -99,28 +106,37 @@ public interface HAServiceProtocol extends VersionedProtocol {
    *
    * @throws ServiceFailedException
    *           if transition from active to standby fails.
+   * @throws AccessControlException
+   *           if access is denied.
    * @throws IOException
    *           if other errors happen
    */
   public void transitionToStandby() throws ServiceFailedException,
+                                           AccessControlException,
                                            IOException;

   /**
    * Return the current state of the service.
    *
+   * @throws AccessControlException
+   *           if access is denied.
    * @throws IOException
    *           if other errors happen
    */
-  public HAServiceState getServiceState() throws IOException;
+  public HAServiceState getServiceState() throws AccessControlException,
+                                                 IOException;

   /**
    * Return true if the service is capable and ready to transition
    * from the standby state to the active state.
    *
    * @return true if the service is ready to become active, false otherwise.
+   * @throws AccessControlException
+   *           if access is denied.
    * @throws IOException
    *           if other errors happen
    */
   public boolean readyToBecomeActive() throws ServiceFailedException,
+                                              AccessControlException,
                                               IOException;
 }

@@ -32,6 +32,7 @@ import static org.apache.hadoop.ha.TestNodeFencer.setupFencer;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.AccessControlException;
 import org.junit.Test;

 import static org.junit.Assert.*;
@@ -133,6 +134,31 @@ public class TestFailoverController {
     assertEquals(HAServiceState.ACTIVE, svc2.getServiceState());
   }

+  @Test
+  public void testFailoverWithoutPermission() throws Exception {
+    DummyService svc1 = new DummyService(HAServiceState.ACTIVE) {
+      @Override
+      public HAServiceState getServiceState() throws IOException {
+        throw new AccessControlException("Access denied");
+      }
+    };
+
+    DummyService svc2 = new DummyService(HAServiceState.STANDBY) {
+      @Override
+      public HAServiceState getServiceState() throws IOException {
+        throw new AccessControlException("Access denied");
+      }
+    };
+    NodeFencer fencer = setupFencer(AlwaysSucceedFencer.class.getName());
+
+    try {
+      FailoverController.failover(svc1, svc1Addr, svc2, svc2Addr, fencer, false, false);
+      fail("Can't failover when access is denied");
+    } catch (FailoverFailedException ffe) {
+      assertTrue(ffe.getCause().getMessage().contains("Access denied"));
+    }
+  }
+
   @Test
   public void testFailoverToUnreadyService() throws Exception {
     DummyService svc1 = new DummyService(HAServiceState.ACTIVE);

@@ -194,3 +194,5 @@ HDFS-2915. HA: TestFailureOfSharedDir.testFailureOfSharedDir() has race conditio
 HDFS-2912. Namenode not shutting down when shared edits dir is inaccessible. (Bikas Saha via atm)

 HDFS-2922. HA: close out operation categories. (eli)
+
+HDFS-2917. HA: haadmin should not work if run by regular user (eli)

@@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -900,36 +901,45 @@ public class NameNode {
     }
   }

-  synchronized void monitorHealth() throws HealthCheckFailedException {
+  synchronized void monitorHealth()
+      throws HealthCheckFailedException, AccessControlException {
+    namesystem.checkSuperuserPrivilege();
     if (!haEnabled) {
-      return; // no-op, if HA is not eanbled
+      return; // no-op, if HA is not enabled
     }
     // TODO:HA implement health check
     return;
   }

-  synchronized void transitionToActive() throws ServiceFailedException {
+  synchronized void transitionToActive()
+      throws ServiceFailedException, AccessControlException {
+    namesystem.checkSuperuserPrivilege();
     if (!haEnabled) {
       throw new ServiceFailedException("HA for namenode is not enabled");
     }
     state.setState(haContext, ACTIVE_STATE);
   }

-  synchronized void transitionToStandby() throws ServiceFailedException {
+  synchronized void transitionToStandby()
+      throws ServiceFailedException, AccessControlException {
+    namesystem.checkSuperuserPrivilege();
     if (!haEnabled) {
       throw new ServiceFailedException("HA for namenode is not enabled");
     }
     state.setState(haContext, STANDBY_STATE);
   }

-  synchronized HAServiceState getServiceState() {
+  synchronized HAServiceState getServiceState() throws AccessControlException {
+    namesystem.checkSuperuserPrivilege();
     if (state == null) {
       return HAServiceState.INITIALIZING;
     }
     return state.getServiceState();
   }

-  synchronized boolean readyToBecomeActive() throws ServiceFailedException {
+  synchronized boolean readyToBecomeActive()
+      throws ServiceFailedException, AccessControlException {
+    namesystem.checkSuperuserPrivilege();
     if (!haEnabled) {
       throw new ServiceFailedException("HA for namenode is not enabled");
     }

@@ -988,27 +988,32 @@ class NameNodeRpcServer implements NamenodeProtocols {
   }

   @Override // HAServiceProtocol
-  public synchronized void monitorHealth() throws HealthCheckFailedException {
+  public synchronized void monitorHealth()
+      throws HealthCheckFailedException, AccessControlException {
     nn.monitorHealth();
   }

   @Override // HAServiceProtocol
-  public synchronized void transitionToActive() throws ServiceFailedException {
+  public synchronized void transitionToActive()
+      throws ServiceFailedException, AccessControlException {
     nn.transitionToActive();
   }

   @Override // HAServiceProtocol
-  public synchronized void transitionToStandby() throws ServiceFailedException {
+  public synchronized void transitionToStandby()
+      throws ServiceFailedException, AccessControlException {
     nn.transitionToStandby();
   }

   @Override // HAServiceProtocol
-  public synchronized HAServiceState getServiceState() {
+  public synchronized HAServiceState getServiceState()
+      throws AccessControlException {
     return nn.getServiceState();
   }

   @Override // HAServiceProtocol
-  public synchronized boolean readyToBecomeActive() throws ServiceFailedException {
+  public synchronized boolean readyToBecomeActive()
+      throws ServiceFailedException, AccessControlException {
     return nn.readyToBecomeActive();
   }