HDFS-5489. Use TokenAspect in WebHDFSFileSystem. Contributed by Haohui Mai.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1542158 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao 2013-11-15 02:11:38 +00:00
parent 6141d2ed27
commit 620890fcc0
6 changed files with 157 additions and 310 deletions

CHANGES.txt

@@ -494,6 +494,8 @@ Release 2.3.0 - UNRELEASED
     HDFS-5506. Use URLConnectionFactory in DelegationTokenFetcher. (Haohui Mai
     via jing9)
 
+    HDFS-5489. Use TokenAspect in WebHDFSFileSystem. (Haohui Mai via jing9)
+
   OPTIMIZATIONS
 
     HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)

TokenAspect.java

@@ -144,6 +144,10 @@ final class TokenAspect<T extends FileSystem & Renewable> {
     }
   }
 
+  public synchronized void reset() {
+    hasInitedToken = false;
+  }
+
   synchronized void initDelegationToken(UserGroupInformation ugi) {
     Token<?> token = selectDelegationToken(ugi);
     if (token != null) {
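The reset() added above simply clears TokenAspect's one-shot initialization flag, so the next ensureTokenInitialized() call fetches a token again; WebHdfsFileSystem uses it on NameNode failover (see the resetStateToFailOver hunk below). A minimal standalone sketch of that flag's role (a hypothetical class written for illustration, not TokenAspect's actual source):

    import java.io.IOException;

    import org.apache.hadoop.security.token.Token;

    /** Hypothetical sketch of the lazy-init flag that TokenAspect.reset() clears. */
    abstract class LazyTokenHolder {
      private boolean hasInitedToken = false;
      private Token<?> token;

      /** Fetches a token remotely, e.g. via FileSystem#getDelegationToken(String). */
      protected abstract Token<?> fetchToken() throws IOException;

      synchronized Token<?> ensureTokenInitialized() throws IOException {
        if (!hasInitedToken) {   // cached after the first attempt
          token = fetchToken();  // may be null when security is disabled
          hasInitedToken = true;
        }
        return token;
      }

      /** Analogous to the new TokenAspect#reset(): forget the token, e.g. on failover. */
      synchronized void reset() {
        hasInitedToken = false;
        token = null;
      }
    }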

WebHdfsFileSystem.java

@@ -118,38 +118,11 @@ public class WebHdfsFileSystem extends FileSystem
   /** Delegation token kind */
   public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
-  /** Token selector */
-  public static final DTSelecorByKind DT_SELECTOR
-      = new DTSelecorByKind(TOKEN_KIND);
-
-  private DelegationTokenRenewer dtRenewer = null;
-  @VisibleForTesting
-  DelegationTokenRenewer.RenewAction<?> action;
-
-  @Override
-  public URI getCanonicalUri() {
-    return super.getCanonicalUri();
-  }
-
-  @VisibleForTesting
-  protected synchronized void addRenewAction(final WebHdfsFileSystem webhdfs) {
-    if (dtRenewer == null) {
-      dtRenewer = DelegationTokenRenewer.getInstance();
-    }
-    action = dtRenewer.addRenewAction(webhdfs);
-  }
-
-  /** Is WebHDFS enabled in conf? */
-  public static boolean isEnabled(final Configuration conf, final Log log) {
-    final boolean b = conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
-        DFSConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT);
-    return b;
-  }
+  protected TokenAspect<WebHdfsFileSystem> tokenAspect = new TokenAspect<WebHdfsFileSystem>(
+      this, TOKEN_KIND);
 
   private UserGroupInformation ugi;
   private URI uri;
-  private boolean hasInitedToken;
   private Token<?> delegationToken;
   private RetryPolicy retryPolicy = null;
   private Path workingDir;

@@ -212,41 +185,27 @@ public class WebHdfsFileSystem extends FileSystem
     this.workingDir = getHomeDirectory();
 
     if (UserGroupInformation.isSecurityEnabled()) {
-      initDelegationToken();
+      tokenAspect.initDelegationToken(ugi);
     }
   }
 
-  protected void initDelegationToken() throws IOException {
-    // look for webhdfs token, then try hdfs
-    Token<?> token = selectDelegationToken(ugi);
-    if (token != null) {
-      LOG.debug("Found existing DT for " + token.getService());
-      setDelegationToken(token);
-      hasInitedToken = true;
-    }
-  }
+  @Override
+  public URI getCanonicalUri() {
+    return super.getCanonicalUri();
+  }
+
+  /** Is WebHDFS enabled in conf? */
+  public static boolean isEnabled(final Configuration conf, final Log log) {
+    final boolean b = conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
+        DFSConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT);
+    return b;
+  }
 
   protected synchronized Token<?> getDelegationToken() throws IOException {
-    // we haven't inited yet, or we used to have a token but it expired
-    if (!hasInitedToken || (action != null && !action.isValid())) {
-      //since we don't already have a token, go get one
-      Token<?> token = getDelegationToken(null);
-      // security might be disabled
-      if (token != null) {
-        setDelegationToken(token);
-        addRenewAction(this);
-        LOG.debug("Created new DT for " + token.getService());
-      }
-      hasInitedToken = true;
-    }
+    tokenAspect.ensureTokenInitialized();
     return delegationToken;
   }
 
-  protected Token<DelegationTokenIdentifier> selectDelegationToken(
-      UserGroupInformation ugi) {
-    return DT_SELECTOR.selectToken(getCanonicalUri(), ugi.getTokens(), getConf());
-  }
-
   @Override
   protected int getDefaultPort() {
     return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,

@@ -370,7 +329,7 @@ public class WebHdfsFileSystem extends FileSystem
   private synchronized void resetStateToFailOver() {
     currentNNAddrIndex = (currentNNAddrIndex + 1) % nnAddrs.length;
     delegationToken = null;
-    hasInitedToken = false;
+    tokenAspect.reset();
   }
 
   /**

@@ -881,9 +840,7 @@ public class WebHdfsFileSystem extends FileSystem
   @Override
   public void close() throws IOException {
     super.close();
-    if (dtRenewer != null) {
-      dtRenewer.removeRenewAction(this); // blocks
-    }
+    tokenAspect.removeRenewAction();
   }
 
   class OffsetUrlOpener extends ByteRangeInputStream.URLOpener {
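Taken together, the WebHdfsFileSystem changes above reduce to one wiring pattern: the filesystem holds a TokenAspect field and forwards all delegation-token bookkeeping to it. Below is a hedged sketch of that pattern for a made-up filesystem; the class name, token kind, and resetTokenState() helper are invented, and only the tokenAspect calls are taken from this diff:

    package org.apache.hadoop.hdfs.web;  // TokenAspect is package-private, so the sketch lives here

    import java.io.IOException;
    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.DelegationTokenRenewer;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;

    /**
     * Hypothetical example, not part of this commit: the wiring pattern that
     * WebHdfsFileSystem adopts above. The abstract FileSystem methods and the
     * Renewable hooks (getRenewToken/setDelegationToken) are left to subclasses.
     */
    abstract class TokenAspectExampleFs extends FileSystem
        implements DelegationTokenRenewer.Renewable {

      /** Token kind handled by this filesystem; the value here is invented. */
      public static final Text TOKEN_KIND = new Text("EXAMPLE delegation");

      /** The aspect owns token selection, lazy fetching, caching and renewal. */
      protected final TokenAspect<TokenAspectExampleFs> tokenAspect =
          new TokenAspect<TokenAspectExampleFs>(this, TOKEN_KIND);

      @Override
      public void initialize(URI name, Configuration conf) throws IOException {
        super.initialize(name, conf);
        setConf(conf);
        if (UserGroupInformation.isSecurityEnabled()) {
          // Reuse a matching delegation token already held by the current user, if any.
          tokenAspect.initDelegationToken(UserGroupInformation.getCurrentUser());
        }
      }

      /** Lazily obtain a delegation token; repeated calls hit the aspect's cache. */
      protected synchronized Token<?> getDelegationToken() throws IOException {
        tokenAspect.ensureTokenInitialized();
        return getRenewToken();  // Renewable hook, populated via setDelegationToken()
      }

      /** Invented helper: what a failover path would call (cf. resetStateToFailOver above). */
      protected synchronized void resetTokenState() {
        tokenAspect.reset();
      }

      @Override
      public void close() throws IOException {
        super.close();
        tokenAspect.removeRenewAction();  // stop background renewal for this instance
      }
    }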

TestTokenAspect.java

@@ -19,13 +19,19 @@
 package org.apache.hadoop.hdfs.web;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNotSame;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.Matchers.anyString;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 
 import java.io.FileNotFoundException;

@@ -35,6 +41,7 @@ import java.net.URISyntaxException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DelegationTokenRenewer;
+import org.apache.hadoop.fs.DelegationTokenRenewer.RenewAction;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;

@@ -163,15 +170,44 @@ public class TestTokenAspect {
     }
   }
 
+  private static RenewAction<?> getActionFromTokenAspect(
+      TokenAspect<DummyFs> tokenAspect) {
+    return (RenewAction<?>) Whitebox.getInternalState(tokenAspect, "action");
+  }
+
   @Test
-  public void testGetRemoteToken() throws IOException, URISyntaxException {
+  public void testCachedInitialization() throws IOException, URISyntaxException {
     Configuration conf = new Configuration();
-    UserGroupInformation.setConfiguration(conf);
     DummyFs fs = spy(new DummyFs());
     Token<TokenIdentifier> token = new Token<TokenIdentifier>(new byte[0],
         new byte[0], DummyFs.TOKEN_KIND, new Text("127.0.0.1:1234"));
     doReturn(token).when(fs).getDelegationToken(anyString());
+    doReturn(token).when(fs).getRenewToken();
+
+    fs.emulateSecurityEnabled = true;
+    fs.initialize(new URI("dummyfs://127.0.0.1:1234"), conf);
+    fs.tokenAspect.ensureTokenInitialized();
+    verify(fs, times(1)).getDelegationToken(null);
+    verify(fs, times(1)).setDelegationToken(token);
+
+    // For the second iteration, the token should be cached.
+    fs.tokenAspect.ensureTokenInitialized();
+    verify(fs, times(1)).getDelegationToken(null);
+    verify(fs, times(1)).setDelegationToken(token);
+  }
+
+  @Test
+  public void testGetRemoteToken() throws IOException, URISyntaxException {
+    Configuration conf = new Configuration();
+    DummyFs fs = spy(new DummyFs());
+    Token<TokenIdentifier> token = new Token<TokenIdentifier>(new byte[0],
+        new byte[0], DummyFs.TOKEN_KIND, new Text("127.0.0.1:1234"));
+    doReturn(token).when(fs).getDelegationToken(anyString());
+    doReturn(token).when(fs).getRenewToken();
+
     fs.initialize(new URI("dummyfs://127.0.0.1:1234"), conf);
     fs.tokenAspect.ensureTokenInitialized();

@@ -186,7 +222,6 @@ public class TestTokenAspect {
   public void testGetRemoteTokenFailure() throws IOException,
       URISyntaxException {
     Configuration conf = new Configuration();
-    UserGroupInformation.setConfiguration(conf);
     DummyFs fs = spy(new DummyFs());
     IOException e = new IOException();
     doThrow(e).when(fs).getDelegationToken(anyString());

@@ -203,7 +238,6 @@ public class TestTokenAspect {
   @Test
   public void testInitWithNoTokens() throws IOException, URISyntaxException {
     Configuration conf = new Configuration();
-    UserGroupInformation.setConfiguration(conf);
     DummyFs fs = spy(new DummyFs());
     doReturn(null).when(fs).getDelegationToken(anyString());
     fs.initialize(new URI("dummyfs://127.0.0.1:1234"), conf);

@@ -218,7 +252,6 @@ public class TestTokenAspect {
   @Test
   public void testInitWithUGIToken() throws IOException, URISyntaxException {
     Configuration conf = new Configuration();
-    UserGroupInformation.setConfiguration(conf);
     DummyFs fs = spy(new DummyFs());
     doReturn(null).when(fs).getDelegationToken(anyString());

@@ -241,6 +274,51 @@ public class TestTokenAspect {
     assertNull(Whitebox.getInternalState(fs.tokenAspect, "action"));
   }
 
+  @Test
+  public void testRenewal() throws Exception {
+    Configuration conf = new Configuration();
+    Token<?> token1 = mock(Token.class);
+    Token<?> token2 = mock(Token.class);
+    final long renewCycle = 100;
+    DelegationTokenRenewer.renewCycle = renewCycle;
+
+    UserGroupInformation ugi = UserGroupInformation.createUserForTesting("foo",
+        new String[] { "bar" });
+    DummyFs fs = spy(new DummyFs());
+    doReturn(token1).doReturn(token2).when(fs).getDelegationToken(null);
+    doReturn(token1).when(fs).getRenewToken();
+
+    // cause token renewer to abandon the token
+    doThrow(new IOException("renew failed")).when(token1).renew(conf);
+    doThrow(new IOException("get failed")).when(fs).addDelegationTokens(null,
+        null);
+
+    TokenAspect<DummyFs> tokenAspect = new TokenAspect<DummyFs>(fs,
+        DummyFs.TOKEN_KIND);
+    fs.initialize(new URI("dummyfs://127.0.0.1:1234"), conf);
+    tokenAspect.initDelegationToken(ugi);
+
+    // trigger token acquisition
+    tokenAspect.ensureTokenInitialized();
+    DelegationTokenRenewer.RenewAction<?> action = getActionFromTokenAspect(tokenAspect);
+    verify(fs).setDelegationToken(token1);
+    assertTrue(action.isValid());
+
+    // upon renewal, token will go bad based on above stubbing
+    Thread.sleep(renewCycle * 2);
+    assertSame(action, getActionFromTokenAspect(tokenAspect));
+    assertFalse(action.isValid());
+
+    // now that token is invalid, should get a new one
+    tokenAspect.ensureTokenInitialized();
+    verify(fs, times(2)).getDelegationToken(anyString());
+    verify(fs).setDelegationToken(token2);
+    assertNotSame(action, getActionFromTokenAspect(tokenAspect));
+
+    action = getActionFromTokenAspect(tokenAspect);
+    assertTrue(action.isValid());
+  }
+
   @Test
   public void testTokenSelectionPreferences() throws IOException,
       URISyntaxException {

@@ -252,7 +330,6 @@ public class TestTokenAspect {
         DummyFs.TOKEN_KIND);
     UserGroupInformation ugi = UserGroupInformation.createUserForTesting("foo",
         new String[] { "bar" });
-    UserGroupInformation.setConfiguration(conf);
 
     // use ip-based tokens
     SecurityUtilTestHelper.setTokenServiceUseIp(true);

TestWebHdfsTokens.java

@@ -19,16 +19,20 @@
 package org.apache.hadoop.hdfs.web;
 
 import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
-import static org.junit.Assert.*;
-import static org.mockito.Matchers.*;
-import static org.mockito.Mockito.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
 
 import java.io.IOException;
 import java.net.URI;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.DelegationTokenRenewer;
-import org.apache.hadoop.fs.DelegationTokenRenewer.RenewAction;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.web.resources.DeleteOpParam;
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;

@@ -40,96 +44,38 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
 
 public class TestWebHdfsTokens {
-  static Configuration conf;
-  static UserGroupInformation ugi;
+  private static Configuration conf;
 
   @BeforeClass
-  public static void setup() throws IOException {
+  public static void setUp() {
     conf = new Configuration();
     SecurityUtil.setAuthenticationMethod(KERBEROS, conf);
     UserGroupInformation.setConfiguration(conf);
-    ugi = UserGroupInformation.getCurrentUser();
   }
 
-  @SuppressWarnings("unchecked")
-  @Test(timeout=1000)
-  public void testInitWithNoToken() throws IOException {
-    WebHdfsFileSystem fs = spy(new WebHdfsFileSystem());
-    doReturn(null).when(fs).getDelegationToken(anyString());
-    doNothing().when(fs).addRenewAction(any(WebHdfsFileSystem.class));
-    fs.initialize(URI.create("webhdfs://127.0.0.1:0"), conf);
-
-    // when not in ugi, don't get one
-    verify(fs).initDelegationToken();
-    verify(fs).selectDelegationToken(ugi);
-    verify(fs, never()).setDelegationToken(any(Token.class));
-    verify(fs, never()).getDelegationToken();
-    verify(fs, never()).getDelegationToken(anyString());
-  }
-
-  @SuppressWarnings("unchecked")
-  @Test(timeout=1000)
-  public void testInitWithUGIToken() throws IOException {
-    WebHdfsFileSystem fs = spy(new WebHdfsFileSystem());
-    Token<DelegationTokenIdentifier> token = mock(Token.class);
-    doReturn(token).when(fs).selectDelegationToken(ugi);
-    doReturn(null).when(fs).getDelegationToken(anyString());
-    doNothing().when(fs).addRenewAction(any(WebHdfsFileSystem.class));
-    fs.initialize(URI.create("webhdfs://127.0.0.1:0"), conf);
-
-    // when in the ugi, store it but don't renew it
-    verify(fs).initDelegationToken();
-    verify(fs).selectDelegationToken(ugi);
-    verify(fs).setDelegationToken(token);
-    verify(fs, never()).getDelegationToken();
-    verify(fs, never()).getDelegationToken(anyString());
-    verify(fs, never()).addRenewAction(fs);
-  }
-
-  @SuppressWarnings("unchecked")
-  @Test(timeout=1000)
-  public void testInternalGetDelegationToken() throws IOException {
-    WebHdfsFileSystem fs = spy(new WebHdfsFileSystem());
-    Token<DelegationTokenIdentifier> token = mock(Token.class);
-    doReturn(null).when(fs).selectDelegationToken(ugi);
-    doReturn(token).when(fs).getDelegationToken(anyString());
-    doNothing().when(fs).addRenewAction(any(WebHdfsFileSystem.class));
-    fs.initialize(URI.create("webhdfs://127.0.0.1:0"), conf);
-
-    // get token, store it, and renew it
-    Token<?> token2 = fs.getDelegationToken();
-    assertEquals(token2, token);
-    verify(fs).getDelegationToken(null);
-    verify(fs).setDelegationToken(token);
-    verify(fs).addRenewAction(fs);
-    reset(fs);
-
-    // just return token, don't get/set/renew
-    token2 = fs.getDelegationToken();
-    assertEquals(token2, token);
-    verify(fs, never()).getDelegationToken(null);
-    verify(fs, never()).setDelegationToken(any(Token.class));
-    verify(fs, never()).addRenewAction(fs);
-  }
+  private WebHdfsFileSystem spyWebhdfsInSecureSetup() throws IOException {
+    WebHdfsFileSystem fsOrig = new WebHdfsFileSystem();
+    fsOrig.initialize(URI.create("webhdfs://127.0.0.1:0"), conf);
+    WebHdfsFileSystem fs = spy(fsOrig);
+    Whitebox.setInternalState(fsOrig.tokenAspect, "fs", fs);
+    return fs;
+  }
 
   @SuppressWarnings("unchecked")
   @Test(timeout = 1000)
   public void testTokenForNonTokenOp() throws IOException {
-    WebHdfsFileSystem fs = spy(new WebHdfsFileSystem());
+    WebHdfsFileSystem fs = spyWebhdfsInSecureSetup();
     Token<DelegationTokenIdentifier> token = mock(Token.class);
-    doReturn(null).when(fs).selectDelegationToken(ugi);
     doReturn(token).when(fs).getDelegationToken(null);
-    doNothing().when(fs).addRenewAction(any(WebHdfsFileSystem.class));
-    fs.initialize(URI.create("webhdfs://127.0.0.1:0"), conf);
 
     // should get/set/renew token
     fs.toUrl(GetOpParam.Op.OPEN, null);
     verify(fs).getDelegationToken();
     verify(fs).getDelegationToken(null);
     verify(fs).setDelegationToken(token);
-    verify(fs).addRenewAction(fs);
     reset(fs);
 
     // should return prior token

@@ -137,7 +83,6 @@ public class TestWebHdfsTokens {
     verify(fs).getDelegationToken();
     verify(fs, never()).getDelegationToken(null);
     verify(fs, never()).setDelegationToken(token);
-    verify(fs, never()).addRenewAction(fs);
   }
 
   @Test(timeout = 1000)

@@ -157,10 +102,8 @@ public class TestWebHdfsTokens {
   @SuppressWarnings("unchecked")
   private void checkNoTokenForOperation(HttpOpParam.Op op) throws IOException {
-    WebHdfsFileSystem fs = spy(new WebHdfsFileSystem());
-    doReturn(null).when(fs).selectDelegationToken(ugi);
+    WebHdfsFileSystem fs = spyWebhdfsInSecureSetup();
     doReturn(null).when(fs).getDelegationToken(null);
-    doNothing().when(fs).addRenewAction(any(WebHdfsFileSystem.class));
     fs.initialize(URI.create("webhdfs://127.0.0.1:0"), conf);
 
     // do not get a token!

@@ -168,7 +111,6 @@ public class TestWebHdfsTokens {
     verify(fs, never()).getDelegationToken();
     verify(fs, never()).getDelegationToken(null);
     verify(fs, never()).setDelegationToken(any(Token.class));
-    verify(fs, never()).addRenewAction(fs);
   }
 
   @Test(timeout = 1000)

@@ -182,8 +124,7 @@ public class TestWebHdfsTokens {
   @Test(timeout = 1000)
   public void testPutOpRequireAuth() {
     for (HttpOpParam.Op op : PutOpParam.Op.values()) {
-      boolean expect = (op == PutOpParam.Op.RENEWDELEGATIONTOKEN ||
-          op == PutOpParam.Op.CANCELDELEGATIONTOKEN);
+      boolean expect = (op == PutOpParam.Op.RENEWDELEGATIONTOKEN || op == PutOpParam.Op.CANCELDELEGATIONTOKEN);
       assertEquals(expect, op.getRequireAuth());
     }
   }

@@ -201,50 +142,4 @@ public class TestWebHdfsTokens {
       assertFalse(op.getRequireAuth());
     }
   }
-
-  @Test
-  public void testGetTokenAfterFailure() throws Exception {
-    Configuration conf = mock(Configuration.class);
-    Token<?> token1 = mock(Token.class);
-    Token<?> token2 = mock(Token.class);
-    long renewCycle = 1000;
-    DelegationTokenRenewer.renewCycle = renewCycle;
-
-    WebHdfsFileSystem fs = spy(new WebHdfsFileSystem());
-    doReturn(conf).when(fs).getConf();
-    doReturn(token1).doReturn(token2).when(fs).getDelegationToken(null);
-
-    // cause token renewer to abandon the token
-    doThrow(new IOException("renew failed")).when(token1).renew(conf);
-    doThrow(new IOException("get failed")).when(fs).addDelegationTokens(null, null);
-
-    // trigger token acquisition
-    Token<?> token = fs.getDelegationToken();
-    RenewAction<?> action = fs.action;
-    assertSame(token1, token);
-    assertTrue(action.isValid());
-
-    // fetch again and make sure it's the same as before
-    token = fs.getDelegationToken();
-    assertSame(token1, token);
-    assertSame(action, fs.action);
-    assertTrue(fs.action.isValid());
-
-    // upon renewal, token will go bad based on above stubbing
-    Thread.sleep(renewCycle);
-    assertSame(action, fs.action);
-    assertFalse(fs.action.isValid());
-
-    // now that token is invalid, should get a new one
-    token = fs.getDelegationToken();
-    assertSame(token2, token);
-    assertNotSame(action, fs.action);
-    assertTrue(fs.action.isValid());
-    action = fs.action;
-
-    // should get same one again
-    token = fs.getDelegationToken();
-    assertSame(token2, token);
-    assertSame(action, fs.action);
-    assertTrue(fs.action.isValid());
-  }
 }
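The new test helpers (getActionFromTokenAspect in TestTokenAspect above, spyWebhdfsInSecureSetup in this file) reach into TokenAspect's non-public fields through Mockito's Whitebox reflection utility. A tiny self-contained illustration of those two calls, using a made-up class and field:

    import org.mockito.internal.util.reflection.Whitebox;

    /** Hypothetical demo of the Whitebox calls relied on by the new tests. */
    public class WhiteboxDemo {
      private String secret = "before";

      public static void main(String[] args) {
        WhiteboxDemo demo = new WhiteboxDemo();
        // Read a private field by name, like getActionFromTokenAspect() does for "action".
        String read = (String) Whitebox.getInternalState(demo, "secret");
        // Overwrite a private field, like spyWebhdfsInSecureSetup() does for tokenAspect's "fs".
        Whitebox.setInternalState(demo, "secret", "after");
        System.out.println(read + " -> " + Whitebox.getInternalState(demo, "secret"));
      }
    }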

TestWebHdfsUrl.java

@@ -20,8 +20,6 @@ package org.apache.hadoop.hdfs.web;
 import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
 import static org.mockito.Mockito.mock;
 
 import java.io.IOException;

@@ -36,15 +34,20 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.web.resources.*;
+import org.apache.hadoop.hdfs.web.resources.DelegationParam;
+import org.apache.hadoop.hdfs.web.resources.DoAsParam;
+import org.apache.hadoop.hdfs.web.resources.GetOpParam;
+import org.apache.hadoop.hdfs.web.resources.PutOpParam;
+import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
+import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.SecurityUtilTestHelper;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import org.junit.*;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
 
 public class TestWebHdfsUrl {
   // NOTE: port is never used

@@ -306,95 +309,4 @@ public class TestWebHdfsUrl {
     }
     return (WebHdfsFileSystem) FileSystem.get(uri, conf);
   }
-
-  @Test(timeout=60000)
-  public void testSelectHdfsDelegationToken() throws Exception {
-    SecurityUtilTestHelper.setTokenServiceUseIp(true);
-
-    Configuration conf = new Configuration();
-    conf.setClass("fs.webhdfs.impl", MyWebHdfsFileSystem.class, FileSystem.class);
-
-    // test with implicit default port
-    URI fsUri = URI.create("webhdfs://localhost");
-    MyWebHdfsFileSystem fs = (MyWebHdfsFileSystem) FileSystem.get(fsUri, conf);
-    checkTokenSelection(fs, conf);
-
-    // test with explicit default port
-    fsUri = URI.create("webhdfs://localhost:"+fs.getDefaultPort());
-    fs = (MyWebHdfsFileSystem) FileSystem.get(fsUri, conf);
-    checkTokenSelection(fs, conf);
-
-    // test with non-default port
-    fsUri = URI.create("webhdfs://localhost:"+(fs.getDefaultPort()-1));
-    fs = (MyWebHdfsFileSystem) FileSystem.get(fsUri, conf);
-    checkTokenSelection(fs, conf);
-  }
-
-  private void checkTokenSelection(MyWebHdfsFileSystem fs,
-      Configuration conf) throws IOException {
-    int port = fs.getCanonicalUri().getPort();
-    // can't clear tokens from ugi, so create a new user everytime
-    UserGroupInformation ugi =
-        UserGroupInformation.createUserForTesting(fs.getUri().getAuthority(), new String[]{});
-
-    // use ip-based tokens
-    SecurityUtilTestHelper.setTokenServiceUseIp(true);
-
-    // test fallback to hdfs token
-    Token<?> hdfsToken = new Token<TokenIdentifier>(
-        new byte[0], new byte[0],
-        DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
-        new Text("127.0.0.1:8020"));
-    ugi.addToken(hdfsToken);
-
-    // test fallback to hdfs token
-    Token<?> token = fs.selectDelegationToken(ugi);
-    assertNotNull(token);
-    assertEquals(hdfsToken, token);
-
-    // test webhdfs is favored over hdfs
-    Token<?> webHdfsToken = new Token<TokenIdentifier>(
-        new byte[0], new byte[0],
-        WebHdfsFileSystem.TOKEN_KIND, new Text("127.0.0.1:"+port));
-    ugi.addToken(webHdfsToken);
-    token = fs.selectDelegationToken(ugi);
-    assertNotNull(token);
-    assertEquals(webHdfsToken, token);
-
-    // switch to using host-based tokens, no token should match
-    SecurityUtilTestHelper.setTokenServiceUseIp(false);
-    token = fs.selectDelegationToken(ugi);
-    assertNull(token);
-
-    // test fallback to hdfs token
-    hdfsToken = new Token<TokenIdentifier>(
-        new byte[0], new byte[0],
-        DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
-        new Text("localhost:8020"));
-    ugi.addToken(hdfsToken);
-    token = fs.selectDelegationToken(ugi);
-    assertNotNull(token);
-    assertEquals(hdfsToken, token);
-
-    // test webhdfs is favored over hdfs
-    webHdfsToken = new Token<TokenIdentifier>(
-        new byte[0], new byte[0],
-        WebHdfsFileSystem.TOKEN_KIND, new Text("localhost:"+port));
-    ugi.addToken(webHdfsToken);
-    token = fs.selectDelegationToken(ugi);
-    assertNotNull(token);
-    assertEquals(webHdfsToken, token);
-  }
-
-  static class MyWebHdfsFileSystem extends WebHdfsFileSystem {
-    @Override
-    public URI getCanonicalUri() {
-      return super.getCanonicalUri();
-    }
-    @Override
-    public int getDefaultPort() {
-      return super.getDefaultPort();
-    }
-  }
 }