HDFS-3255. HA DFS returns wrong token service. Contributed by Daryn Sharp.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1325415 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Todd Lipcon 2012-04-12 18:08:16 +00:00
parent cb7907aef2
commit cca655f4ab
3 changed files with 16 additions and 6 deletions

View File

@ -381,6 +381,8 @@ Release 2.0.0 - UNRELEASED
     HDFS-3260. TestDatanodeRegistration should set minimum DN version in
     addition to minimum NN version. (atm)
 
+    HDFS-3255. HA DFS returns wrong token service (Daryn Sharp via todd)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)

View File

@ -848,8 +848,9 @@ public class DistributedFileSystem extends FileSystem {
    */
   @Override
   public String getCanonicalServiceName() {
-    if (HAUtil.isLogicalUri(getConf(), getUri())) {
-      return getUri().getHost();
+    URI uri = getUri();
+    if (HAUtil.isLogicalUri(getConf(), uri)) {
+      return HAUtil.buildTokenServiceForLogicalUri(uri).toString();
     } else {
       return super.getCanonicalServiceName();
     }

View File

@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
@ -196,8 +197,7 @@ public class TestDelegationTokensWithHA {
     // check that the token selected for one of the physical IPC addresses
     // matches the one we received
     InetSocketAddress addr = nn0.getNameNodeAddress();
-    Text ipcDtService = new Text(
-        addr.getAddress().getHostAddress() + ":" + addr.getPort());
+    Text ipcDtService = SecurityUtil.buildTokenService(addr);
     Token<DelegationTokenIdentifier> token2 =
         DelegationTokenSelector.selectHdfsDelegationToken(ipcDtService, ugi);
     assertNotNull(token2);
@ -212,8 +212,15 @@ public class TestDelegationTokensWithHA {
    */
   @Test
   public void testDFSGetCanonicalServiceName() throws Exception {
-    assertEquals(fs.getCanonicalServiceName(),
-        HATestUtil.getLogicalUri(cluster).getHost());
+    URI hAUri = HATestUtil.getLogicalUri(cluster);
+    String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri).toString();
+    assertEquals(haService, dfs.getCanonicalServiceName());
+    Token<?> token = dfs.getDelegationToken(
+        UserGroupInformation.getCurrentUser().getShortUserName());
+    assertEquals(haService, token.getService().toString());
+    // make sure the logical uri is handled correctly
+    token.renew(dfs.getConf());
+    token.cancel(dfs.getConf());
   }
   enum TokenTestAction {