HDFS-5804. Merge change r1563323 from trunk.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1563332 13f79535-47bb-0310-9956-ffa450edef68
commit e5ab642997
parent 4a943f194c
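Summary: the NFS gateway previously created bare remote UGIs in DFSClientCache and fell back to a shared superuser DFSClient in RpcProgramNfs3, which fails under Kerberos. The patch builds proxy UGIs instead (the NFS client user proxied by the gateway's authenticated login user), routes attribute lookups through the per-user dfsClient, and adds the matching proxy-user grants to the affected tests.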
DFSClientCache.java

@@ -26,6 +26,7 @@ import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 
+import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -163,8 +164,9 @@ class DFSClientCache {
     return new CacheLoader<String, DFSClient>() {
       @Override
       public DFSClient load(String userName) throws Exception {
-        UserGroupInformation ugi = UserGroupInformation
-            .createRemoteUser(userName);
+        UserGroupInformation ugi = getUserGroupInformation(
+            userName,
+            UserGroupInformation.getCurrentUser());
 
         // Guava requires CacheLoader never returns null.
         return ugi.doAs(new PrivilegedExceptionAction<DFSClient>() {
@@ -177,6 +179,28 @@ class DFSClientCache {
       };
     }
 
+    /**
+     * This method uses the currentUser and realUser to create a proxy
+     * @param effectiveUser The user who is being proxied by the real user
+     * @param realUser The actual user who does the command
+     * @return Proxy UserGroupInformation
+     * @throws IOException If proxying fails
+     */
+    UserGroupInformation getUserGroupInformation(
+        String effectiveUser,
+        UserGroupInformation realUser)
+        throws IOException {
+      Preconditions.checkNotNull(effectiveUser);
+      Preconditions.checkNotNull(realUser);
+      UserGroupInformation ugi =
+          UserGroupInformation.createProxyUser(effectiveUser, realUser);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(String.format("Created ugi:" +
+            " %s for username: %s", ugi, effectiveUser));
+      }
+      return ugi;
+    }
+
     private RemovalListener<String, DFSClient> clientRemovalListener() {
       return new RemovalListener<String, DFSClient>() {
         @Override
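The new getUserGroupInformation helper is the heart of the fix: instead of creating a bare remote user, the cache builds a proxy UGI so the gateway's authenticated login user impersonates the NFS client user. A minimal self-contained sketch of the same pattern, assuming only hadoop-common on the classpath; the user name "alice" is illustrative, not from the patch:

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class ProxyUgiSketch {
  public static void main(String[] args) throws Exception {
    // The real (authenticated) user, e.g. the gateway's login user.
    UserGroupInformation realUser = UserGroupInformation.getCurrentUser();

    // Wrap an effective user around it: HDFS sees "alice" acting through
    // realUser, subject to the hadoop.proxyuser.* authorization rules.
    UserGroupInformation proxyUgi =
        UserGroupInformation.createProxyUser("alice", realUser);

    // Work done inside doAs() carries the proxied identity.
    String effective = proxyUgi.doAs(new PrivilegedExceptionAction<String>() {
      @Override
      public String run() throws IOException {
        return UserGroupInformation.getCurrentUser().getUserName();
      }
    });
    System.out.println(effective); // prints "alice"
  }
}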
RpcProgramNfs3.java

@@ -479,9 +479,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     }
 
     try {
-      // Use superUserClient to get file attr since we don't know whether the
-      // NFS client user has access permission to the file
-      attrs = writeManager.getFileAttr(superUserClient, handle, iug);
+      // HDFS-5804 removed superUserClient access
+      attrs = writeManager.getFileAttr(dfsClient, handle, iug);
+
       if (attrs == null) {
         LOG.error("Can't get path for fileId:" + handle.getFileId());
         return new ACCESS3Response(Nfs3Status.NFS3ERR_STALE);
@@ -603,8 +603,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       // Only do access check.
       try {
         // Don't read from cache. Client may not have read permission.
-        attrs = Nfs3Utils.getFileAttr(superUserClient,
-            Nfs3Utils.getFileIdPath(handle), iug);
+        attrs = Nfs3Utils.getFileAttr(
+            dfsClient,
+            Nfs3Utils.getFileIdPath(handle),
+            iug);
       } catch (IOException e) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Get error accessing file, fileId:" + handle.getFileId());
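Both RpcProgramNfs3 hunks follow from the cache change: with per-user proxied clients available, the shared superUserClient is no longer needed for attribute lookups, and using the caller's own dfsClient means the NameNode enforces the NFS user's permissions rather than bypassing them with superuser credentials.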
TestReaddir.java

@@ -22,6 +22,7 @@ import static org.junit.Assert.assertTrue;
 
+import java.io.IOException;
 import java.net.InetAddress;
 import java.util.Arrays;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
@@ -40,6 +41,9 @@ import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response;
 import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response.EntryPlus3;
 import org.apache.hadoop.oncrpc.XDR;
 import org.apache.hadoop.oncrpc.security.SecurityHandler;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.util.StringUtils;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -58,9 +62,17 @@ public class TestReaddir {
   static RpcProgramNfs3 nfsd;
   static String testdir = "/tmp";
   static SecurityHandler securityHandler;
 
   @BeforeClass
   public static void setup() throws Exception {
+    String currentUser = System.getProperty("user.name");
+    config.set(
+        ProxyUsers.getProxySuperuserGroupConfKey(currentUser),
+        "*");
+    config.set(
+        ProxyUsers.getProxySuperuserIpConfKey(currentUser),
+        "*");
+    ProxyUsers.refreshSuperUserGroupsConfiguration(config);
     cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
     cluster.waitActive();
     hdfs = cluster.getFileSystem();
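The setup above grants the test's current user impersonation rights: getProxySuperuserGroupConfKey(user) expands to hadoop.proxyuser.&lt;user&gt;.groups and getProxySuperuserIpConfKey(user) to hadoop.proxyuser.&lt;user&gt;.hosts, both wildcarded here so the MiniDFSCluster NameNode accepts proxying for any group from any host. A standalone sketch of the same grant plus the authorization check the NameNode performs, assuming the branch-2-era ProxyUsers API (three-argument authorize); "alice" is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.ProxyUsers;

public class ProxyGrantSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    String superUser = System.getProperty("user.name");

    // Same wildcard grant the tests apply to their MiniDFSCluster config.
    conf.set(ProxyUsers.getProxySuperuserGroupConfKey(superUser), "*");
    conf.set(ProxyUsers.getProxySuperuserIpConfKey(superUser), "*");
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);

    // Re-run the check the NameNode applies to incoming proxied calls;
    // authorize() throws AuthorizationException if the grant is absent.
    UserGroupInformation realUser =
        UserGroupInformation.createRemoteUser(superUser);
    UserGroupInformation proxyUgi =
        UserGroupInformation.createProxyUser("alice", realUser);
    ProxyUsers.authorize(proxyUgi, "127.0.0.1", conf);
    System.out.println("proxy grant accepted for " + proxyUgi);
  }
}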
TestDFSClientCache.java

@@ -20,12 +20,15 @@ package org.apache.hadoop.hdfs.nfs.nfs3;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertThat;
+import static org.hamcrest.core.Is.is;
 
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.Test;
 
 public class TestDFSClientCache {
@@ -49,6 +52,28 @@ public class TestDFSClientCache {
     assertEquals(MAX_CACHE_SIZE - 1, cache.clientCache.size());
   }
 
+  @Test
+  public void testGetUserGroupInformation() throws IOException {
+    String userName = "user1";
+    String currentUser = "currentUser";
+
+    UserGroupInformation currentUserUgi = UserGroupInformation
+        .createUserForTesting(currentUser, new String[0]);
+    currentUserUgi.setAuthenticationMethod(
+        UserGroupInformation.AuthenticationMethod.KERBEROS);
+    Configuration conf = new Configuration();
+    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost");
+    DFSClientCache cache = new DFSClientCache(conf);
+    UserGroupInformation ugiResult
+        = cache.getUserGroupInformation(userName, currentUserUgi);
+
+    assertThat(ugiResult.getUserName(), is(userName));
+    assertThat(ugiResult.getRealUser(), is(currentUserUgi));
+    assertThat(
+        ugiResult.getAuthenticationMethod(),
+        is(UserGroupInformation.AuthenticationMethod.PROXY));
+  }
+
   private static boolean isDfsClientClose(DFSClient c) {
     try {
       c.exists("");
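The new test pins down the contract of getUserGroupInformation: the returned UGI's effective name is the requested user, its real user is the Kerberos-authenticated caller, and its authentication method is PROXY, marking it as an impersonated identity.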
TestWrites.java

@@ -50,6 +50,7 @@ import org.apache.hadoop.nfs.nfs3.response.CREATE3Response;
 import org.apache.hadoop.nfs.nfs3.response.READ3Response;
 import org.apache.hadoop.oncrpc.XDR;
 import org.apache.hadoop.oncrpc.security.SecurityHandler;
+import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.jboss.netty.channel.Channel;
 import org.junit.Assert;
 import org.junit.Test;
@@ -285,6 +286,14 @@ public class TestWrites {
     SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
     Mockito.when(securityHandler.getUser()).thenReturn(
         System.getProperty("user.name"));
+    String currentUser = System.getProperty("user.name");
+    config.set(
+        ProxyUsers.getProxySuperuserGroupConfKey(currentUser),
+        "*");
+    config.set(
+        ProxyUsers.getProxySuperuserIpConfKey(currentUser),
+        "*");
+    ProxyUsers.refreshSuperUserGroupsConfiguration(config);
 
     try {
       cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
CHANGES.txt

@@ -14,6 +14,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-5153. Datanode should send block reports for each storage in a
     separate message. (Arpit Agarwal)
 
+    HDFS-5804. HDFS NFS Gateway fails to mount and proxy when using Kerberos.
+    (Abin Shahab via jing9)
+
     HDFS-5859. DataNode#checkBlockToken should check block tokens even if
     security is not enabled. (cmccabe)