HDFS-6218. Audit log should use true client IP for proxied webhdfs operations. Contributed by Daryn Sharp.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1590640 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Kihwal Lee 2014-04-28 14:00:32 +00:00
parent 295b58bb99
commit 02d0f0ba54
4 changed files with 87 additions and 12 deletions

View File

@ -409,6 +409,9 @@ Release 2.5.0 - UNRELEASED
HDFS-6270. Secondary namenode status page shows transaction count in bytes.
(Benoy Antony via wheat9)
HDFS-6218. Audit log should use true client IP for proxied webhdfs
operations. (daryn via kihwal)
Release 2.4.1 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@ -690,7 +690,7 @@ public class JspHelper {
// honor the X-Forwarded-For header set by a configured set of trusted
// proxy servers. Allows audit logging and proxy user checks to work
// via an http proxy
static String getRemoteAddr(HttpServletRequest request) {
public static String getRemoteAddr(HttpServletRequest request) {
String remoteAddr = request.getRemoteAddr();
String proxyHeader = request.getHeader("X-Forwarded-For");
if (proxyHeader != null && ProxyUsers.isProxyServer(remoteAddr)) {

View File

@ -162,8 +162,16 @@ public class NamenodeWebHdfsMethods {
//clear content type
response.setContentType(null);
// set the remote address, if coming in via a trust proxy server then
// the address will be that of the proxied client
REMOTE_ADDRESS.set(JspHelper.getRemoteAddr(request));
}
// Clears the per-thread client address once the request is finished so a
// stale value cannot leak into the audit log of a later request handled by
// the same thread (REMOTE_ADDRESS is presumably a ThreadLocal — confirm).
private void reset() {
REMOTE_ADDRESS.set(null);
}
private static NamenodeProtocols getRPCServer(NameNode namenode)
throws IOException {
final NamenodeProtocols np = namenode.getRpcServer();
@ -394,7 +402,6 @@ public class NamenodeWebHdfsMethods {
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException, URISyntaxException {
REMOTE_ADDRESS.set(request.getRemoteAddr());
try {
return put(ugi, delegation, username, doAsUser,
path.getAbsolutePath(), op, destination, owner, group,
@ -402,7 +409,7 @@ public class NamenodeWebHdfsMethods {
modificationTime, accessTime, renameOptions, createParent,
delegationTokenArgument,aclPermission);
} finally {
REMOTE_ADDRESS.set(null);
reset();
}
}
});
@ -583,12 +590,11 @@ public class NamenodeWebHdfsMethods {
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException, URISyntaxException {
// NOTE(review): this view is a diff rendering with +/- markers stripped;
// the next line is the pre-patch address capture that the patch removes
// (the capture now happens earlier via JspHelper.getRemoteAddr).
REMOTE_ADDRESS.set(request.getRemoteAddr());
try {
return post(ugi, delegation, username, doAsUser,
path.getAbsolutePath(), op, concatSrcs, bufferSize);
} finally {
// Old line (removed by the patch):
REMOTE_ADDRESS.set(null);
// New line (added): clear the thread-local via the shared reset() helper.
reset();
}
}
});
@ -681,12 +687,11 @@ public class NamenodeWebHdfsMethods {
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException, URISyntaxException {
// NOTE(review): diff rendering — the next line is the pre-patch address
// capture that this commit removes in favor of JspHelper.getRemoteAddr.
REMOTE_ADDRESS.set(request.getRemoteAddr());
try {
return get(ugi, delegation, username, doAsUser,
path.getAbsolutePath(), op, offset, length, renewer, bufferSize);
} finally {
// Old line (removed by the patch):
REMOTE_ADDRESS.set(null);
// New line (added): clear the thread-local via the shared reset() helper.
reset();
}
}
});
@ -889,12 +894,11 @@ public class NamenodeWebHdfsMethods {
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException {
// NOTE(review): diff rendering — the next line is the pre-patch address
// capture that this commit removes in favor of JspHelper.getRemoteAddr.
REMOTE_ADDRESS.set(request.getRemoteAddr());
try {
return delete(ugi, delegation, username, doAsUser,
path.getAbsolutePath(), op, recursive);
} finally {
// Old line (removed by the patch):
REMOTE_ADDRESS.set(null);
// New line (added): clear the thread-local via the shared reset() helper.
reset();
}
}
});

View File

@ -24,7 +24,10 @@ import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.InetAddress;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
@ -33,9 +36,11 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.junit.Before;
import org.junit.Test;
/**
@ -45,6 +50,16 @@ public class TestAuditLogger {
private static final short TEST_PERMISSION = (short) 0654;
@Before
public void setup() {
// Reset the DummyAuditLogger's static capture fields between tests.
DummyAuditLogger.initialized = false;
DummyAuditLogger.logCount = 0;
DummyAuditLogger.remoteAddr = null;
Configuration conf = new HdfsConfiguration();
// Restore default proxy-user settings so a previous test's trusted-proxy
// configuration (e.g. CONF_HADOOP_PROXYSERVERS) does not leak across tests.
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
/**
* Tests that AuditLogger works as expected.
*/
@ -69,6 +84,57 @@ public class TestAuditLogger {
}
}
/**
 * Verifies that the audit logger records the true client IP for webhdfs
 * operations (HDFS-6218): the X-Forwarded-For header is honored only when
 * the immediate peer is configured as a trusted proxy server; otherwise the
 * direct peer address is logged.
 */
@Test
public void testWebHdfsAuditLogger() throws IOException, URISyntaxException {
Configuration conf = new HdfsConfiguration();
conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
DummyAuditLogger.class.getName());
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
GetOpParam.Op op = GetOpParam.Op.GETFILESTATUS;
try {
cluster.waitClusterUp();
assertTrue(DummyAuditLogger.initialized);
URI uri = new URI(
"http",
NetUtils.getHostPortString(cluster.getNameNode().getHttpAddress()),
"/webhdfs/v1/", op.toQueryString(), null);
// non-proxy request: audit log records the direct peer (loopback)
HttpURLConnection conn = (HttpURLConnection) uri.toURL().openConnection();
conn.setRequestMethod(op.getType().toString());
conn.connect();
assertEquals(200, conn.getResponseCode());
conn.disconnect();
assertEquals(1, DummyAuditLogger.logCount);
assertEquals("127.0.0.1", DummyAuditLogger.remoteAddr);
// non-trusted proxied request: X-Forwarded-For is ignored because the
// peer (127.0.0.1) is not configured as a trusted proxy server
conn = (HttpURLConnection) uri.toURL().openConnection();
conn.setRequestMethod(op.getType().toString());
conn.setRequestProperty("X-Forwarded-For", "1.1.1.1");
conn.connect();
assertEquals(200, conn.getResponseCode());
conn.disconnect();
assertEquals(2, DummyAuditLogger.logCount);
assertEquals("127.0.0.1", DummyAuditLogger.remoteAddr);
// trusted proxied request: the peer is now a trusted proxy, so the
// X-Forwarded-For address is logged as the client
conf.set(ProxyUsers.CONF_HADOOP_PROXYSERVERS, "127.0.0.1");
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
conn = (HttpURLConnection) uri.toURL().openConnection();
conn.setRequestMethod(op.getType().toString());
conn.setRequestProperty("X-Forwarded-For", "1.1.1.1");
conn.connect();
assertEquals(200, conn.getResponseCode());
conn.disconnect();
assertEquals(3, DummyAuditLogger.logCount);
assertEquals("1.1.1.1", DummyAuditLogger.remoteAddr);
} finally {
cluster.shutdown();
}
}
/**
* Minor test related to HADOOP-9155. Verify that during a
* FileSystem.setPermission() operation, the stat passed in during the
@ -128,7 +194,8 @@ public class TestAuditLogger {
static boolean initialized;
static int logCount;
static short foundPermission;
static String remoteAddr;
public void initialize(Configuration conf) {
initialized = true;
}
@ -140,6 +207,7 @@ public class TestAuditLogger {
public void logAuditEvent(boolean succeeded, String userName,
InetAddress addr, String cmd, String src, String dst,
FileStatus stat) {
remoteAddr = addr.getHostAddress();
logCount++;
if (stat != null) {
foundPermission = stat.getPermission().toShort();