From 0f770f08972b0f8683cb5c942d804458ae5e09d4 Mon Sep 17 00:00:00 2001
From: Kihwal Lee
Date: Mon, 28 Apr 2014 14:01:39 +0000
Subject: [PATCH] svn merge -c 1590640 merging from trunk to branch-2 to fix:
 HDFS-6218. Audit log should use true client IP for proxied webhdfs
 operations. Contributed by Daryn Sharp.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1590641 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +
 .../hadoop/hdfs/server/common/JspHelper.java  |  2 +-
 .../web/resources/NamenodeWebHdfsMethods.java | 20 +++--
 .../hdfs/server/namenode/TestAuditLogger.java | 74 ++++++++++++++++++-
 4 files changed, 87 insertions(+), 12 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3a7fb694ab5..664b91860a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -157,6 +157,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6270. Secondary namenode status page shows transaction count in
     bytes. (Benoy Antony via wheat9)
 
+    HDFS-6218. Audit log should use true client IP for proxied webhdfs
+    operations. (daryn via kihwal)
+
 Release 2.4.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
index 9c7554e383f..cc12ad7b370 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
@@ -669,7 +669,7 @@ public class JspHelper {
   // honor the X-Forwarded-For header set by a configured set of trusted
   // proxy servers.  allows audit logging and proxy user checks to work
   // via an http proxy
-  static String getRemoteAddr(HttpServletRequest request) {
+  public static String getRemoteAddr(HttpServletRequest request) {
     String remoteAddr = request.getRemoteAddr();
     String proxyHeader = request.getHeader("X-Forwarded-For");
     if (proxyHeader != null && ProxyUsers.isProxyServer(remoteAddr)) {

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 5005dfc805b..87b64f44341 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -162,8 +162,16 @@ public class NamenodeWebHdfsMethods {
 
     //clear content type
     response.setContentType(null);
+
+    // set the remote address; if coming in via a trusted proxy server then
+    // the address will be that of the proxied client
+    REMOTE_ADDRESS.set(JspHelper.getRemoteAddr(request));
   }
 
+  private void reset() {
+    REMOTE_ADDRESS.set(null);
+  }
+
   private static NamenodeProtocols getRPCServer(NameNode namenode)
       throws IOException {
     final NamenodeProtocols np = namenode.getRpcServer();
@@ -394,7 +402,6 @@ public class NamenodeWebHdfsMethods {
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       public Response run() throws IOException, URISyntaxException {
-        REMOTE_ADDRESS.set(request.getRemoteAddr());
         try {
           return put(ugi, delegation, username, doAsUser,
               path.getAbsolutePath(), op, destination, owner, group,
@@ -402,7 +409,7 @@ public class NamenodeWebHdfsMethods {
               modificationTime, accessTime, renameOptions, createParent,
               delegationTokenArgument,aclPermission);
         } finally {
-          REMOTE_ADDRESS.set(null);
+          reset();
         }
       }
     });
@@ -583,12 +590,11 @@ public class NamenodeWebHdfsMethods {
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       public Response run() throws IOException, URISyntaxException {
-        REMOTE_ADDRESS.set(request.getRemoteAddr());
         try {
           return post(ugi, delegation, username, doAsUser,
               path.getAbsolutePath(), op, concatSrcs, bufferSize);
         } finally {
-          REMOTE_ADDRESS.set(null);
+          reset();
         }
       }
     });
@@ -681,12 +687,11 @@ public class NamenodeWebHdfsMethods {
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       public Response run() throws IOException, URISyntaxException {
-        REMOTE_ADDRESS.set(request.getRemoteAddr());
         try {
           return get(ugi, delegation, username, doAsUser,
               path.getAbsolutePath(), op, offset, length, renewer, bufferSize);
         } finally {
-          REMOTE_ADDRESS.set(null);
+          reset();
         }
       }
     });
@@ -889,12 +894,11 @@ public class NamenodeWebHdfsMethods {
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       public Response run() throws IOException {
-        REMOTE_ADDRESS.set(request.getRemoteAddr());
         try {
           return delete(ugi, delegation, username, doAsUser,
               path.getAbsolutePath(), op, recursive);
         } finally {
-          REMOTE_ADDRESS.set(null);
+          reset();
         }
       }
     });

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
index f20d51a1dfd..8d1a70ec854 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
@@ -24,7 +24,10 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.net.HttpURLConnection;
 import java.net.InetAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -33,9 +36,11 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.web.resources.GetOpParam;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.junit.Before;
 import org.junit.Test;
 
 /**
@@ -45,6 +50,16 @@ public class TestAuditLogger {
 
   private static final short TEST_PERMISSION = (short) 0654;
 
+  @Before
+  public void setup() {
+    DummyAuditLogger.initialized = false;
+    DummyAuditLogger.logCount = 0;
+    DummyAuditLogger.remoteAddr = null;
+
+    Configuration conf = new HdfsConfiguration();
+    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
+  }
+
   /**
    * Tests that AuditLogger works as expected.
    */
@@ -69,6 +84,57 @@ public class TestAuditLogger {
     }
   }
 
+  @Test
+  public void testWebHdfsAuditLogger() throws IOException, URISyntaxException {
+    Configuration conf = new HdfsConfiguration();
+    conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
+        DummyAuditLogger.class.getName());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+
+    GetOpParam.Op op = GetOpParam.Op.GETFILESTATUS;
+    try {
+      cluster.waitClusterUp();
+      assertTrue(DummyAuditLogger.initialized);
+      URI uri = new URI(
+          "http",
+          NetUtils.getHostPortString(cluster.getNameNode().getHttpAddress()),
+          "/webhdfs/v1/", op.toQueryString(), null);
+
+      // non-proxy request
+      HttpURLConnection conn = (HttpURLConnection) uri.toURL().openConnection();
+      conn.setRequestMethod(op.getType().toString());
+      conn.connect();
+      assertEquals(200, conn.getResponseCode());
+      conn.disconnect();
+      assertEquals(1, DummyAuditLogger.logCount);
+      assertEquals("127.0.0.1", DummyAuditLogger.remoteAddr);
+
+      // non-trusted proxied request
+      conn = (HttpURLConnection) uri.toURL().openConnection();
+      conn.setRequestMethod(op.getType().toString());
+      conn.setRequestProperty("X-Forwarded-For", "1.1.1.1");
+      conn.connect();
+      assertEquals(200, conn.getResponseCode());
+      conn.disconnect();
+      assertEquals(2, DummyAuditLogger.logCount);
+      assertEquals("127.0.0.1", DummyAuditLogger.remoteAddr);
+
+      // trusted proxied request
+      conf.set(ProxyUsers.CONF_HADOOP_PROXYSERVERS, "127.0.0.1");
+      ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
+      conn = (HttpURLConnection) uri.toURL().openConnection();
+      conn.setRequestMethod(op.getType().toString());
+      conn.setRequestProperty("X-Forwarded-For", "1.1.1.1");
+      conn.connect();
+      assertEquals(200, conn.getResponseCode());
+      conn.disconnect();
+      assertEquals(3, DummyAuditLogger.logCount);
+      assertEquals("1.1.1.1", DummyAuditLogger.remoteAddr);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
   /**
    * Minor test related to HADOOP-9155.  Verify that during a
    * FileSystem.setPermission() operation, the stat passed in during the
@@ -128,7 +194,8 @@ public class TestAuditLogger {
     static boolean initialized;
     static int logCount;
     static short foundPermission;
-
+    static String remoteAddr;
+
     public void initialize(Configuration conf) {
       initialized = true;
     }
@@ -140,6 +207,7 @@ public class TestAuditLogger {
     public void logAuditEvent(boolean succeeded, String userName,
         InetAddress addr, String cmd, String src, String dst,
         FileStatus stat) {
+      remoteAddr = addr.getHostAddress();
      logCount++;
      if (stat != null) {
        foundPermission = stat.getPermission().toShort();
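
A note on the JspHelper change: the hunk above is truncated just inside
getRemoteAddr(), so the standalone sketch below illustrates, in isolation, the
rule the method enforces. TrustedProxyResolver, its trustedProxies set, and
the choice of the left-most X-Forwarded-For entry are illustrative assumptions
standing in for ProxyUsers.isProxyServer() and the truncated body of the real
method, not the committed implementation.

import java.util.Set;

// Minimal sketch of trusted-proxy address resolution (hypothetical names).
public class TrustedProxyResolver {

  private final Set<String> trustedProxies;

  public TrustedProxyResolver(Set<String> trustedProxies) {
    this.trustedProxies = trustedProxies;
  }

  // Returns the address an audit logger should record: the forwarded client
  // IP only when the directly connected peer is a configured trusted proxy.
  public String resolve(String socketAddr, String forwardedFor) {
    if (forwardedFor != null && trustedProxies.contains(socketAddr)) {
      // Assumption: take the left-most entry; proxies append their own
      // addresses, so the left-most value is the originating client.
      return forwardedFor.split(",")[0].trim();
    }
    // Untrusted peer (or no header): keep the socket address so a spoofed
    // header cannot masquerade as another client.
    return socketAddr;
  }

  public static void main(String[] args) {
    TrustedProxyResolver r = new TrustedProxyResolver(Set.of("127.0.0.1"));
    System.out.println(r.resolve("127.0.0.1", "1.1.1.1")); // 1.1.1.1
    System.out.println(r.resolve("10.0.0.9", "1.1.1.1"));  // 10.0.0.9
  }
}

This mirrors the behavior the new test pins down: an X-Forwarded-For header
from an unregistered peer is ignored, which is what makes honoring the header
safe to enable at all.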
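
The NamenodeWebHdfsMethods hunks all make the same move: the per-request
REMOTE_ADDRESS thread-local is now populated once in init(), from the
proxy-aware JspHelper.getRemoteAddr(request) rather than the raw
request.getRemoteAddr(), and every handler's finally block funnels through the
new reset() helper. A minimal sketch of that lifecycle, with hypothetical
class and method names, follows; the finally-block reset is what keeps a
reused server thread from carrying one caller's address into the next
request's audit record.

// Sketch of the per-request ThreadLocal lifecycle (hypothetical names).
public class RemoteAddressContext {

  private static final ThreadLocal<String> REMOTE_ADDRESS =
      new ThreadLocal<String>();

  // Mirrors init() in the diff: capture the resolved address up front.
  public static void set(String resolvedAddr) {
    REMOTE_ADDRESS.set(resolvedAddr);
  }

  // Read by the audit logger while the request is being served.
  public static String get() {
    return REMOTE_ADDRESS.get();
  }

  // Mirrors the new private reset() helper in the diff.
  public static void reset() {
    REMOTE_ADDRESS.set(null);
  }

  public static void main(String[] args) {
    set("1.1.1.1");
    try {
      System.out.println("audit sees: " + get()); // audit sees: 1.1.1.1
    } finally {
      reset(); // always runs, even if the handler throws
    }
    System.out.println("after reset: " + get());  // after reset: null
  }
}

Centralizing the set() is also what fixes the reported bug: previously each
handler populated the thread-local from request.getRemoteAddr(), so audit
entries for proxied WebHDFS calls recorded the proxy's address instead of the
true client IP, which is exactly what the JIRA title describes.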
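
The new testWebHdfsAuditLogger() exercises three request shapes: no header, a
spoofed X-Forwarded-For from an unregistered peer, and the same header after
127.0.0.1 is registered through ProxyUsers.CONF_HADOOP_PROXYSERVERS. The
standalone sketch below replays those shapes against a WebHDFS endpoint; the
URL (host, port, and query string) is a placeholder, and the printed response
codes stand in for the DummyAuditLogger assertions of the real test.

import java.net.HttpURLConnection;
import java.net.URL;

// Sketch of the three probe requests (hypothetical names, placeholder URL).
public class WebHdfsProxyProbe {

  static int send(String url, String xForwardedFor) throws Exception {
    HttpURLConnection conn =
        (HttpURLConnection) new URL(url).openConnection();
    conn.setRequestMethod("GET");
    if (xForwardedFor != null) {
      // Simulates a proxy; only honored when the NameNode lists this
      // client's address under ProxyUsers.CONF_HADOOP_PROXYSERVERS.
      conn.setRequestProperty("X-Forwarded-For", xForwardedFor);
    }
    conn.connect();
    int code = conn.getResponseCode();
    conn.disconnect();
    return code;
  }

  public static void main(String[] args) throws Exception {
    // Placeholder endpoint; the test derives this from the MiniDFSCluster.
    String url = "http://127.0.0.1:50070/webhdfs/v1/?op=GETFILESTATUS";
    System.out.println(send(url, null));      // audit should log 127.0.0.1
    System.out.println(send(url, "1.1.1.1")); // untrusted: still 127.0.0.1
    // After registering 127.0.0.1 as a proxy server and refreshing:
    System.out.println(send(url, "1.1.1.1")); // trusted: logs 1.1.1.1
  }
}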