diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9289a7f79cf..5be2515f3c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -42,6 +42,9 @@ Release 2.5.0 - UNRELEASED
 
     HDFS-6225. Remove the o.a.h.hdfs.server.common.UpgradeStatusReport.
     (wheat9)
+
+    HDFS-6224. Add a unit test to TestAuditLogger for file permissions
+    passed to logAuditEvent. (Charles Lamb via wang)
 
   OPTIMIZATIONS
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 9839c17e6c4..61cc35f5f51 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7324,6 +7324,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       cacheManager.waitForRescanIfNeeded();
     }
     writeLock();
+    String effectiveDirectiveStr = null;
     Long result = null;
     try {
       checkOperation(OperationCategory.WRITE);
@@ -7335,11 +7336,12 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         throw new IOException("addDirective: you cannot specify an ID " +
             "for this operation.");
       }
-      CacheDirectiveInfo effectiveDirective = 
+      CacheDirectiveInfo effectiveDirective =
           cacheManager.addDirective(directive, pc, flags);
       getEditLog().logAddCacheDirectiveInfo(effectiveDirective,
           cacheEntry != null);
       result = effectiveDirective.getId();
+      effectiveDirectiveStr = effectiveDirective.toString();
       success = true;
     } finally {
       writeUnlock();
@@ -7347,7 +7349,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         getEditLog().logSync();
       }
       if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(success, "addCacheDirective", null, null, null);
+        logAuditEvent(success, "addCacheDirective", effectiveDirectiveStr, null, null);
       }
       RetryCache.setState(cacheEntry, success, result);
     }
@@ -7384,7 +7386,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         getEditLog().logSync();
       }
       if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(success, "modifyCacheDirective", null, null, null);
+        String idStr = "{id: " + directive.getId().toString() + "}";
+        logAuditEvent(success, "modifyCacheDirective", idStr, directive.toString(), null);
       }
       RetryCache.setState(cacheEntry, success);
     }
@@ -7412,7 +7415,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     } finally {
       writeUnlock();
       if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(success, "removeCacheDirective", null, null,
+        String idStr = "{id: " + id.toString() + "}";
+        logAuditEvent(success, "removeCacheDirective", idStr, null,
             null);
       }
       RetryCache.setState(cacheEntry, success);
@@ -7437,7 +7441,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     } finally {
       readUnlock();
       if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(success, "listCacheDirectives", null, null,
+        logAuditEvent(success, "listCacheDirectives", filter.toString(), null,
             null);
       }
     }
@@ -7454,6 +7458,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     }
     writeLock();
     boolean success = false;
+    String poolInfoStr = null;
    try {
       checkOperation(OperationCategory.WRITE);
       if (isInSafeMode()) {
@@ -7464,12 +7469,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         pc.checkSuperuserPrivilege();
       }
       CachePoolInfo info = cacheManager.addCachePool(req);
+      poolInfoStr = info.toString();
       getEditLog().logAddCachePool(info, cacheEntry != null);
       success = true;
     } finally {
       writeUnlock();
       if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(success, "addCachePool", req.getPoolName(), null, null);
+        logAuditEvent(success, "addCachePool", poolInfoStr, null, null);
       }
       RetryCache.setState(cacheEntry, success);
     }
@@ -7502,7 +7508,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     } finally {
       writeUnlock();
       if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(success, "modifyCachePool", req.getPoolName(), null, null);
+        String poolNameStr = "{poolName: " + req.getPoolName() + "}";
+        logAuditEvent(success, "modifyCachePool", poolNameStr, req.toString(), null);
       }
       RetryCache.setState(cacheEntry, success);
     }
@@ -7535,7 +7542,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     } finally {
       writeUnlock();
       if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(success, "removeCachePool", cachePoolName, null, null);
+        String poolNameStr = "{poolName: " + cachePoolName + "}";
+        logAuditEvent(success, "removeCachePool", poolNameStr, null, null);
       }
       RetryCache.setState(cacheEntry, success);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
index 3de27cb2746..f20d51a1dfd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
@@ -29,6 +29,7 @@ import java.net.InetAddress;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -42,6 +43,8 @@ import org.junit.Test;
  */
 public class TestAuditLogger {
 
+  private static final short TEST_PERMISSION = (short) 0654;
+
   /**
    * Tests that AuditLogger works as expected.
    */
@@ -55,6 +58,7 @@ public class TestAuditLogger {
     try {
       cluster.waitClusterUp();
       assertTrue(DummyAuditLogger.initialized);
+      DummyAuditLogger.resetLogCount();
 
       FileSystem fs = cluster.getFileSystem();
       long time = System.currentTimeMillis();
@@ -65,6 +69,36 @@ public class TestAuditLogger {
     }
   }
 
+  /**
+   * Minor test related to HADOOP-9155. Verify that during a
+   * FileSystem.setPermission() operation, the stat passed in during the
+   * logAuditEvent() call returns the new permission rather than the old
+   * permission.
+   */
+  @Test
+  public void testAuditLoggerWithSetPermission() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
+        DummyAuditLogger.class.getName());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+
+    try {
+      cluster.waitClusterUp();
+      assertTrue(DummyAuditLogger.initialized);
+      DummyAuditLogger.resetLogCount();
+
+      FileSystem fs = cluster.getFileSystem();
+      long time = System.currentTimeMillis();
+      final Path p = new Path("/");
+      fs.setTimes(p, time, time);
+      fs.setPermission(p, new FsPermission(TEST_PERMISSION));
+      assertEquals(TEST_PERMISSION, DummyAuditLogger.foundPermission);
+      assertEquals(2, DummyAuditLogger.logCount);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
   /**
    * Tests that a broken audit logger causes requests to fail.
    */
@@ -93,15 +127,23 @@
 
     static boolean initialized;
     static int logCount;
+    static short foundPermission;
 
     public void initialize(Configuration conf) {
       initialized = true;
     }
 
+    public static void resetLogCount() {
+      logCount = 0;
+    }
+
     public void logAuditEvent(boolean succeeded, String userName,
         InetAddress addr, String cmd, String src, String dst,
         FileStatus stat) {
       logCount++;
+      if (stat != null) {
+        foundPermission = stat.getPermission().toShort();
+      }
     }
 
   }