diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cab1a09353e..04ca6f774a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -429,6 +429,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6471. Make moveFromLocal CLI testcases to be non-disruptive (Dasha
     Boudnik via cos)
 
+    HDFS-6395. Skip checking xattr limits for non-user-visible namespaces.
+    (Yi Liu via wang).
+
   OPTIMIZATIONS
 
     HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index ee7044c4631..78d2af88597 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -2833,6 +2833,7 @@ public class FSDirectory implements Closeable {
       EnumSet<XAttrSetFlag> flag) throws QuotaExceededException, IOException {
     List<XAttr> xAttrs = Lists.newArrayListWithCapacity(
         existingXAttrs != null ? existingXAttrs.size() + 1 : 1);
+    int userVisibleXAttrsNum = 0; // Number of user visible xAttrs
     boolean exist = false;
     if (existingXAttrs != null) {
       for (XAttr a: existingXAttrs) {
@@ -2841,6 +2842,10 @@ public class FSDirectory implements Closeable {
           exist = true;
         } else {
           xAttrs.add(a);
+
+          if (isUserVisible(a)) {
+            userVisibleXAttrsNum++;
+          }
         }
       }
     }
@@ -2848,7 +2853,11 @@ public class FSDirectory implements Closeable {
     XAttrSetFlag.validate(xAttr.getName(), exist, flag);
     xAttrs.add(xAttr);
 
-    if (xAttrs.size() > inodeXAttrsLimit) {
+    if (isUserVisible(xAttr)) {
+      userVisibleXAttrsNum++;
+    }
+
+    if (userVisibleXAttrsNum > inodeXAttrsLimit) {
       throw new IOException("Cannot add additional XAttr to inode, "
           + "would exceed limit of " + inodeXAttrsLimit);
     }
@@ -2856,6 +2865,14 @@ public class FSDirectory implements Closeable {
     return xAttrs;
   }
 
+  private boolean isUserVisible(XAttr xAttr) {
+    if (xAttr.getNameSpace() == XAttr.NameSpace.USER ||
+        xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED) {
+      return true;
+    }
+    return false;
+  }
+
   List<XAttr> getXAttrs(String src) throws IOException {
     String srcs = normalizePath(src);
     readLock();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
index 7c13744c9ed..6e31f2cb3b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
@@ -22,22 +22,29 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.StringReader;
+import java.util.EnumSet;
+import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
+import com.google.common.collect.Lists;
+
 /**
  * Test {@link FSDirectory}, the in-memory namespace tree.
  */
@@ -70,6 +77,7 @@ public class TestFSDirectory {
   @Before
   public void setUp() throws Exception {
     conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 2);
     cluster = new MiniDFSCluster.Builder(conf)
       .numDataNodes(REPLICATION)
       .build();
@@ -171,4 +179,36 @@ public class TestFSDirectory {
     Assert.assertTrue(classname.startsWith(INodeFile.class.getSimpleName())
         || classname.startsWith(INodeDirectory.class.getSimpleName()));
   }
+
+  @Test
+  public void testINodeXAttrsLimit() throws Exception {
+    List<XAttr> existingXAttrs = Lists.newArrayListWithCapacity(2);
+    XAttr xAttr1 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
+        setName("a1").setValue(new byte[]{0x31, 0x32, 0x33}).build();
+    XAttr xAttr2 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
+        setName("a2").setValue(new byte[]{0x31, 0x31, 0x31}).build();
+    existingXAttrs.add(xAttr1);
+    existingXAttrs.add(xAttr2);
+
+    // Adding a system namespace xAttr isn't affected by the inode limit.
+    XAttr newXAttr = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.SYSTEM).
+        setName("a3").setValue(new byte[]{0x33, 0x33, 0x33}).build();
+    List<XAttr> xAttrs = fsdir.setINodeXAttr(existingXAttrs, newXAttr,
+        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
+    Assert.assertEquals(xAttrs.size(), 3);
+
+    // Adding a trusted namespace xAttr is affected by the inode limit.
+    XAttr newXAttr1 = (new XAttr.Builder()).setNameSpace(
+        XAttr.NameSpace.TRUSTED).setName("a4").
+        setValue(new byte[]{0x34, 0x34, 0x34}).build();
+    try {
+      fsdir.setINodeXAttr(existingXAttrs, newXAttr1,
+          EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
+      Assert.fail("Setting user visible xattr on inode should fail if " +
+          "reaching limit.");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Cannot add additional XAttr " +
+          "to inode, would exceed limit", e);
+    }
+  }
 }
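
Note (not part of the patch): the limit the test drives directly through FSDirectory#setINodeXAttr can also be observed end to end through the public FileSystem xattr API, since user-namespace xattrs count against dfs.namenode.fs-limits.max-xattrs-per-inode while system-namespace xattrs attached internally by the NameNode no longer do. Below is a minimal sketch, assuming a MiniDFSCluster configured with the same limit of 2 as the test above; the class name XAttrLimitSketch and the path /xattr-demo are illustrative inventions, not from the patch.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class XAttrLimitSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Same limit as the test above: at most 2 user-visible xattrs per inode.
        conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 2);
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
          FileSystem fs = cluster.getFileSystem();
          Path file = new Path("/xattr-demo"); // hypothetical path
          fs.create(file).close();

          // Two user-namespace xattrs fit within the limit.
          fs.setXAttr(file, "user.a1", new byte[]{0x31});
          fs.setXAttr(file, "user.a2", new byte[]{0x32});

          // A third user-visible xattr exceeds the limit; the NameNode
          // rejects it with "Cannot add additional XAttr to inode ...".
          try {
            fs.setXAttr(file, "user.a3", new byte[]{0x33});
          } catch (IOException e) {
            System.out.println("Rejected as expected: " + e.getMessage());
          }
        } finally {
          cluster.shutdown();
        }
      }
    }

System-namespace xattrs are reserved for internal HDFS use and cannot be set through this client API, which is why the test exercises FSDirectory#setINodeXAttr directly to show that a SYSTEM xattr bypasses the limit while a TRUSTED one still counts against it.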