HDFS-6395. Skip checking xattr limits for non-user-visible namespaces. Contributed by Yi Liu.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1602289 13f79535-47bb-0310-9956-ffa450edef68
parent 35eeacc122
commit 051049a4f5
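Seen from the client side, the change means that only user-visible extended attributes (the user and trusted namespaces) count toward the per-inode xattr limit, while xattrs the NameNode attaches in non-user-visible namespaces such as system are exempt. The sketch below is illustrative only and is not part of the patch: the class name, path, and attribute names are made up, and it assumes a MiniDFSCluster configured with the same DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY value of 2 that the test further down uses.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class XAttrLimitDemo {  // hypothetical demo class, not in the patch
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Same knob the test below lowers: at most 2 xattrs per inode.
        conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 2);
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
          FileSystem fs = cluster.getFileSystem();
          Path p = new Path("/xattr-demo");
          DFSTestUtil.createFile(fs, p, 0L, (short) 1, 0L);

          // user-namespace xattrs are user visible and count toward the limit.
          fs.setXAttr(p, "user.a1", new byte[]{0x31});
          fs.setXAttr(p, "user.a2", new byte[]{0x32});
          try {
            fs.setXAttr(p, "user.a3", new byte[]{0x33}); // third user-visible xattr
          } catch (IOException e) {
            // Expected: "Cannot add additional XAttr to inode, would exceed limit of 2"
            System.out.println("limit enforced: " + e.getMessage());
          }
          // Xattrs the NameNode adds in the system namespace are no longer counted
          // against this limit; that is what the FSDirectory change below implements.
        } finally {
          cluster.shutdown();
        }
      }
    }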
CHANGES.txt

@@ -169,6 +169,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6471. Make moveFromLocal CLI testcases to be non-disruptive
     (Dasha Boudnik via cos)
 
+    HDFS-6395. Skip checking xattr limits for non-user-visible namespaces.
+    (Yi Liu via wang).
+
   OPTIMIZATIONS
 
     HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)
FSDirectory.java

@@ -2843,6 +2843,7 @@ public class FSDirectory implements Closeable {
       EnumSet<XAttrSetFlag> flag) throws QuotaExceededException, IOException {
     List<XAttr> xAttrs = Lists.newArrayListWithCapacity(
         existingXAttrs != null ? existingXAttrs.size() + 1 : 1);
+    int userVisibleXAttrsNum = 0; // Number of user visible xAttrs
     boolean exist = false;
     if (existingXAttrs != null) {
       for (XAttr a: existingXAttrs) {

@@ -2851,6 +2852,10 @@ public class FSDirectory implements Closeable {
           exist = true;
         } else {
           xAttrs.add(a);
+
+          if (isUserVisible(a)) {
+            userVisibleXAttrsNum++;
+          }
         }
       }
     }

@@ -2858,7 +2863,11 @@ public class FSDirectory implements Closeable {
     XAttrSetFlag.validate(xAttr.getName(), exist, flag);
     xAttrs.add(xAttr);
 
-    if (xAttrs.size() > inodeXAttrsLimit) {
+    if (isUserVisible(xAttr)) {
+      userVisibleXAttrsNum++;
+    }
+
+    if (userVisibleXAttrsNum > inodeXAttrsLimit) {
       throw new IOException("Cannot add additional XAttr to inode, "
           + "would exceed limit of " + inodeXAttrsLimit);
     }

@@ -2866,6 +2875,14 @@ public class FSDirectory implements Closeable {
     return xAttrs;
   }
 
+  private boolean isUserVisible(XAttr xAttr) {
+    if (xAttr.getNameSpace() == XAttr.NameSpace.USER ||
+        xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED) {
+      return true;
+    }
+    return false;
+  }
+
   List<XAttr> getXAttrs(String src) throws IOException {
     String srcs = normalizePath(src);
     readLock();
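Distilled from the hunks above, the new rule is: count only the xattrs in the user and trusted namespaces against the per-inode limit, and let xattrs in other namespaces (such as system) through unconditionally. The restatement below is a minimal self-contained sketch; the class and method names are illustrative and not part of the patch, and the namespace list is trimmed to the values that appear in this diff.

    import java.io.IOException;
    import java.util.List;

    class XAttrLimitRule {  // hypothetical helper, not in the patch
      enum NameSpace { USER, TRUSTED, SYSTEM }

      static boolean isUserVisible(NameSpace ns) {
        return ns == NameSpace.USER || ns == NameSpace.TRUSTED;
      }

      // Throws only when the user-visible xattrs alone exceed the limit, so
      // non-user-visible (e.g. system) xattrs can always be added.
      static void check(List<NameSpace> resultingXAttrs, int inodeXAttrsLimit)
          throws IOException {
        int userVisibleXAttrsNum = 0;
        for (NameSpace ns : resultingXAttrs) {
          if (isUserVisible(ns)) {
            userVisibleXAttrsNum++;
          }
        }
        if (userVisibleXAttrsNum > inodeXAttrsLimit) {
          throw new IOException("Cannot add additional XAttr to inode, "
              + "would exceed limit of " + inodeXAttrsLimit);
        }
      }
    }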
TestFSDirectory.java

@@ -22,22 +22,29 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.StringReader;
import java.util.EnumSet;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

import com.google.common.collect.Lists;

/**
 * Test {@link FSDirectory}, the in-memory namespace tree.
 */

@@ -70,6 +77,7 @@ public class TestFSDirectory {
   @Before
   public void setUp() throws Exception {
     conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 2);
     cluster = new MiniDFSCluster.Builder(conf)
       .numDataNodes(REPLICATION)
       .build();

@@ -172,4 +180,36 @@ public class TestFSDirectory {
     Assert.assertTrue(classname.startsWith(INodeFile.class.getSimpleName())
         || classname.startsWith(INodeDirectory.class.getSimpleName()));
   }
+
+  @Test
+  public void testINodeXAttrsLimit() throws Exception {
+    List<XAttr> existingXAttrs = Lists.newArrayListWithCapacity(2);
+    XAttr xAttr1 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
+        setName("a1").setValue(new byte[]{0x31, 0x32, 0x33}).build();
+    XAttr xAttr2 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
+        setName("a2").setValue(new byte[]{0x31, 0x31, 0x31}).build();
+    existingXAttrs.add(xAttr1);
+    existingXAttrs.add(xAttr2);
+
+    // Adding a system namespace xAttr, isn't affected by inode xAttrs limit.
+    XAttr newXAttr = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.SYSTEM).
+        setName("a3").setValue(new byte[]{0x33, 0x33, 0x33}).build();
+    List<XAttr> xAttrs = fsdir.setINodeXAttr(existingXAttrs, newXAttr,
+        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
+    Assert.assertEquals(xAttrs.size(), 3);
+
+    // Adding a trusted namespace xAttr, is affected by inode xAttrs limit.
+    XAttr newXAttr1 = (new XAttr.Builder()).setNameSpace(
+        XAttr.NameSpace.TRUSTED).setName("a4").
+        setValue(new byte[]{0x34, 0x34, 0x34}).build();
+    try {
+      fsdir.setINodeXAttr(existingXAttrs, newXAttr1,
+          EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
+      Assert.fail("Setting user visable xattr on inode should fail if " +
+          "reaching limit.");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Cannot add additional XAttr " +
+          "to inode, would exceed limit", e);
+    }
+  }
 }