From 6eb03a41cbb51d31d10b4b7b9ce4400498857519 Mon Sep 17 00:00:00 2001 From: Uma Maheswara Rao G Date: Wed, 11 Jun 2014 19:25:17 +0000 Subject: [PATCH] Merge HDFS-2006 HDFS XAttrs feature from Trunk to Branch-2 git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1601992 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 18 + .../apache/hadoop/fs/AbstractFileSystem.java | 158 +++++- .../org/apache/hadoop/fs/FileContext.java | 190 +++++++ .../java/org/apache/hadoop/fs/FileSystem.java | 158 ++++++ .../apache/hadoop/fs/FilterFileSystem.java | 34 ++ .../java/org/apache/hadoop/fs/FilterFs.java | 34 ++ .../java/org/apache/hadoop/fs/XAttrCodec.java | 121 +++++ .../org/apache/hadoop/fs/XAttrSetFlag.java | 71 +++ .../org/apache/hadoop/fs/shell/FsCommand.java | 1 + .../apache/hadoop/fs/shell/XAttrCommands.java | 188 +++++++ .../hadoop/fs/viewfs/ChRootedFileSystem.java | 29 ++ .../hadoop/fs/viewfs/ViewFileSystem.java | 39 ++ .../src/site/apt/FileSystemShell.apt.vm | 59 +++ .../apache/hadoop/fs/TestHarFileSystem.java | 16 + .../hadoop/fs/shell/TestXAttrCommands.java | 98 ++++ hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 63 +++ hadoop-hdfs-project/hadoop-hdfs/pom.xml | 1 + .../main/java/org/apache/hadoop/fs/Hdfs.java | 28 ++ .../main/java/org/apache/hadoop/fs/XAttr.java | 149 ++++++ .../org/apache/hadoop/hdfs/DFSClient.java | 68 +++ .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 7 + .../hadoop/hdfs/DistributedFileSystem.java | 89 ++++ .../org/apache/hadoop/hdfs/XAttrHelper.java | 164 ++++++ .../hadoop/hdfs/protocol/ClientProtocol.java | 64 +++ ...amenodeProtocolServerSideTranslatorPB.java | 46 ++ .../ClientNamenodeProtocolTranslatorPB.java | 48 ++ .../hadoop/hdfs/protocolPB/PBHelper.java | 118 +++++ .../hdfs/server/namenode/AclConfigFlag.java | 56 --- .../hdfs/server/namenode/FSDirectory.java | 122 +++++ .../hdfs/server/namenode/FSEditLog.java | 32 +- .../hdfs/server/namenode/FSEditLogLoader.java | 18 + .../hdfs/server/namenode/FSEditLogOp.java | 134 +++++ .../server/namenode/FSEditLogOpCodes.java | 2 + .../hdfs/server/namenode/FSImageFormat.java | 8 +- .../server/namenode/FSImageFormatPBINode.java | 73 +++ .../hdfs/server/namenode/FSNamesystem.java | 186 ++++++- .../hadoop/hdfs/server/namenode/INode.java | 38 ++ .../hdfs/server/namenode/INodeAttributes.java | 15 +- .../namenode/INodeDirectoryAttributes.java | 10 +- .../server/namenode/INodeFileAttributes.java | 6 +- .../hdfs/server/namenode/INodeReference.java | 16 + .../hdfs/server/namenode/INodeSymlink.java | 16 + .../namenode/INodeWithAdditionalFields.java | 25 + .../hadoop/hdfs/server/namenode/NNConf.java | 104 ++++ .../namenode/NameNodeLayoutVersion.java | 3 +- .../server/namenode/NameNodeRpcServer.java | 19 + .../hdfs/server/namenode/XAttrFeature.java | 43 ++ .../namenode/XAttrPermissionFilter.java | 82 +++ .../hdfs/server/namenode/XAttrStorage.java | 80 +++ .../snapshot/FSImageFormatPBSnapshot.java | 17 +- .../server/namenode/snapshot/Snapshot.java | 17 +- .../web/resources/NamenodeWebHdfsMethods.java | 83 ++- .../org/apache/hadoop/hdfs/web/JsonUtil.java | 124 +++++ .../hadoop/hdfs/web/WebHdfsFileSystem.java | 64 +++ .../hadoop/hdfs/web/resources/GetOpParam.java | 2 + .../hadoop/hdfs/web/resources/PutOpParam.java | 3 + .../web/resources/XAttrEncodingParam.java | 56 +++ .../hdfs/web/resources/XAttrNameParam.java | 44 ++ .../hdfs/web/resources/XAttrSetFlagParam.java | 53 ++ .../hdfs/web/resources/XAttrValueParam.java | 45 ++ .../main/proto/ClientNamenodeProtocol.proto | 7 + 
.../hadoop-hdfs/src/main/proto/fsimage.proto | 21 +- .../hadoop-hdfs/src/main/proto/xattr.proto | 71 +++ .../src/main/resources/hdfs-default.xml | 24 + .../src/site/apt/ExtendedAttributes.apt.vm | 98 ++++ .../org/apache/hadoop/cli/TestXAttrCLI.java | 99 ++++ .../java/org/apache/hadoop/fs/TestXAttr.java | 85 ++++ .../org/apache/hadoop/hdfs/DFSTestUtil.java | 7 + .../org/apache/hadoop/hdfs/TestDFSShell.java | 330 +++++++++++- .../org/apache/hadoop/hdfs/TestSafeMode.java | 15 +- .../hdfs/server/namenode/FSXAttrBaseTest.java | 475 ++++++++++++++++++ .../server/namenode/TestFSImageWithXAttr.java | 127 +++++ .../server/namenode/TestFileContextXAttr.java | 96 ++++ .../hdfs/server/namenode/TestINodeFile.java | 21 + .../server/namenode/TestNameNodeXAttr.java | 75 +++ .../namenode/TestNamenodeRetryCache.java | 4 +- .../hdfs/server/namenode/TestStartup.java | 64 +++ .../server/namenode/TestXAttrConfigFlag.java | 150 ++++++ .../namenode/ha/TestRetryCacheWithHA.java | 56 ++- .../server/namenode/ha/TestXAttrsWithHA.java | 114 +++++ .../snapshot/TestXAttrWithSnapshot.java | 371 ++++++++++++++ .../apache/hadoop/hdfs/web/TestJsonUtil.java | 48 ++ .../hadoop/hdfs/web/TestWebHDFSXAttr.java | 36 ++ .../hadoop/hdfs/web/resources/TestParam.java | 40 ++ .../src/test/resources/editsStored | Bin 4684 -> 4805 bytes .../src/test/resources/editsStored.xml | 29 +- .../src/test/resources/testXAttrConf.xml | 409 +++++++++++++++ hadoop-project/src/site/site.xml | 1 + 88 files changed, 6415 insertions(+), 113 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/XAttrCodec.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/XAttrSetFlag.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestXAttrCommands.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclConfigFlag.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNConf.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFeature.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrEncodingParam.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrSetFlagParam.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrValueParam.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java create 
mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextXAttr.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeXAttr.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrConfigFlag.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestXAttrsWithHA.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSXAttr.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 520580a926f..89c4d8cb75f 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -195,6 +195,24 @@ Release 2.5.0 - UNRELEASED HADOOP-10656. The password keystore file is not picked by LDAP group mapping (brandonli) + BREAKDOWN OF HADOOP-10514 SUBTASKS AND RELATED JIRAS + + HADOOP-10520. Extended attributes definition and FileSystem APIs for + extended attributes. (Yi Liu via wang) + + HADOOP-10546. Javadoc and other small fixes for extended attributes in + hadoop-common. (Charles Lamb via wang) + + HADOOP-10521. FsShell commands for extended attributes. (Yi Liu via wang) + + HADOOP-10548. Improve FsShell xattr error handling and other fixes. (Charles Lamb via umamahesh) + + HADOOP-10567. Shift XAttr value encoding code out for reuse. (Yi Liu via umamahesh) + + HADOOP-10621. Remove CRLF for xattr value base64 encoding for better display.(Yi Liu via umamahesh) + + HADOOP-10575. Small fixes for XAttrCommands and test. (Yi Liu via umamahesh) + Release 2.4.1 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java index d0303be71af..38c7c1aa2a5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.fs; - import java.io.FileNotFoundException; import java.io.IOException; import java.lang.reflect.Constructor; @@ -1039,6 +1038,163 @@ public abstract class AbstractFileSystem { + " doesn't support getAclStatus"); } + /** + * Set an xattr of a file or directory. + * The name must be prefixed with user/trusted/security/system and + * followed by ".". For example, "user.attr". + *
<p/>
+ * A regular user can only set an xattr for the "user" namespace. + * The super user can set an xattr of either the "user" or "trusted" namespaces. + * The xattrs of the "security" and "system" namespaces are only used/exposed + * internally by/to the FS impl. + *
<p/>
+ * The access permissions of an xattr in the "user" namespace are + * defined by the file and directory permission bits. + * An xattr can only be set when the logged-in user has the correct permissions. + * If the xattr exists, it will be replaced. + *
<p/>
+ * @see + * http://en.wikipedia.org/wiki/Extended_file_attributes + * + * @param path Path to modify + * @param name xattr name. + * @param value xattr value. + * @throws IOException + */ + public void setXAttr(Path path, String name, byte[] value) + throws IOException { + setXAttr(path, name, value, EnumSet.of(XAttrSetFlag.CREATE, + XAttrSetFlag.REPLACE)); + } + + /** + * Set an xattr of a file or directory. + * The name must be prefixed with user/trusted/security/system and + * followed by ".". For example, "user.attr". + *
<p/>
+ * A regular user can only set an xattr for the "user" namespace. + * The super user can set an xattr of either the "user" or "trusted" namespaces. + * The xattrs of the "security" and "system" namespaces are only used/exposed + * internally by/to the FS impl. + *
<p/>
+ * The access permissions of an xattr in the "user" namespace are + * defined by the file and directory permission bits. + * An xattr can only be set when the logged-in user has the correct permissions. + * If the xattr exists, it will be replaced. + *
<p/>
+ * @see + * http://en.wikipedia.org/wiki/Extended_file_attributes + * + * @param path Path to modify + * @param name xattr name. + * @param value xattr value. + * @param flag xattr set flag + * @throws IOException + */ + public void setXAttr(Path path, String name, byte[] value, + EnumSet flag) throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support setXAttr"); + } + + /** + * Get an xattr for a file or directory. + * The name must be prefixed with user/trusted/security/system and + * followed by ".". For example, "user.attr". + *
<p/>
+ * A regular user can only get an xattr for the "user" namespace. + * The super user can get an xattr of either the "user" or "trusted" namespaces. + * The xattrs of the "security" and "system" namespaces are only used/exposed + * internally by/to the FS impl. + *
<p/>
+ * An xattr will only be returned when the logged-in user has the correct permissions. + *
<p/>
+ * @see + * http://en.wikipedia.org/wiki/Extended_file_attributes + * + * @param path Path to get extended attribute + * @param name xattr name. + * @return byte[] xattr value. + * @throws IOException + */ + public byte[] getXAttr(Path path, String name) throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support getXAttr"); + } + + /** + * Get all of the xattrs for a file or directory. + * Only those xattrs for which the logged-in user has permissions to view + * are returned. + *
<p/>
+ * A regular user can only get xattrs for the "user" namespace. + * The super user can only get xattrs for "user" and "trusted" namespaces. + * The xattr of "security" and "system" namespaces are only used/exposed + * internally by/to the FS impl. + *
<p/>
+ * @see + * http://en.wikipedia.org/wiki/Extended_file_attributes + * + * @param path Path to get extended attributes + * @return Map describing the XAttrs of the file or directory + * @throws IOException + */ + public Map getXAttrs(Path path) throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support getXAttrs"); + } + + /** + * Get all of the xattrs for a file or directory. + * Only those xattrs for which the logged-in user has permissions to view + * are returned. + *
<p/>
+ * A regular user can only get xattrs for the "user" namespace. + * The super user can only get xattrs for "user" and "trusted" namespaces. + * The xattr of "security" and "system" namespaces are only used/exposed + * internally by/to the FS impl. + *
<p/>
+ * @see + * http://en.wikipedia.org/wiki/Extended_file_attributes + * + * @param path Path to get extended attributes + * @param names XAttr names. + * @return Map describing the XAttrs of the file or directory + * @throws IOException + */ + public Map getXAttrs(Path path, List names) + throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support getXAttrs"); + } + + /** + * Remove an xattr of a file or directory. + * The name must be prefixed with user/trusted/security/system and + * followed by ".". For example, "user.attr". + *
<p/>
+ * A regular user can only remove an xattr for the "user" namespace. + * The super user can remove an xattr of either the "user" or "trusted" namespaces. + * The xattrs of the "security" and "system" namespaces are only used/exposed + * internally by/to the FS impl. + *
<p/>
+ * The access permissions of an xattr in the "user" namespace are + * defined by the file and directory permission bits. + * An xattr can only be set when the logged-in user has the correct permissions. + * If the xattr exists, it will be replaced. + *
<p/>
+ * @see + * http://en.wikipedia.org/wiki/Extended_file_attributes + * + * @param path Path to remove extended attribute + * @param name xattr name + * @throws IOException + */ + public void removeXAttr(Path path, String name) throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support removeXAttr"); + } + @Override //Object public int hashCode() { return myUri.hashCode(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java index c889d7dfb39..f9577dd912d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java @@ -2308,4 +2308,194 @@ public final class FileContext { } }.resolve(this, absF); } + + /** + * Set an xattr of a file or directory. + * The name must be prefixed with user/trusted/security/system and + * followed by ".". For example, "user.attr". + *
<p/>
+ * A regular user can only set an xattr for the "user" namespace. + * The super user can set an xattr of either the "user" or "trusted" namespaces. + * The xattrs of the "security" and "system" namespaces are only used/exposed + * internally by/to the FS impl. + *
<p/>
+ * The access permissions of an xattr in the "user" namespace are + * defined by the file and directory permission bits. + * An xattr can only be set when the logged-in user has the correct permissions. + * If the xattr exists, it will be replaced. + *
<p/>
+ * @see + * http://en.wikipedia.org/wiki/Extended_file_attributes + * + * @param path Path to modify + * @param name xattr name. + * @param value xattr value. + * @throws IOException + */ + public void setXAttr(Path path, String name, byte[] value) + throws IOException { + setXAttr(path, name, value, EnumSet.of(XAttrSetFlag.CREATE, + XAttrSetFlag.REPLACE)); + } + + /** + * Set an xattr of a file or directory. + * The name must be prefixed with user/trusted/security/system and + * followed by ".". For example, "user.attr". + *
<p/>
+ * A regular user can only set an xattr for the "user" namespace. + * The super user can set an xattr of either the "user" or "trusted" namespaces. + * The xattrs of the "security" and "system" namespaces are only used/exposed + * internally by/to the FS impl. + *
<p/>
+ * The access permissions of an xattr in the "user" namespace are + * defined by the file and directory permission bits. + * An xattr can only be set when the logged-in user has the correct permissions. + * If the xattr exists, it will be replaced. + *
<p/>
+ * @see + * http://en.wikipedia.org/wiki/Extended_file_attributes + * + * @param path Path to modify + * @param name xattr name. + * @param value xattr value. + * @param flag xattr set flag + * @throws IOException + */ + public void setXAttr(Path path, final String name, final byte[] value, + final EnumSet flag) throws IOException { + final Path absF = fixRelativePart(path); + new FSLinkResolver() { + @Override + public Void next(final AbstractFileSystem fs, final Path p) + throws IOException { + fs.setXAttr(p, name, value, flag); + return null; + } + }.resolve(this, absF); + } + + /** + * Get an xattr for a file or directory. + * The name must be prefixed with user/trusted/security/system and + * followed by ".". For example, "user.attr". + *
<p/>
+ * + * A regular user can only get an xattr for the "user" namespace. + * The super user can get an xattr of either the "user" or "trusted" namespaces. + * The xattrs of the "security" and "system" namespaces are only used/exposed + * internally by/to the FS impl. + *
<p/>
+ * An xattr will only be returned when the logged-in user has the correct permissions. + *
<p/>
+ * @see + * http://en.wikipedia.org/wiki/Extended_file_attributes + * + * @param path Path to get extended attribute + * @param name xattr name. + * @return byte[] xattr value. + * @throws IOException + */ + public byte[] getXAttr(Path path, final String name) throws IOException { + final Path absF = fixRelativePart(path); + return new FSLinkResolver() { + @Override + public byte[] next(final AbstractFileSystem fs, final Path p) + throws IOException { + return fs.getXAttr(p, name); + } + }.resolve(this, absF); + } + + /** + * Get all of the xattrs for a file or directory. + * Only those xattrs for which the logged-in user has permissions to view + * are returned. + *
<p/>
+ * A regular user can only get xattrs for the "user" namespace. + * The super user can only get xattrs for "user" and "trusted" namespaces. + * The xattr of "security" and "system" namespaces are only used/exposed + * internally by/to the FS impl. + *
<p/>
+ * @see + * http://en.wikipedia.org/wiki/Extended_file_attributes + * + * @param path Path to get extended attributes + * @return Map describing the XAttrs of the file or directory + * @throws IOException + */ + public Map getXAttrs(Path path) throws IOException { + final Path absF = fixRelativePart(path); + return new FSLinkResolver>() { + @Override + public Map next(final AbstractFileSystem fs, final Path p) + throws IOException { + return fs.getXAttrs(p); + } + }.resolve(this, absF); + } + + /** + * Get all of the xattrs for a file or directory. + * Only those xattrs for which the logged-in user has permissions to view + * are returned. + *
<p/>
+ * A regular user can only get xattrs for the "user" namespace. + * The super user can only get xattrs for "user" and "trusted" namespaces. + * The xattr of "security" and "system" namespaces are only used/exposed + * internally by/to the FS impl. + *
<p/>
+ * @see + * http://en.wikipedia.org/wiki/Extended_file_attributes + * + * @param path Path to get extended attributes + * @param names XAttr names. + * @return Map describing the XAttrs of the file or directory + * @throws IOException + */ + public Map getXAttrs(Path path, final List names) + throws IOException { + final Path absF = fixRelativePart(path); + return new FSLinkResolver>() { + @Override + public Map next(final AbstractFileSystem fs, final Path p) + throws IOException { + return fs.getXAttrs(p, names); + } + }.resolve(this, absF); + } + + /** + * Remove an xattr of a file or directory. + * The name must be prefixed with user/trusted/security/system and + * followed by ".". For example, "user.attr". + *
<p/>
+ * A regular user can only remove an xattr for the "user" namespace. + * The super user can remove an xattr of either the "user" or "trusted" namespaces. + * The xattrs of the "security" and "system" namespaces are only used/exposed + * internally by/to the FS impl. + *
<p/>
+ * The access permissions of an xattr in the "user" namespace are + * defined by the file and directory permission bits. + * An xattr can only be set when the logged-in user has the correct permissions. + * If the xattr exists, it will be replaced. + *
<p/>
+ * @see + * http://en.wikipedia.org/wiki/Extended_file_attributes + * + * @param path Path to remove extended attribute + * @param name xattr name + * @throws IOException + */ + public void removeXAttr(Path path, final String name) throws IOException { + final Path absF = fixRelativePart(path); + new FSLinkResolver() { + @Override + public Void next(final AbstractFileSystem fs, final Path p) + throws IOException { + fs.removeXAttr(p, name); + return null; + } + }.resolve(this, absF); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index 4c2a2f5ee58..a54c2c00a50 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -2363,6 +2363,164 @@ public abstract class FileSystem extends Configured implements Closeable { + " doesn't support getAclStatus"); } + /** + * Set an xattr of a file or directory. + * The name must be prefixed with user/trusted/security/system and + * followed by ".". For example, "user.attr". + *
<p/>
+ * A regular user can only set an xattr for the "user" namespace. + * The super user can set an xattr of either the "user" or "trusted" namespaces. + * The xattrs of the "security" and "system" namespaces are only used/exposed + * internally by/to the FS impl. + *
<p/>
+ * The access permissions of an xattr in the "user" namespace are + * defined by the file and directory permission bits. + * An xattr can only be set when the logged-in user has the correct permissions. + * If the xattr exists, it will be replaced. + *
<p/>
+ * @see + * http://en.wikipedia.org/wiki/Extended_file_attributes + * + * @param path Path to modify + * @param name xattr name. + * @param value xattr value. + * @throws IOException + */ + public void setXAttr(Path path, String name, byte[] value) + throws IOException { + setXAttr(path, name, value, EnumSet.of(XAttrSetFlag.CREATE, + XAttrSetFlag.REPLACE)); + } + + /** + * Set an xattr of a file or directory. + * The name must be prefixed with user/trusted/security/system and + * followed by ".". For example, "user.attr". + *
<p/>
+ * A regular user can only set an xattr for the "user" namespace. + * The super user can set an xattr of either the "user" or "trusted" namespaces. + * The xattrs of the "security" and "system" namespaces are only used/exposed + * internally by/to the FS impl. + *
<p/>
+ * The access permissions of an xattr in the "user" namespace are + * defined by the file and directory permission bits. + * An xattr can only be set when the logged-in user has the correct permissions. + * If the xattr exists, it will be replaced. + *
<p/>
+ * @see + * http://en.wikipedia.org/wiki/Extended_file_attributes + * + * @param path Path to modify + * @param name xattr name. + * @param value xattr value. + * @param flag xattr set flag + * @throws IOException + */ + public void setXAttr(Path path, String name, byte[] value, + EnumSet flag) throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support setXAttr"); + } + + /** + * Get an xattr for a file or directory. + * The name must be prefixed with user/trusted/security/system and + * followed by ".". For example, "user.attr". + *
<p/>
+ * + * A regular user can only get an xattr for the "user" namespace. + * The super user can get an xattr of either the "user" or "trusted" namespaces. + * The xattrs of the "security" and "system" namespaces are only used/exposed + * internally by/to the FS impl. + *
<p/>
+ * An xattr will only be returned when the logged-in user has the correct permissions. + *
<p/>
+ * @see + * http://en.wikipedia.org/wiki/Extended_file_attributes + * + * @param path Path to get extended attribute + * @param name xattr name. + * @return byte[] xattr value. + * @throws IOException + */ + public byte[] getXAttr(Path path, String name) throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support getXAttr"); + } + + /** + * Get all of the xattrs for a file or directory. + * Only those xattrs for which the logged-in user has permissions to view + * are returned. + *
<p/>
+ * A regular user can only get xattrs for the "user" namespace. + * The super user can only get xattrs for "user" and "trusted" namespaces. + * The xattr of "security" and "system" namespaces are only used/exposed + * internally by/to the FS impl. + *
<p/>
+ * @see + * http://en.wikipedia.org/wiki/Extended_file_attributes + * + * @param path Path to get extended attributes + * @return Map describing the XAttrs of the file or directory + * @throws IOException + */ + public Map getXAttrs(Path path) throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support getXAttrs"); + } + + /** + * Get all of the xattrs for a file or directory. + * Only those xattrs for which the logged-in user has permissions to view + * are returned. + *
<p/>
+ * A regular user can only get xattrs for the "user" namespace. + * The super user can only get xattrs for "user" and "trusted" namespaces. + * The xattr of "security" and "system" namespaces are only used/exposed + * internally by/to the FS impl. + *
<p/>
+ * @see + * http://en.wikipedia.org/wiki/Extended_file_attributes + * + * @param path Path to get extended attributes + * @param names XAttr names. + * @return Map describing the XAttrs of the file or directory + * @throws IOException + */ + public Map getXAttrs(Path path, List names) + throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support getXAttrs"); + } + + /** + * Remove an xattr of a file or directory. + * The name must be prefixed with user/trusted/security/system and + * followed by ".". For example, "user.attr". + *
<p/>
+ * A regular user can only remove an xattr for the "user" namespace. + * The super user can remove an xattr of either the "user" or "trusted" namespaces. + * The xattrs of the "security" and "system" namespaces are only used/exposed + * internally by/to the FS impl. + *
<p/>
+ * The access permissions of an xattr in the "user" namespace are + * defined by the file and directory permission bits. + * An xattr can only be set when the logged-in user has the correct permissions. + * If the xattr exists, it will be replaced. + *
<p/>
+ * @see + * http://en.wikipedia.org/wiki/Extended_file_attributes + * + * @param path Path to remove extended attribute + * @param name xattr name + * @throws IOException + */ + public void removeXAttr(Path path, String name) throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support removeXAttr"); + } + // making it volatile to be able to do a double checked locking private volatile static boolean FILE_SYSTEMS_LOADED = false; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java index cef38f66ac6..b98ff40ab56 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java @@ -23,6 +23,7 @@ import java.net.URI; import java.net.URISyntaxException; import java.util.EnumSet; import java.util.List; +import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -542,4 +543,37 @@ public class FilterFileSystem extends FileSystem { public AclStatus getAclStatus(Path path) throws IOException { return fs.getAclStatus(path); } + + @Override + public void setXAttr(Path path, String name, byte[] value) + throws IOException { + fs.setXAttr(path, name, value); + } + + @Override + public void setXAttr(Path path, String name, byte[] value, + EnumSet flag) throws IOException { + fs.setXAttr(path, name, value, flag); + } + + @Override + public byte[] getXAttr(Path path, String name) throws IOException { + return fs.getXAttr(path, name); + } + + @Override + public Map getXAttrs(Path path) throws IOException { + return fs.getXAttrs(path); + } + + @Override + public Map getXAttrs(Path path, List names) + throws IOException { + return fs.getXAttrs(path, names); + } + + @Override + public void removeXAttr(Path path, String name) throws IOException { + fs.removeXAttr(path, name); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java index 2239040ca32..307fbef9dcf 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java @@ -22,6 +22,7 @@ import java.net.URI; import java.net.URISyntaxException; import java.util.EnumSet; import java.util.List; +import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -316,4 +317,37 @@ public abstract class FilterFs extends AbstractFileSystem { public AclStatus getAclStatus(Path path) throws IOException { return myFs.getAclStatus(path); } + + @Override + public void setXAttr(Path path, String name, byte[] value) + throws IOException { + myFs.setXAttr(path, name, value); + } + + @Override + public void setXAttr(Path path, String name, byte[] value, + EnumSet flag) throws IOException { + myFs.setXAttr(path, name, value, flag); + } + + @Override + public byte[] getXAttr(Path path, String name) throws IOException { + return myFs.getXAttr(path, name); + } + + @Override + public Map getXAttrs(Path path) throws IOException { + return myFs.getXAttrs(path); + } + + @Override + public Map getXAttrs(Path path, List names) + throws 
IOException { + return myFs.getXAttrs(path, names); + } + + @Override + public void removeXAttr(Path path, String name) throws IOException { + myFs.removeXAttr(path, name); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/XAttrCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/XAttrCodec.java new file mode 100644 index 00000000000..e15968dd6d2 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/XAttrCodec.java @@ -0,0 +1,121 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import java.io.IOException; + +import org.apache.commons.codec.DecoderException; +import org.apache.commons.codec.binary.Base64; +import org.apache.commons.codec.binary.Hex; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import com.google.common.base.Preconditions; + +/** + * The value of XAttr is byte[], this class is to + * covert byte[] to some kind of string representation or convert back. + * String representation is convenient for display and input. For example + * display in screen as shell response and json response, input as http + * or shell parameter. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public enum XAttrCodec { + /** + * Value encoded as text + * string is enclosed in double quotes (\"). + */ + TEXT, + + /** + * Value encoded as hexadecimal string + * is prefixed with 0x. + */ + HEX, + + /** + * Value encoded as base64 string + * is prefixed with 0s. + */ + BASE64; + + private static final String HEX_PREFIX = "0x"; + private static final String BASE64_PREFIX = "0s"; + private static final Base64 base64 = new Base64(0); + + /** + * Decode string representation of a value and check whether it's + * encoded. If the given string begins with 0x or 0X, it expresses + * a hexadecimal number. If the given string begins with 0s or 0S, + * base64 encoding is expected. If the given string is enclosed in + * double quotes, the inner string is treated as text. Otherwise + * the given string is treated as text. + * @param value string representation of the value. 
+ * @return byte[] the value + * @throws IOException + */ + public static byte[] decodeValue(String value) throws IOException { + byte[] result = null; + if (value != null) { + if (value.length() >= 2) { + String en = value.substring(0, 2); + if (value.startsWith("\"") && value.endsWith("\"")) { + value = value.substring(1, value.length()-1); + result = value.getBytes("utf-8"); + } else if (en.equalsIgnoreCase(HEX_PREFIX)) { + value = value.substring(2, value.length()); + try { + result = Hex.decodeHex(value.toCharArray()); + } catch (DecoderException e) { + throw new IOException(e); + } + } else if (en.equalsIgnoreCase(BASE64_PREFIX)) { + value = value.substring(2, value.length()); + result = base64.decode(value); + } + } + if (result == null) { + result = value.getBytes("utf-8"); + } + } + return result; + } + + /** + * Encode byte[] value to string representation with encoding. + * Values encoded as text strings are enclosed in double quotes (\"), + * while strings encoded as hexadecimal and base64 are prefixed with + * 0x and 0s, respectively. + * @param value byte[] value + * @param encoding + * @return String string representation of value + * @throws IOException + */ + public static String encodeValue(byte[] value, XAttrCodec encoding) + throws IOException { + Preconditions.checkNotNull(value, "Value can not be null."); + if (encoding == HEX) { + return HEX_PREFIX + Hex.encodeHexString(value); + } else if (encoding == BASE64) { + return BASE64_PREFIX + base64.encodeToString(value); + } else { + return "\"" + new String(value, "utf-8") + "\""; + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/XAttrSetFlag.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/XAttrSetFlag.java new file mode 100644 index 00000000000..345a3d81e2c --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/XAttrSetFlag.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs; + +import java.io.IOException; +import java.util.EnumSet; + +import org.apache.hadoop.HadoopIllegalArgumentException; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +@InterfaceAudience.Public +@InterfaceStability.Stable +public enum XAttrSetFlag { + /** + * Create a new xattr. + * If the xattr exists already, exception will be thrown. + */ + CREATE((short) 0x01), + + /** + * Replace a existing xattr. + * If the xattr does not exist, exception will be thrown. 
+ */ + REPLACE((short) 0x02); + + private final short flag; + + private XAttrSetFlag(short flag) { + this.flag = flag; + } + + short getFlag() { + return flag; + } + + public static void validate(String xAttrName, boolean xAttrExists, + EnumSet flag) throws IOException { + if (flag == null || flag.isEmpty()) { + throw new HadoopIllegalArgumentException("A flag must be specified."); + } + + if (xAttrExists) { + if (!flag.contains(REPLACE)) { + throw new IOException("XAttr: " + xAttrName + + " already exists. The REPLACE flag must be specified."); + } + } else { + if (!flag.contains(CREATE)) { + throw new IOException("XAttr: " + xAttrName + + " does not exist. The CREATE flag must be specified."); + } + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java index c4a6d80754d..3372809022e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java @@ -59,6 +59,7 @@ abstract public class FsCommand extends Command { factory.registerCommands(Test.class); factory.registerCommands(Touch.class); factory.registerCommands(SnapshotCommands.class); + factory.registerCommands(XAttrCommands.class); } protected FsCommand() {} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java new file mode 100644 index 00000000000..c6dafbcac65 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java @@ -0,0 +1,188 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs.shell; + +import java.io.IOException; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.Map; +import java.util.Map.Entry; + +import com.google.common.base.Enums; +import com.google.common.base.Function; +import com.google.common.base.Preconditions; + +import org.apache.hadoop.HadoopIllegalArgumentException; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.XAttrCodec; +import org.apache.hadoop.util.StringUtils; + +/** + * XAttr related operations + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +class XAttrCommands extends FsCommand { + private static final String GET_FATTR = "getfattr"; + private static final String SET_FATTR = "setfattr"; + + public static void registerCommands(CommandFactory factory) { + factory.addClass(GetfattrCommand.class, "-" + GET_FATTR); + factory.addClass(SetfattrCommand.class, "-" + SET_FATTR); + } + + /** + * Implements the '-getfattr' command for the FsShell. + */ + public static class GetfattrCommand extends FsCommand { + public static final String NAME = GET_FATTR; + public static final String USAGE = "[-R] {-n name | -d} [-e en] "; + public static final String DESCRIPTION = + "Displays the extended attribute names and values (if any) for a " + + "file or directory.\n" + + "-R: Recursively list the attributes for all files and directories.\n" + + "-n name: Dump the named extended attribute value.\n" + + "-d: Dump all extended attribute values associated with pathname.\n" + + "-e : Encode values after retrieving them.\n" + + "Valid encodings are \"text\", \"hex\", and \"base64\".\n" + + "Values encoded as text strings are enclosed in double quotes (\"),\n" + + " and values encoded as hexadecimal and base64 are prefixed with\n" + + "0x and 0s, respectively.\n" + + ": The file or directory.\n"; + private final static Function enValueOfFunc = + Enums.valueOfFunction(XAttrCodec.class); + + private String name = null; + private boolean dump = false; + private XAttrCodec encoding = XAttrCodec.TEXT; + + @Override + protected void processOptions(LinkedList args) throws IOException { + name = StringUtils.popOptionWithArgument("-n", args); + String en = StringUtils.popOptionWithArgument("-e", args); + if (en != null) { + encoding = enValueOfFunc.apply(en.toUpperCase()); + Preconditions.checkArgument(encoding != null, + "Invalid/unsupported encoding option specified: " + en); + } + + boolean r = StringUtils.popOption("-R", args); + setRecursive(r); + dump = StringUtils.popOption("-d", args); + + if (!dump && name == null) { + throw new HadoopIllegalArgumentException( + "Must specify '-n name' or '-d' option."); + } + + if (args.isEmpty()) { + throw new HadoopIllegalArgumentException(" is missing."); + } + if (args.size() > 1) { + throw new HadoopIllegalArgumentException("Too many arguments."); + } + } + + @Override + protected void processPath(PathData item) throws IOException { + out.println("# file: " + item); + if (dump) { + Map xattrs = item.fs.getXAttrs(item.path); + if (xattrs != null) { + Iterator> iter = xattrs.entrySet().iterator(); + while(iter.hasNext()) { + Entry entry = iter.next(); + printXAttr(entry.getKey(), entry.getValue()); + } + } + } else { + byte[] value = item.fs.getXAttr(item.path, name); + printXAttr(name, value); + } + } + + private void printXAttr(String name, byte[] value) throws IOException{ + if (value != null) { + if (value.length != 0) { + out.println(name + "=" + 
XAttrCodec.encodeValue(value, encoding)); + } else { + out.println(name); + } + } + } + } + + /** + * Implements the '-setfattr' command for the FsShell. + */ + public static class SetfattrCommand extends FsCommand { + public static final String NAME = SET_FATTR; + public static final String USAGE = "{-n name [-v value] | -x name} "; + public static final String DESCRIPTION = + "Sets an extended attribute name and value for a file or directory.\n" + + "-n name: The extended attribute name.\n" + + "-v value: The extended attribute value. There are three different\n" + + "encoding methods for the value. If the argument is enclosed in double\n" + + "quotes, then the value is the string inside the quotes. If the\n" + + "argument is prefixed with 0x or 0X, then it is taken as a hexadecimal\n" + + "number. If the argument begins with 0s or 0S, then it is taken as a\n" + + "base64 encoding.\n" + + "-x name: Remove the extended attribute.\n" + + ": The file or directory.\n"; + + private String name = null; + private byte[] value = null; + private String xname = null; + + @Override + protected void processOptions(LinkedList args) throws IOException { + name = StringUtils.popOptionWithArgument("-n", args); + String v = StringUtils.popOptionWithArgument("-v", args); + if (v != null) { + value = XAttrCodec.decodeValue(v); + } + xname = StringUtils.popOptionWithArgument("-x", args); + + if (name != null && xname != null) { + throw new HadoopIllegalArgumentException( + "Can not specify both '-n name' and '-x name' option."); + } + if (name == null && xname == null) { + throw new HadoopIllegalArgumentException( + "Must specify '-n name' or '-x name' option."); + } + + if (args.isEmpty()) { + throw new HadoopIllegalArgumentException(" is missing."); + } + if (args.size() > 1) { + throw new HadoopIllegalArgumentException("Too many arguments."); + } + } + + @Override + protected void processPath(PathData item) throws IOException { + if (name != null) { + item.fs.setXAttr(item.path, name, value); + } else if (xname != null) { + item.fs.removeXAttr(item.path, xname); + } + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java index e9edcc889fc..bd2eef170fc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.net.URI; import java.util.EnumSet; import java.util.List; +import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -37,6 +38,7 @@ import org.apache.hadoop.fs.FilterFileSystem; import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.FsStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; @@ -313,6 +315,33 @@ class ChRootedFileSystem extends FilterFileSystem { return super.getAclStatus(fullPath(path)); } + @Override + public void setXAttr(Path path, String name, byte[] value, + EnumSet flag) throws IOException { + super.setXAttr(fullPath(path), name, value, flag); + } + + @Override + public byte[] getXAttr(Path path, String name) 
throws IOException { + return super.getXAttr(fullPath(path), name); + } + + @Override + public Map getXAttrs(Path path) throws IOException { + return super.getXAttrs(fullPath(path)); + } + + @Override + public Map getXAttrs(Path path, List names) + throws IOException { + return super.getXAttrs(fullPath(path), names); + } + + @Override + public void removeXAttr(Path path, String name) throws IOException { + super.removeXAttr(fullPath(path), name); + } + @Override public Path resolvePath(final Path p) throws IOException { return super.resolvePath(fullPath(p)); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index c95cb54a405..463cf67c73e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -27,6 +27,7 @@ import java.util.Arrays; import java.util.EnumSet; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.Map.Entry; @@ -46,6 +47,7 @@ import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; +import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; @@ -519,6 +521,43 @@ public class ViewFileSystem extends FileSystem { return res.targetFileSystem.getAclStatus(res.remainingPath); } + @Override + public void setXAttr(Path path, String name, byte[] value, + EnumSet flag) throws IOException { + InodeTree.ResolveResult res = + fsState.resolve(getUriPath(path), true); + res.targetFileSystem.setXAttr(res.remainingPath, name, value, flag); + } + + @Override + public byte[] getXAttr(Path path, String name) throws IOException { + InodeTree.ResolveResult res = + fsState.resolve(getUriPath(path), true); + return res.targetFileSystem.getXAttr(res.remainingPath, name); + } + + @Override + public Map getXAttrs(Path path) throws IOException { + InodeTree.ResolveResult res = + fsState.resolve(getUriPath(path), true); + return res.targetFileSystem.getXAttrs(res.remainingPath); + } + + @Override + public Map getXAttrs(Path path, List names) + throws IOException { + InodeTree.ResolveResult res = + fsState.resolve(getUriPath(path), true); + return res.targetFileSystem.getXAttrs(res.remainingPath, names); + } + + @Override + public void removeXAttr(Path path, String name) throws IOException { + InodeTree.ResolveResult res = fsState.resolve(getUriPath(path), + true); + res.targetFileSystem.removeXAttr(res.remainingPath, name); + } + @Override public void setVerifyChecksum(final boolean verifyChecksum) { List> mountPoints = diff --git a/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm b/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm index 18a361d42f6..af33f597883 100644 --- a/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm +++ b/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm @@ -254,6 +254,35 @@ getfacl Returns 0 on success and non-zero on error. +getfattr + + Usage: << >>> + + Displays the extended attribute names and values (if any) for a file or + directory. 
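For example, a typical round trip through the two new commands looks like the following sketch (assuming an existing /file and the default TEXT encoding; the output format follows what GetfattrCommand prints above — a "# file:" header, then name="value" lines):

  hadoop fs -setfattr -n user.myAttr -v myValue /file
  hadoop fs -getfattr -d /file
  # file: /file
  user.myAttr="myValue"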
+ + Options: + + * -R: Recursively list the attributes for all files and directories. + + * -n name: Dump the named extended attribute value. + + * -d: Dump all extended attribute values associated with pathname. + + * -e : Encode values after retrieving them. Valid encodings are "text", "hex", and "base64". Values encoded as text strings are enclosed in double quotes ("), and values encoded as hexadecimal and base64 are prefixed with 0x and 0s, respectively. + + * : The file or directory. + + Examples: + + * <<>> + + * <<>> + + Exit Code: + + Returns 0 on success and non-zero on error. + getmerge Usage: << [addnl]>>> @@ -450,6 +479,36 @@ setfacl Returns 0 on success and non-zero on error. +setfattr + + Usage: << >>> + + Sets an extended attribute name and value for a file or directory. + + Options: + + * -b: Remove all but the base ACL entries. The entries for user, group and others are retained for compatibility with permission bits. + + * -n name: The extended attribute name. + + * -v value: The extended attribute value. There are three different encoding methods for the value. If the argument is enclosed in double quotes, then the value is the string inside the quotes. If the argument is prefixed with 0x or 0X, then it is taken as a hexadecimal number. If the argument begins with 0s or 0S, then it is taken as a base64 encoding. + + * -x name: Remove the extended attribute. + + * : The file or directory. + + Examples: + + * <<>> + + * <<>> + + * <<>> + + Exit Code: + + Returns 0 on success and non-zero on error. + setrep Usage: << >>> diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java index 76f97f2a44b..16db5b11160 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java @@ -36,6 +36,7 @@ import java.lang.reflect.Modifier; import java.util.EnumSet; import java.util.Iterator; import java.util.List; +import java.util.Map; import static org.apache.hadoop.fs.Options.ChecksumOpt; import static org.apache.hadoop.fs.Options.CreateOpts; @@ -182,6 +183,21 @@ public class TestHarFileSystem { public void setAcl(Path path, List aclSpec) throws IOException; + public void setXAttr(Path path, String name, byte[] value) + throws IOException; + + public void setXAttr(Path path, String name, byte[] value, + EnumSet flag) throws IOException; + + public byte[] getXAttr(Path path, String name) throws IOException; + + public Map getXAttrs(Path path) throws IOException; + + public Map getXAttrs(Path path, List names) + throws IOException; + + public void removeXAttr(Path path, String name) throws IOException; + public AclStatus getAclStatus(Path path) throws IOException; } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestXAttrCommands.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestXAttrCommands.java new file mode 100644 index 00000000000..af0a2c352d2 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestXAttrCommands.java @@ -0,0 +1,98 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.shell; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.PrintStream; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FsShell; +import org.apache.hadoop.util.ToolRunner; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +public class TestXAttrCommands { + private final ByteArrayOutputStream errContent = + new ByteArrayOutputStream(); + private Configuration conf = null; + private PrintStream initialStdErr; + + @Before + public void setup() throws IOException { + errContent.reset(); + initialStdErr = System.err; + System.setErr(new PrintStream(errContent)); + conf = new Configuration(); + } + + @After + public void cleanUp() throws Exception { + errContent.reset(); + System.setErr(initialStdErr); + } + + @Test + public void testGetfattrValidations() throws Exception { + errContent.reset(); + assertFalse("getfattr should fail without path", + 0 == runCommand(new String[] { "-getfattr", "-d"})); + assertTrue(errContent.toString().contains(" is missing")); + + errContent.reset(); + assertFalse("getfattr should fail with extra argument", + 0 == runCommand(new String[] { "-getfattr", "extra", "-d", "/test"})); + assertTrue(errContent.toString().contains("Too many arguments")); + + errContent.reset(); + assertFalse("getfattr should fail without \"-n name\" or \"-d\"", + 0 == runCommand(new String[] { "-getfattr", "/test"})); + assertTrue(errContent.toString().contains("Must specify '-n name' or '-d' option")); + + errContent.reset(); + assertFalse("getfattr should fail with invalid encoding", + 0 == runCommand(new String[] { "-getfattr", "-d", "-e", "aaa", "/test"})); + assertTrue(errContent.toString().contains("Invalid/unsupported encoding option specified: aaa")); + } + + @Test + public void testSetfattrValidations() throws Exception { + errContent.reset(); + assertFalse("setfattr should fail without path", + 0 == runCommand(new String[] { "-setfattr", "-n", "user.a1" })); + assertTrue(errContent.toString().contains(" is missing")); + + errContent.reset(); + assertFalse("setfattr should fail with extra arguments", + 0 == runCommand(new String[] { "-setfattr", "extra", "-n", "user.a1", "/test"})); + assertTrue(errContent.toString().contains("Too many arguments")); + + errContent.reset(); + assertFalse("setfattr should fail without \"-n name\" or \"-x name\"", + 0 == runCommand(new String[] { "-setfattr", "/test"})); + assertTrue(errContent.toString().contains("Must specify '-n name' or '-x name' option")); + } + + private int runCommand(String[] commands) throws Exception { + return ToolRunner.run(conf, new FsShell(), commands); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 28f0f132acc..fea55d7cc01 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -350,6 +350,69 @@ Release 2.5.0 - UNRELEASED HDFS-6503. Fix typo of DFSAdmin restoreFailedStorage. (Zesheng Wu via wheat9) + BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS + + HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh) + + HDFS-6302. Implement XAttr as a INode feature. (Yi Liu via umamahesh) + + HDFS-6309. Javadocs for Xattrs apis in DFSClient and other minor fixups. (Charles Lamb via umamahesh) + + HDFS-6258. Namenode server-side storage for XAttrs. (Yi Liu via umamahesh) + + HDFS-6303. HDFS implementation of FileContext API for XAttrs. (Yi Liu and Charles Lamb via umamahesh) + + HDFS-6324. Shift XAttr helper code out for reuse. (Yi Liu via umamahesh) + + HDFS-6301. NameNode: persist XAttrs in fsimage and record XAttrs modifications to edit log. + (Yi Liu via umamahesh) + + HDFS-6298. XML based End-to-End test for getfattr and setfattr commands. (Yi Liu via umamahesh) + + HDFS-6314. Test cases for XAttrs. (Yi Liu via umamahesh) + + HDFS-6344. Maximum limit on the size of an xattr. (Yi Liu via umamahesh) + + HDFS-6377. Unify xattr name and value limits into a single limit. (wang) + + HDFS-6373. Remove support for extended attributes on symlinks. (Charles Lamb via wang) + + HDFS-6283. Write end user documentation for xattrs. (wang) + + HDFS-6412. Interface audience and stability annotations missing from + several new classes related to xattrs. (wang) + + HDFS-6259. Support extended attributes via WebHDFS. (yliu) + + HDFS-6346. Optimize OP_SET_XATTRS by persisting single Xattr entry per setXattr/removeXattr api call + (Yi Liu via umamahesh) + + HDFS-6331. ClientProtocol#setXattr should not be annotated idempotent. + (umamahesh via wang) + + HDFS-6335. TestOfflineEditsViewer for XAttr. (Yi Liu via umamahesh) + + HDFS-6343. fix TestNamenodeRetryCache and TestRetryCacheWithHA failures. (umamahesh) + + HDFS-6366. FsImage loading failed with RemoveXattr op (umamahesh) + + HDFS-6357. SetXattr should persist rpcIDs for handling retrycache with Namenode restart and HA + (umamahesh) + + HDFS-6372. Handle setXattr rpcIDs for OfflineEditsViewer. (umamahesh) + + HDFS-6410. DFSClient unwraps AclException in xattr methods, but those + methods cannot throw AclException. (wang) + + HDFS-6413. xattr names erroneously handled as case-insensitive. + (Charles Lamb via cnauroth) + + HDFS-6414. xattr modification operations are based on state of latest + snapshot instead of current version of inode. (Andrew Wang via cnauroth) + + HDFS-6374. 
setXAttr should require the user to be the owner of the file + or directory (Charles Lamb via wang) + Release 2.4.1 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 362b39f44b2..187da265146 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -434,6 +434,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> NamenodeProtocol.proto QJournalProtocol.proto acl.proto + xattr.proto datatransfer.proto fsimage.proto hdfs.proto diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java index 78da4647df3..e308a966f55 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java @@ -25,6 +25,7 @@ import java.net.URISyntaxException; import java.util.ArrayList; import java.util.EnumSet; import java.util.List; +import java.util.Map; import java.util.NoSuchElementException; import org.apache.hadoop.classification.InterfaceAudience; @@ -414,6 +415,33 @@ public class Hdfs extends AbstractFileSystem { public AclStatus getAclStatus(Path path) throws IOException { return dfs.getAclStatus(getUriPath(path)); } + + @Override + public void setXAttr(Path path, String name, byte[] value, + EnumSet flag) throws IOException { + dfs.setXAttr(getUriPath(path), name, value, flag); + } + + @Override + public byte[] getXAttr(Path path, String name) throws IOException { + return dfs.getXAttr(getUriPath(path), name); + } + + @Override + public Map getXAttrs(Path path) throws IOException { + return dfs.getXAttrs(getUriPath(path)); + } + + @Override + public Map getXAttrs(Path path, List names) + throws IOException { + return dfs.getXAttrs(getUriPath(path), names); + } + + @Override + public void removeXAttr(Path path, String name) throws IOException { + dfs.removeXAttr(getUriPath(path), name); + } /** * Renew an existing delegation token. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java new file mode 100644 index 00000000000..35a768062f6 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java @@ -0,0 +1,149 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import java.util.Arrays; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * XAttr is the POSIX Extended Attribute model similar to that found in + * traditional Operating Systems. 
Extended Attributes consist of one + * or more name/value pairs associated with a file or directory. Four + * namespaces are defined: user, trusted, security and system. + * 1) USER namespace attributes may be used by any user to store + * arbitrary information. Access permissions in this namespace are + * defined by the file or directory's permission bits. + *

+ * 2) TRUSTED namespace attributes are only visible and accessible to + * privileged users (a file or directory's owner or the fs + * admin). This namespace is available from both user space + * (filesystem API) and fs kernel. + *
+ * 3) SYSTEM namespace attributes are used by the fs kernel to store + * system objects. This namespace is only available in the fs + * kernel. It is not visible to users. + *
+ * 4) SECURITY namespace attributes are used by the fs kernel for + * security features. It is not visible to users. + *

+ * @see + * http://en.wikipedia.org/wiki/Extended_file_attributes + * + */ +@InterfaceAudience.Private +public class XAttr { + + public static enum NameSpace { + USER, + TRUSTED, + SECURITY, + SYSTEM; + } + + private final NameSpace ns; + private final String name; + private final byte[] value; + + public static class Builder { + private NameSpace ns = NameSpace.USER; + private String name; + private byte[] value; + + public Builder setNameSpace(NameSpace ns) { + this.ns = ns; + return this; + } + + public Builder setName(String name) { + this.name = name; + return this; + } + + public Builder setValue(byte[] value) { + this.value = value; + return this; + } + + public XAttr build() { + return new XAttr(ns, name, value); + } + } + + private XAttr(NameSpace ns, String name, byte[] value) { + this.ns = ns; + this.name = name; + this.value = value; + } + + public NameSpace getNameSpace() { + return ns; + } + + public String getName() { + return name; + } + + public byte[] getValue() { + return value; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((name == null) ? 0 : name.hashCode()); + result = prime * result + ((ns == null) ? 0 : ns.hashCode()); + result = prime * result + Arrays.hashCode(value); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + XAttr other = (XAttr) obj; + if (name == null) { + if (other.name != null) { + return false; + } + } else if (!name.equals(other.name)) { + return false; + } + if (ns != other.ns) { + return false; + } + if (!Arrays.equals(value, other.value)) { + return false; + } + return true; + } + + @Override + public String toString() { + return "XAttr [ns=" + ns + ", name=" + name + ", value=" + + Arrays.toString(value) + "]"; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index b317a76752c..a8f1a4589cc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -112,6 +112,8 @@ import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum; import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum; import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; @@ -2790,6 +2792,72 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory { UnresolvedPathException.class); } } + + public void setXAttr(String src, String name, byte[] value, + EnumSet flag) throws IOException { + checkOpen(); + try { + namenode.setXAttr(src, XAttrHelper.buildXAttr(name, value), flag); + } catch (RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + FileNotFoundException.class, + NSQuotaExceededException.class, + SafeModeException.class, + SnapshotAccessControlException.class, + UnresolvedPathException.class); + } + } + + public byte[] getXAttr(String src, String name) throws IOException { + checkOpen(); + try { + final List xAttrs = XAttrHelper.buildXAttrAsList(name); + final List result = namenode.getXAttrs(src, 
xAttrs); + return XAttrHelper.getFirstXAttrValue(result); + } catch(RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + FileNotFoundException.class, + UnresolvedPathException.class); + } + } + + public Map getXAttrs(String src) throws IOException { + checkOpen(); + try { + return XAttrHelper.buildXAttrMap(namenode.getXAttrs(src, null)); + } catch(RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + FileNotFoundException.class, + UnresolvedPathException.class); + } + } + + public Map getXAttrs(String src, List names) + throws IOException { + checkOpen(); + try { + return XAttrHelper.buildXAttrMap(namenode.getXAttrs( + src, XAttrHelper.buildXAttrs(names))); + } catch(RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + FileNotFoundException.class, + UnresolvedPathException.class); + } + } + + public void removeXAttr(String src, String name) throws IOException { + checkOpen(); + try { + namenode.removeXAttr(src, XAttrHelper.buildXAttr(name)); + } catch(RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + FileNotFoundException.class, + NSQuotaExceededException.class, + SafeModeException.class, + SnapshotAccessControlException.class, + UnresolvedPathException.class); + } + } @Override // RemotePeerFactory public Peer newConnectedPeer(InetSocketAddress addr) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index a19fbfe55fe..4ceec4a871e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -194,6 +194,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT = "supergroup"; public static final String DFS_NAMENODE_ACLS_ENABLED_KEY = "dfs.namenode.acls.enabled"; public static final boolean DFS_NAMENODE_ACLS_ENABLED_DEFAULT = false; + public static final String DFS_NAMENODE_XATTRS_ENABLED_KEY = "dfs.namenode.xattrs.enabled"; + public static final boolean DFS_NAMENODE_XATTRS_ENABLED_DEFAULT = true; public static final String DFS_ADMIN = "dfs.cluster.administrators"; public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.https.server.keystore.resource"; public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-server.xml"; @@ -297,6 +299,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final long DFS_NAMENODE_MIN_BLOCK_SIZE_DEFAULT = 1024*1024; public static final String DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY = "dfs.namenode.fs-limits.max-blocks-per-file"; public static final long DFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT = 1024*1024; + public static final String DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY = "dfs.namenode.fs-limits.max-xattrs-per-inode"; + public static final int DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT = 32; + public static final String DFS_NAMENODE_MAX_XATTR_SIZE_KEY = "dfs.namenode.fs-limits.max-xattr-size"; + public static final int DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT = 16384; + //Following keys have no defaults public static final String DFS_DATANODE_DATA_DIR_KEY = "dfs.datanode.data.dir"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 37beb3416ea..c6549f5e7d2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -25,6 +25,7 @@ import java.net.URI; import java.util.ArrayList; import java.util.EnumSet; import java.util.List; +import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -46,6 +47,7 @@ import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.FsStatus; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; @@ -1840,4 +1842,91 @@ public class DistributedFileSystem extends FileSystem { } }.resolve(this, absF); } + + @Override + public void setXAttr(Path path, final String name, final byte[] value, + final EnumSet flag) throws IOException { + Path absF = fixRelativePart(path); + new FileSystemLinkResolver() { + + @Override + public Void doCall(final Path p) throws IOException { + dfs.setXAttr(getPathName(p), name, value, flag); + return null; + } + + @Override + public Void next(final FileSystem fs, final Path p) throws IOException { + fs.setXAttr(p, name, value, flag); + return null; + } + }.resolve(this, absF); + } + + @Override + public byte[] getXAttr(Path path, final String name) throws IOException { + final Path absF = fixRelativePart(path); + return new FileSystemLinkResolver() { + @Override + public byte[] doCall(final Path p) throws IOException { + return dfs.getXAttr(getPathName(p), name); + } + @Override + public byte[] next(final FileSystem fs, final Path p) + throws IOException, UnresolvedLinkException { + return fs.getXAttr(p, name); + } + }.resolve(this, absF); + } + + @Override + public Map getXAttrs(Path path) throws IOException { + final Path absF = fixRelativePart(path); + return new FileSystemLinkResolver>() { + @Override + public Map doCall(final Path p) throws IOException { + return dfs.getXAttrs(getPathName(p)); + } + @Override + public Map next(final FileSystem fs, final Path p) + throws IOException, UnresolvedLinkException { + return fs.getXAttrs(p); + } + }.resolve(this, absF); + } + + @Override + public Map getXAttrs(Path path, final List names) + throws IOException { + final Path absF = fixRelativePart(path); + return new FileSystemLinkResolver>() { + @Override + public Map doCall(final Path p) throws IOException { + return dfs.getXAttrs(getPathName(p), names); + } + @Override + public Map next(final FileSystem fs, final Path p) + throws IOException, UnresolvedLinkException { + return fs.getXAttrs(p, names); + } + }.resolve(this, absF); + } + + @Override + public void removeXAttr(Path path, final String name) throws IOException { + Path absF = fixRelativePart(path); + new FileSystemLinkResolver() { + @Override + public Void doCall(final Path p) throws IOException { + dfs.removeXAttr(getPathName(p), name); + return null; + } + + @Override + public Void next(final FileSystem fs, final Path p) throws IOException { + fs.removeXAttr(p, name); + return null; + } + }.resolve(this, absF); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java new file mode 100644 index 00000000000..82b3f7cc300 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java @@ -0,0 +1,164 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs; + +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.HadoopIllegalArgumentException; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.fs.XAttr.NameSpace; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; + +@InterfaceAudience.Private +public class XAttrHelper { + + /** + * Build XAttr from xattr name with prefix. + */ + public static XAttr buildXAttr(String name) { + return buildXAttr(name, null); + } + + /** + * Build XAttr from name with prefix and value. + * Name can not be null. Value can be null. The name and prefix + * are validated. + * Both name and namespace are case sensitive. + */ + public static XAttr buildXAttr(String name, byte[] value) { + Preconditions.checkNotNull(name, "XAttr name cannot be null."); + + final int prefixIndex = name.indexOf("."); + if (prefixIndex < 4) {// Prefix length is at least 4. + throw new HadoopIllegalArgumentException("An XAttr name must be " + + "prefixed with user/trusted/security/system, followed by a '.'"); + } else if (prefixIndex == name.length() - 1) { + throw new HadoopIllegalArgumentException("XAttr name cannot be empty."); + } + + NameSpace ns; + final String prefix = name.substring(0, prefixIndex).toLowerCase(); + if (prefix.equals(NameSpace.USER.toString().toLowerCase())) { + ns = NameSpace.USER; + } else if (prefix.equals(NameSpace.TRUSTED.toString().toLowerCase())) { + ns = NameSpace.TRUSTED; + } else if (prefix.equals(NameSpace.SYSTEM.toString().toLowerCase())) { + ns = NameSpace.SYSTEM; + } else if (prefix.equals(NameSpace.SECURITY.toString().toLowerCase())) { + ns = NameSpace.SECURITY; + } else { + throw new HadoopIllegalArgumentException("An XAttr name must be " + + "prefixed with user/trusted/security/system, followed by a '.'"); + } + XAttr xAttr = (new XAttr.Builder()).setNameSpace(ns).setName(name. + substring(prefixIndex + 1)).setValue(value).build(); + + return xAttr; + } + + /** + * Build xattr name with prefix as XAttr list. 
+ */ + public static List buildXAttrAsList(String name) { + XAttr xAttr = buildXAttr(name); + List xAttrs = Lists.newArrayListWithCapacity(1); + xAttrs.add(xAttr); + + return xAttrs; + } + + /** + * Get value of first xattr from XAttr list + */ + public static byte[] getFirstXAttrValue(List xAttrs) { + byte[] value = null; + XAttr xAttr = getFirstXAttr(xAttrs); + if (xAttr != null) { + value = xAttr.getValue(); + if (value == null) { + value = new byte[0]; // xattr exists, but no value. + } + } + return value; + } + + /** + * Get first xattr from XAttr list + */ + public static XAttr getFirstXAttr(List xAttrs) { + if (xAttrs != null && !xAttrs.isEmpty()) { + return xAttrs.get(0); + } + + return null; + } + + /** + * Build xattr map from XAttr list, the key is + * xattr name with prefix, and value is xattr value. + */ + public static Map buildXAttrMap(List xAttrs) { + if (xAttrs == null) { + return null; + } + Map xAttrMap = Maps.newHashMap(); + for (XAttr xAttr : xAttrs) { + String name = getPrefixName(xAttr); + byte[] value = xAttr.getValue(); + if (value == null) { + value = new byte[0]; + } + xAttrMap.put(name, value); + } + + return xAttrMap; + } + + /** + * Get name with prefix from XAttr + */ + public static String getPrefixName(XAttr xAttr) { + if (xAttr == null) { + return null; + } + + String namespace = xAttr.getNameSpace().toString(); + return namespace.toLowerCase() + "." + xAttr.getName(); + } + + /** + * Build XAttr list from xattr name list. + */ + public static List buildXAttrs(List names) { + if (names == null || names.isEmpty()) { + throw new HadoopIllegalArgumentException("XAttr names can not be " + + "null or empty."); + } + + List xAttrs = Lists.newArrayListWithCapacity(names.size()); + for (String name : names) { + xAttrs.add(buildXAttr(name, null)); + } + return xAttrs; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 59f38147c45..c97a8c800a8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -31,10 +31,12 @@ import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.InvalidPathException; import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries; import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.UnresolvedLinkException; +import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; @@ -1254,4 +1256,66 @@ public interface ClientProtocol { */ @Idempotent public AclStatus getAclStatus(String src) throws IOException; + + /** + * Set xattr of a file or directory. + * A regular user only can set xattr of "user" namespace. + * A super user can set xattr of "user" and "trusted" namespace. + * XAttr of "security" and "system" namespace is only used/exposed + * internally to the FS impl. + *

+ * For xattr of "user" namespace, its access permissions are + * defined by the file or directory permission bits. + * XAttr will be set only when login user has correct permissions. + *

+ * @see + * http://en.wikipedia.org/wiki/Extended_file_attributes + * @param src file or directory + * @param xAttr XAttr to set + * @param flag set flag + * @throws IOException + */ + @AtMostOnce + public void setXAttr(String src, XAttr xAttr, EnumSet flag) + throws IOException; + + /** + * Get xattrs of file or directory. Values in xAttrs parameter are ignored. + * If xattrs is null or empty, this is equivalent to getting all xattrs + * of the file or directory. + * Only xattrs which the login user has correct permissions for will be + * returned. + *


+ * A regular user only can get xattr of "user" namespace. + * A super user can get xattr of "user" and "trusted" namespace. + * XAttr of "security" and "system" namespace is only used/exposed + * internally to the FS impl. + *

+ * @see + * http://en.wikipedia.org/wiki/Extended_file_attributes + * @param src file or directory + * @param xAttrs xAttrs to get + * @return List XAttr list + * @throws IOException + */ + @Idempotent + public List getXAttrs(String src, List xAttrs) + throws IOException; + + /** + * Remove xattr of a file or directory. The value in the xAttr parameter + * is ignored. + * Name must be prefixed with user/trusted/security/system. + *


+ * A regular user only can remove xattr of "user" namespace. + * A super user can remove xattr of "user" and "trusted" namespace. + * XAttr of "security" and "system" namespace is only used/exposed + * internally to the FS impl. + *

+ * @see + * http://en.wikipedia.org/wiki/Extended_file_attributes + * @param src file or directory + * @param xAttr XAttr to remove + * @throws IOException + */ + @Idempotent + public void removeXAttr(String src, XAttr xAttr) throws IOException; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 0795d2e8a79..2b19669d6aa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -174,6 +174,12 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto; +import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrResponseProto; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.namenode.INodeId; @@ -302,6 +308,12 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements private static final RemoveAclResponseProto VOID_REMOVEACL_RESPONSE = RemoveAclResponseProto.getDefaultInstance(); + + private static final SetXAttrResponseProto + VOID_SETXATTR_RESPONSE = SetXAttrResponseProto.getDefaultInstance(); + + private static final RemoveXAttrResponseProto + VOID_REMOVEXATTR_RESPONSE = RemoveXAttrResponseProto.getDefaultInstance(); /** * Constructor @@ -1262,4 +1274,38 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements throw new ServiceException(e); } } + + @Override + public SetXAttrResponseProto setXAttr(RpcController controller, + SetXAttrRequestProto req) throws ServiceException { + try { + server.setXAttr(req.getSrc(), PBHelper.convertXAttr(req.getXAttr()), + PBHelper.convert(req.getFlag())); + } catch (IOException e) { + throw new ServiceException(e); + } + return VOID_SETXATTR_RESPONSE; + } + + @Override + public GetXAttrsResponseProto getXAttrs(RpcController controller, + GetXAttrsRequestProto req) throws ServiceException { + try { + return PBHelper.convertXAttrsResponse(server.getXAttrs(req.getSrc(), + PBHelper.convertXAttrs(req.getXAttrsList()))); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public RemoveXAttrResponseProto removeXAttr(RpcController controller, + RemoveXAttrRequestProto req) throws ServiceException { + try { + server.removeXAttr(req.getSrc(), PBHelper.convertXAttr(req.getXAttr())); + } catch (IOException e) { + throw new ServiceException(e); + } + return VOID_REMOVEXATTR_RESPONSE; + } } diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index f87bf569441..57a46515356 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -35,6 +35,8 @@ import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.UnresolvedLinkException; +import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; @@ -141,6 +143,9 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSaf import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; @@ -1268,4 +1273,47 @@ public class ClientNamenodeProtocolTranslatorPB implements throw ProtobufHelper.getRemoteException(e); } } + + @Override + public void setXAttr(String src, XAttr xAttr, EnumSet flag) + throws IOException { + SetXAttrRequestProto req = SetXAttrRequestProto.newBuilder() + .setSrc(src) + .setXAttr(PBHelper.convertXAttrProto(xAttr)) + .setFlag(PBHelper.convert(flag)) + .build(); + try { + rpcProxy.setXAttr(null, req); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + + @Override + public List getXAttrs(String src, List xAttrs) + throws IOException { + GetXAttrsRequestProto.Builder builder = GetXAttrsRequestProto.newBuilder(); + builder.setSrc(src); + if (xAttrs != null) { + builder.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs)); + } + GetXAttrsRequestProto req = builder.build(); + try { + return PBHelper.convert(rpcProxy.getXAttrs(null, req)); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + + @Override + public void removeXAttr(String src, XAttr xAttr) throws IOException { + RemoveXAttrRequestProto req = RemoveXAttrRequestProto + .newBuilder().setSrc(src) + .setXAttr(PBHelper.convertXAttrProto(xAttr)).build(); + try { + rpcProxy.removeXAttr(null, req); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index 9faa0cdc12e..72dc3cb90d1 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -32,6 +32,8 @@ import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclEntryScope; import org.apache.hadoop.fs.permission.AclEntryType; @@ -149,6 +151,10 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto; import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto; +import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto; +import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.XAttrNamespaceProto; +import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrSetFlagProto; import org.apache.hadoop.hdfs.security.token.block.BlockKey; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; @@ -220,6 +226,8 @@ public class PBHelper { AclEntryType.values(); private static final FsAction[] FSACTION_VALUES = FsAction.values(); + private static final XAttr.NameSpace[] XATTR_NAMESPACE_VALUES = + XAttr.NameSpace.values(); private PBHelper() { /** Hidden constructor */ @@ -2006,6 +2014,14 @@ public class PBHelper { private static AclEntryType convert(AclEntryTypeProto v) { return castEnum(v, ACL_ENTRY_TYPE_VALUES); } + + private static XAttrNamespaceProto convert(XAttr.NameSpace v) { + return XAttrNamespaceProto.valueOf(v.ordinal()); + } + + private static XAttr.NameSpace convert(XAttrNamespaceProto v) { + return castEnum(v, XATTR_NAMESPACE_VALUES); + } private static FsActionProto convert(FsAction v) { return FsActionProto.valueOf(v != null ? 
v.ordinal() : 0); @@ -2059,6 +2075,108 @@ public class PBHelper { .addAllEntries(convertAclEntryProto(e.getEntries())).build(); return GetAclStatusResponseProto.newBuilder().setResult(r).build(); } + + public static XAttrProto convertXAttrProto(XAttr a) { + XAttrProto.Builder builder = XAttrProto.newBuilder(); + builder.setNamespace(convert(a.getNameSpace())); + if (a.getName() != null) { + builder.setName(a.getName()); + } + if (a.getValue() != null) { + builder.setValue(getByteString(a.getValue())); + } + return builder.build(); + } + + public static List convertXAttrProto( + List xAttrSpec) { + ArrayList xAttrs = Lists.newArrayListWithCapacity( + xAttrSpec.size()); + for (XAttr a : xAttrSpec) { + XAttrProto.Builder builder = XAttrProto.newBuilder(); + builder.setNamespace(convert(a.getNameSpace())); + if (a.getName() != null) { + builder.setName(a.getName()); + } + if (a.getValue() != null) { + builder.setValue(getByteString(a.getValue())); + } + xAttrs.add(builder.build()); + } + return xAttrs; + } + + /** + * The flag field in PB is a bitmask whose values are the same as the + * enum values of XAttrSetFlag. + */ + public static int convert(EnumSet flag) { + int value = 0; + if (flag.contains(XAttrSetFlag.CREATE)) { + value |= XAttrSetFlagProto.XATTR_CREATE.getNumber(); + } + if (flag.contains(XAttrSetFlag.REPLACE)) { + value |= XAttrSetFlagProto.XATTR_REPLACE.getNumber(); + } + return value; + } + + public static EnumSet convert(int flag) { + EnumSet result = + EnumSet.noneOf(XAttrSetFlag.class); + if ((flag & XAttrSetFlagProto.XATTR_CREATE_VALUE) == + XAttrSetFlagProto.XATTR_CREATE_VALUE) { + result.add(XAttrSetFlag.CREATE); + } + if ((flag & XAttrSetFlagProto.XATTR_REPLACE_VALUE) == + XAttrSetFlagProto.XATTR_REPLACE_VALUE) { + result.add(XAttrSetFlag.REPLACE); + } + return result; + } + + public static XAttr convertXAttr(XAttrProto a) { + XAttr.Builder builder = new XAttr.Builder(); + builder.setNameSpace(convert(a.getNamespace())); + if (a.hasName()) { + builder.setName(a.getName()); + } + if (a.hasValue()) { + builder.setValue(a.getValue().toByteArray()); + } + return builder.build(); + } + + public static List convertXAttrs(List xAttrSpec) { + ArrayList xAttrs = Lists.newArrayListWithCapacity(xAttrSpec.size()); + for (XAttrProto a : xAttrSpec) { + XAttr.Builder builder = new XAttr.Builder(); + builder.setNameSpace(convert(a.getNamespace())); + if (a.hasName()) { + builder.setName(a.getName()); + } + if (a.hasValue()) { + builder.setValue(a.getValue().toByteArray()); + } + xAttrs.add(builder.build()); + } + return xAttrs; + } + + public static List convert(GetXAttrsResponseProto a) { + List xAttrs = a.getXAttrsList(); + return convertXAttrs(xAttrs); + } + + public static GetXAttrsResponseProto convertXAttrsResponse( + List xAttrs) { + GetXAttrsResponseProto.Builder builder = GetXAttrsResponseProto + .newBuilder(); + if (xAttrs != null) { + builder.addAllXAttrs(convertXAttrProto(xAttrs)); + } + return builder.build(); + } public static ShortCircuitShmSlotProto convert(SlotId slotId) { return ShortCircuitShmSlotProto.newBuilder().
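
[Editorial sketch, not part of the patch.] The two convert overloads in the PBHelper hunk above round-trip an EnumSet of XAttrSetFlag through a single protobuf int bitmask. A minimal self-contained sketch of that round-trip follows; the literal constants 0x01 and 0x02 are an assumption standing in for the generated XAttrSetFlagProto values from xattr.proto, and the class name is hypothetical:

    import java.util.EnumSet;

    // Standalone illustration only; the real code uses the
    // protobuf-generated XAttrSetFlagProto constants.
    public class XAttrFlagMaskSketch {
      enum XAttrSetFlag { CREATE, REPLACE }

      // Assumed to mirror xattr.proto: XATTR_CREATE = 0x01, XATTR_REPLACE = 0x02.
      private static final int XATTR_CREATE_VALUE = 0x01;
      private static final int XATTR_REPLACE_VALUE = 0x02;

      static int toMask(EnumSet<XAttrSetFlag> flags) {
        int value = 0;
        if (flags.contains(XAttrSetFlag.CREATE)) {
          value |= XATTR_CREATE_VALUE;
        }
        if (flags.contains(XAttrSetFlag.REPLACE)) {
          value |= XATTR_REPLACE_VALUE;
        }
        return value;
      }

      static EnumSet<XAttrSetFlag> fromMask(int mask) {
        EnumSet<XAttrSetFlag> result = EnumSet.noneOf(XAttrSetFlag.class);
        if ((mask & XATTR_CREATE_VALUE) == XATTR_CREATE_VALUE) {
          result.add(XAttrSetFlag.CREATE);
        }
        if ((mask & XATTR_REPLACE_VALUE) == XATTR_REPLACE_VALUE) {
          result.add(XAttrSetFlag.REPLACE);
        }
        return result;
      }

      public static void main(String[] args) {
        EnumSet<XAttrSetFlag> flags =
            EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE);
        int mask = toMask(flags);            // 0x01 | 0x02 = 3
        System.out.println(mask);            // prints 3
        System.out.println(fromMask(mask));  // prints [CREATE, REPLACE]
      }
    }

Because each flag maps to a distinct bit, the conversion is lossless in both directions, which is what lets the RPC layer carry the EnumSet as a single integer field.
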
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclConfigFlag.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclConfigFlag.java deleted file mode 100644 index bfc6b4d7a4c..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclConfigFlag.java +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs.server.namenode; - -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.protocol.AclException; - -/** - * Support for ACLs is controlled by a configuration flag. If the configuration - * flag is false, then the NameNode will reject all ACL-related operations. - */ -final class AclConfigFlag { - private final boolean enabled; - - /** - * Creates a new AclConfigFlag from configuration. - * - * @param conf Configuration to check - */ - public AclConfigFlag(Configuration conf) { - enabled = conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, - DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_DEFAULT); - LogFactory.getLog(AclConfigFlag.class).info("ACLs enabled? " + enabled); - } - - /** - * Checks the flag on behalf of an ACL API call. - * - * @throws AclException if ACLs are disabled - */ - public void checkForApiCall() throws AclException { - if (!enabled) { - throw new AclException(String.format( - "The ACL operation has been rejected. 
" - + "Support for ACLs has been disabled by setting %s to false.", - DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY)); - } - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 5e2b2e6926c..b762acc62d0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -24,6 +24,7 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.EnumSet; import java.util.List; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Condition; @@ -39,6 +40,8 @@ import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIsNotDirectoryException; import org.apache.hadoop.fs.UnresolvedLinkException; +import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; @@ -47,6 +50,7 @@ import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.hdfs.protocol.AclException; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ClientProtocol; @@ -79,6 +83,7 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; /************************************************* * FSDirectory stores the filesystem directory state. @@ -125,6 +130,7 @@ public class FSDirectory implements Closeable { private final int contentCountLimit; // max content summary counts per run private final INodeMap inodeMap; // Synchronized by dirLock private long yieldCount = 0; // keep track of lock yield count. + private final int inodeXAttrsLimit; //inode xattrs max limit // lock to protect the directory and BlockMap private final ReentrantReadWriteLock dirLock; @@ -190,6 +196,12 @@ public class FSDirectory implements Closeable { this.maxDirItems = conf.getInt( DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_DEFAULT); + this.inodeXAttrsLimit = conf.getInt( + DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, + DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT); + Preconditions.checkArgument(this.inodeXAttrsLimit >= 0, + "Cannot set a negative limit on the number of xattrs per inode (%s).", + DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY); // We need a maximum maximum because by default, PB limits message sizes // to 64MB. This means we can only store approximately 6.7 million entries // per directory, but let's use 6.4 million for some safety. 
@@ -2756,6 +2768,116 @@ public class FSDirectory implements Closeable { readUnlock(); } } + + void removeXAttr(String src, XAttr xAttr) throws IOException { + writeLock(); + try { + XAttr removedXAttr = unprotectedRemoveXAttr(src, xAttr); + if (removedXAttr != null) { + fsImage.getEditLog().logRemoveXAttr(src, removedXAttr); + } else { + NameNode.stateChangeLog.info("DIR* FSDirectory.removeXAttr: XAttr " + + XAttrHelper.getPrefixName(xAttr) + + " does not exist on the path " + src); + } + } finally { + writeUnlock(); + } + } + + XAttr unprotectedRemoveXAttr(String src, + XAttr xAttr) throws IOException { + assert hasWriteLock(); + INodesInPath iip = getINodesInPath4Write(normalizePath(src), true); + INode inode = resolveLastINode(src, iip); + int snapshotId = iip.getLatestSnapshotId(); + List existingXAttrs = XAttrStorage.readINodeXAttrs(inode); + List newXAttrs = filterINodeXAttr(existingXAttrs, xAttr); + if (existingXAttrs.size() != newXAttrs.size()) { + XAttrStorage.updateINodeXAttrs(inode, newXAttrs, snapshotId); + return xAttr; + } + return null; + } + + List filterINodeXAttr(List existingXAttrs, + XAttr xAttr) throws QuotaExceededException { + if (existingXAttrs == null || existingXAttrs.isEmpty()) { + return existingXAttrs; + } + + List xAttrs = Lists.newArrayListWithCapacity(existingXAttrs.size()); + for (XAttr a : existingXAttrs) { + if (!(a.getNameSpace() == xAttr.getNameSpace() + && a.getName().equals(xAttr.getName()))) { + xAttrs.add(a); + } + } + + return xAttrs; + } + + void setXAttr(String src, XAttr xAttr, EnumSet flag, + boolean logRetryCache) throws IOException { + writeLock(); + try { + unprotectedSetXAttr(src, xAttr, flag); + fsImage.getEditLog().logSetXAttr(src, xAttr, logRetryCache); + } finally { + writeUnlock(); + } + } + + void unprotectedSetXAttr(String src, XAttr xAttr, + EnumSet flag) throws IOException { + assert hasWriteLock(); + INodesInPath iip = getINodesInPath4Write(normalizePath(src), true); + INode inode = resolveLastINode(src, iip); + int snapshotId = iip.getLatestSnapshotId(); + List existingXAttrs = XAttrStorage.readINodeXAttrs(inode); + List newXAttrs = setINodeXAttr(existingXAttrs, xAttr, flag); + XAttrStorage.updateINodeXAttrs(inode, newXAttrs, snapshotId); + } + + List setINodeXAttr(List existingXAttrs, XAttr xAttr, + EnumSet flag) throws QuotaExceededException, IOException { + List xAttrs = Lists.newArrayListWithCapacity( + existingXAttrs != null ? 
existingXAttrs.size() + 1 : 1); + boolean exist = false; + if (existingXAttrs != null) { + for (XAttr a: existingXAttrs) { + if ((a.getNameSpace() == xAttr.getNameSpace() + && a.getName().equals(xAttr.getName()))) { + exist = true; + } else { + xAttrs.add(a); + } + } + } + + XAttrSetFlag.validate(xAttr.getName(), exist, flag); + xAttrs.add(xAttr); + + if (xAttrs.size() > inodeXAttrsLimit) { + throw new IOException("Cannot add additional XAttr to inode, " + + "would exceed limit of " + inodeXAttrsLimit); + } + + return xAttrs; + } + + List getXAttrs(String src) throws IOException { + String srcs = normalizePath(src); + readLock(); + try { + INodesInPath iip = getLastINodeInPath(srcs, true); + INode inode = resolveLastINode(src, iip); + int snapshotId = iip.getPathSnapshotId(); + return XAttrStorage.readINodeXAttrs(inode, snapshotId); + } finally { + readUnlock(); + } + } private static INode resolveLastINode(String src, INodesInPath iip) throws FileNotFoundException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index a45a842714e..178e1feda32 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -37,6 +37,7 @@ import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; @@ -69,6 +70,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCacheDirectiveInfoOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveXAttrOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp; @@ -81,6 +83,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetReplicationOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetXAttrOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp; @@ -1032,13 +1035,6 @@ public class FSEditLog implements LogsPurgeable { logEdit(op); } - void logSetAcl(String src, List entries) { - SetAclOp op = SetAclOp.getInstance(); - op.src = src; - op.aclEntries = entries; - logEdit(op); - } - void logStartRollingUpgrade(long startTime) { RollingUpgradeOp op = RollingUpgradeOp.getStartInstance(cache.get()); op.setTime(startTime); @@ -1051,6 +1047,28 @@ public class FSEditLog implements LogsPurgeable { logEdit(op); } + void logSetAcl(String 
src, List entries) { + SetAclOp op = SetAclOp.getInstance(); + op.src = src; + op.aclEntries = entries; + logEdit(op); + } + + void logSetXAttr(String src, XAttr xAttr, boolean toLogRpcIds) { + final SetXAttrOp op = SetXAttrOp.getInstance(); + op.src = src; + op.xAttr = xAttr; + logRpcIds(op, toLogRpcIds); + logEdit(op); + } + + void logRemoveXAttr(String src, XAttr xAttr) { + final RemoveXAttrOp op = RemoveXAttrOp.getInstance(); + op.src = src; + op.xAttr = xAttr; + logEdit(op); + } + /** * Get all the journals this edit log is currently operating on. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index 94fce5a6417..2973ce3f817 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.io.InputStream; import java.util.Arrays; import java.util.EnumMap; +import java.util.EnumSet; import java.util.List; import org.apache.commons.logging.Log; @@ -32,6 +33,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; @@ -77,6 +79,8 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetReplicationOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetXAttrOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveXAttrOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp; @@ -802,6 +806,20 @@ public class FSEditLogLoader { fsDir.unprotectedSetAcl(setAclOp.src, setAclOp.aclEntries); break; } + case OP_SET_XATTR: { + SetXAttrOp setXAttrOp = (SetXAttrOp) op; + fsDir.unprotectedSetXAttr(setXAttrOp.src, setXAttrOp.xAttr, + EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE)); + if (toAddRetryCache) { + fsNamesys.addCacheEntry(setXAttrOp.rpcClientId, setXAttrOp.rpcCallId); + } + break; + } + case OP_REMOVE_XATTR: { + RemoveXAttrOp removeXAttrOp = (RemoveXAttrOp) op; + fsDir.unprotectedRemoveXAttr(removeXAttrOp.src, removeXAttrOp.xAttr); + break; + } default: throw new IOException("Invalid operation read " + op.opCode); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index ac98dd110f2..a821a5b4b0c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -54,6 +54,8 @@ import static 
org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_OWN import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_PERMISSIONS; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_QUOTA; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_REPLICATION; +import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_XATTR; +import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REMOVE_XATTR; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_START_LOG_SEGMENT; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SYMLINK; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_TIMES; @@ -79,12 +81,14 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.Options.Rename; +import org.apache.hadoop.fs.XAttrCodec; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclEntryScope; import org.apache.hadoop.fs.permission.AclEntryType; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DeprecatedUTF8; import org.apache.hadoop.hdfs.protocol.Block; @@ -95,6 +99,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEditLogProto; +import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrEditLogProto; import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.util.XMLUtils; @@ -188,6 +193,8 @@ public abstract class FSEditLogOp { OP_ROLLING_UPGRADE_START, "start")); inst.put(OP_ROLLING_UPGRADE_FINALIZE, new RollingUpgradeOp( OP_ROLLING_UPGRADE_FINALIZE, "finalize")); + inst.put(OP_SET_XATTR, new SetXAttrOp()); + inst.put(OP_REMOVE_XATTR, new RemoveXAttrOp()); } public FSEditLogOp get(FSEditLogOpCodes opcode) { @@ -3492,6 +3499,95 @@ public abstract class FSEditLogOp { return builder.toString(); } } + + static class RemoveXAttrOp extends FSEditLogOp { + XAttr xAttr; + String src; + + private RemoveXAttrOp() { + super(OP_REMOVE_XATTR); + } + + static RemoveXAttrOp getInstance() { + return new RemoveXAttrOp(); + } + + @Override + void readFields(DataInputStream in, int logVersion) throws IOException { + XAttrEditLogProto p = XAttrEditLogProto.parseDelimitedFrom(in); + src = p.getSrc(); + xAttr = PBHelper.convertXAttr(p.getXAttr()); + } + + @Override + public void writeFields(DataOutputStream out) throws IOException { + XAttrEditLogProto.Builder b = XAttrEditLogProto.newBuilder(); + if (src != null) { + b.setSrc(src); + } + b.setXAttr(PBHelper.convertXAttrProto(xAttr)); + b.build().writeDelimitedTo(out); + } + + @Override + protected void toXml(ContentHandler contentHandler) throws SAXException { + XMLUtils.addSaxString(contentHandler, "SRC", src); + appendXAttrToXml(contentHandler, xAttr); + } + + @Override + void fromXml(Stanza st) throws InvalidXmlException { + src = st.getValue("SRC"); + xAttr = readXAttrFromXml(st); + } + } + + static class SetXAttrOp extends 
FSEditLogOp { + XAttr xAttr; + String src; + + private SetXAttrOp() { + super(OP_SET_XATTR); + } + + static SetXAttrOp getInstance() { + return new SetXAttrOp(); + } + + @Override + void readFields(DataInputStream in, int logVersion) throws IOException { + XAttrEditLogProto p = XAttrEditLogProto.parseDelimitedFrom(in); + src = p.getSrc(); + xAttr = PBHelper.convertXAttr(p.getXAttr()); + readRpcIds(in, logVersion); + } + + @Override + public void writeFields(DataOutputStream out) throws IOException { + XAttrEditLogProto.Builder b = XAttrEditLogProto.newBuilder(); + if (src != null) { + b.setSrc(src); + } + b.setXAttr(PBHelper.convertXAttrProto(xAttr)); + b.build().writeDelimitedTo(out); + // clientId and callId + writeRpcIds(rpcClientId, rpcCallId, out); + } + + @Override + protected void toXml(ContentHandler contentHandler) throws SAXException { + XMLUtils.addSaxString(contentHandler, "SRC", src); + appendXAttrToXml(contentHandler, xAttr); + appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId); + } + + @Override + void fromXml(Stanza st) throws InvalidXmlException { + src = st.getValue("SRC"); + xAttr = readXAttrFromXml(st); + readRpcIdsFromXml(st); + } + } static class SetAclOp extends FSEditLogOp { List aclEntries = Lists.newArrayList(); @@ -4108,4 +4204,42 @@ public abstract class FSEditLogOp { } return aclEntries; } + + private static void appendXAttrToXml(ContentHandler contentHandler, + XAttr xAttr) throws SAXException { + contentHandler.startElement("", "", "XATTR", new AttributesImpl()); + XMLUtils.addSaxString(contentHandler, "NAMESPACE", + xAttr.getNameSpace().toString()); + XMLUtils.addSaxString(contentHandler, "NAME", xAttr.getName()); + if (xAttr.getValue() != null) { + try { + XMLUtils.addSaxString(contentHandler, "VALUE", + XAttrCodec.encodeValue(xAttr.getValue(), XAttrCodec.HEX)); + } catch (IOException e) { + throw new SAXException(e); + } + } + contentHandler.endElement("", "", "XATTR"); + } + + private static XAttr readXAttrFromXml(Stanza st) + throws InvalidXmlException { + if (!st.hasChildren("XATTR")) { + return null; + } + + Stanza a = st.getChildren("XATTR").get(0); + XAttr.Builder builder = new XAttr.Builder(); + builder.setNameSpace(XAttr.NameSpace.valueOf(a.getValue("NAMESPACE"))). 
+ setName(a.getValue("NAME")); + String v = a.getValueOrNull("VALUE"); + if (v != null) { + try { + builder.setValue(XAttrCodec.decodeValue(v)); + } catch (IOException e) { + throw new InvalidXmlException(e.toString()); + } + } + return builder.build(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java index 72c304017d6..bf4bbb4a60b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java @@ -70,6 +70,8 @@ public enum FSEditLogOpCodes { OP_SET_ACL ((byte) 40), OP_ROLLING_UPGRADE_START ((byte) 41), OP_ROLLING_UPGRADE_FINALIZE ((byte) 42), + OP_SET_XATTR ((byte) 43), + OP_REMOVE_XATTR ((byte) 44), // Note that the current range of the valid OP code is 0~127 OP_INVALID ((byte) -1); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index 0d83926f045..6ad0d719eef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -882,7 +882,7 @@ public class FSImageFormat { final long preferredBlockSize = in.readLong(); return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime, - accessTime, replication, preferredBlockSize); + accessTime, replication, preferredBlockSize, null); } public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in) @@ -902,10 +902,10 @@ public class FSImageFormat { final long nsQuota = in.readLong(); final long dsQuota = in.readLong(); - return nsQuota == -1L && dsQuota == -1L? - new INodeDirectoryAttributes.SnapshotCopy(name, permissions, null, modificationTime) + return nsQuota == -1L && dsQuota == -1L ? 
new INodeDirectoryAttributes.SnapshotCopy( + name, permissions, null, modificationTime, null) : new INodeDirectoryAttributes.CopyWithQuota(name, permissions, - null, modificationTime, nsQuota, dsQuota); + null, modificationTime, nsQuota, dsQuota, null); } private void loadFilesUnderConstruction(DataInput in, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java index 3c3ca49fc4b..2a4f2141d97 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java @@ -36,6 +36,7 @@ import org.apache.hadoop.fs.permission.AclEntryType; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto; import org.apache.hadoop.hdfs.protocolPB.PBHelper; @@ -49,7 +50,10 @@ import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructio import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto; +import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto; +import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; +import org.apache.hadoop.hdfs.server.namenode.XAttrFeature; import org.apache.hadoop.hdfs.util.ReadOnlyList; import com.google.common.base.Preconditions; @@ -75,6 +79,14 @@ public final class FSImageFormatPBINode { .values(); private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES = AclEntryType .values(); + + private static final int XATTR_NAMESPACE_MASK = 3; + private static final int XATTR_NAMESPACE_OFFSET = 30; + private static final int XATTR_NAME_MASK = (1 << 24) - 1; + private static final int XATTR_NAME_OFFSET = 6; + private static final XAttr.NameSpace[] XATTR_NAMESPACE_VALUES = + XAttr.NameSpace.values(); + public final static class Loader { public static PermissionStatus loadPermission(long id, @@ -102,6 +114,25 @@ public final class FSImageFormatPBINode { } return b.build(); } + + public static ImmutableList loadXAttrs( + XAttrFeatureProto proto, final String[] stringTable) { + ImmutableList.Builder b = ImmutableList.builder(); + for (XAttrCompactProto xAttrCompactProto : proto.getXAttrsList()) { + int v = xAttrCompactProto.getName(); + int nid = (v >> XATTR_NAME_OFFSET) & XATTR_NAME_MASK; + int ns = (v >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK; + String name = stringTable[nid]; + byte[] value = null; + if (xAttrCompactProto.getValue() != null) { + value = xAttrCompactProto.getValue().toByteArray(); + } + b.add(new XAttr.Builder().setNameSpace(XATTR_NAMESPACE_VALUES[ns]) + .setName(name).setValue(value).build()); + } + + return b.build(); + } public static INodeDirectory loadINodeDirectory(INodeSection.INode n, LoaderContext state) { @@ -122,6 +153,10 @@ public final class FSImageFormatPBINode { dir.addAclFeature(new AclFeature(loadAclEntries(d.getAcl(), 
state.getStringTable()))); } + if (d.hasXAttrs()) { + dir.addXAttrFeature(new XAttrFeature( + loadXAttrs(d.getXAttrs(), state.getStringTable()))); + } return dir; } @@ -254,6 +289,11 @@ public final class FSImageFormatPBINode { file.addAclFeature(new AclFeature(loadAclEntries(f.getAcl(), state.getStringTable()))); } + + if (f.hasXAttrs()) { + file.addXAttrFeature(new XAttrFeature( + loadXAttrs(f.getXAttrs(), state.getStringTable()))); + } // under-construction information if (f.hasFileUC()) { @@ -292,6 +332,11 @@ public final class FSImageFormatPBINode { } dir.rootDir.cloneModificationTime(root); dir.rootDir.clonePermissionStatus(root); + // root dir supports having extended attributes according to POSIX + final XAttrFeature f = root.getXAttrFeature(); + if (f != null) { + dir.rootDir.addXAttrFeature(f); + } } } @@ -317,6 +362,26 @@ public final class FSImageFormatPBINode { } return b; } + + private static XAttrFeatureProto.Builder buildXAttrs(XAttrFeature f, + final SaverContext.DeduplicationMap stringMap) { + XAttrFeatureProto.Builder b = XAttrFeatureProto.newBuilder(); + for (XAttr a : f.getXAttrs()) { + XAttrCompactProto.Builder xAttrCompactBuilder = XAttrCompactProto. + newBuilder(); + int v = ((a.getNameSpace().ordinal() & XATTR_NAMESPACE_MASK) << + XATTR_NAMESPACE_OFFSET) + | ((stringMap.getId(a.getName()) & XATTR_NAME_MASK) << + XATTR_NAME_OFFSET); + xAttrCompactBuilder.setName(v); + if (a.getValue() != null) { + xAttrCompactBuilder.setValue(PBHelper.getByteString(a.getValue())); + } + b.addXAttrs(xAttrCompactBuilder.build()); + } + + return b; + } public static INodeSection.INodeFile.Builder buildINodeFile( INodeFileAttributes file, final SaverContext state) { @@ -331,6 +396,10 @@ public final class FSImageFormatPBINode { if (f != null) { b.setAcl(buildAclEntries(f, state.getStringMap())); } + XAttrFeature xAttrFeature = file.getXAttrFeature(); + if (xAttrFeature != null) { + b.setXAttrs(buildXAttrs(xAttrFeature, state.getStringMap())); + } return b; } @@ -347,6 +416,10 @@ public final class FSImageFormatPBINode { if (f != null) { b.setAcl(buildAclEntries(f, state.getStringMap())); } + XAttrFeature xAttrFeature = dir.getXAttrFeature(); + if (xAttrFeature != null) { + b.setXAttrs(buildXAttrs(xAttrFeature, state.getStringMap())); + } return b; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 1da856908f2..4e9f3fd0fd6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -134,6 +134,8 @@ import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; import org.apache.hadoop.fs.UnresolvedLinkException; +import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; @@ -497,7 +499,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, private final RetryCache retryCache; - private final AclConfigFlag aclConfigFlag; + private final NNConf nnConf; /** * Set the last allocated inode id when fsimage or editlog is loaded. 
@@ -766,7 +768,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, this.isDefaultAuditLogger = auditLoggers.size() == 1 && auditLoggers.get(0) instanceof DefaultAuditLogger; this.retryCache = ignoreRetryCache ? null : initRetryCache(conf); - this.aclConfigFlag = new AclConfigFlag(conf); + this.nnConf = new NNConf(conf); } catch(IOException e) { LOG.error(getClass().getSimpleName() + " initialization failed.", e); close(); @@ -1103,8 +1105,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats, // so that the tailer starts from the right spot. dir.fsImage.updateLastAppliedTxIdFromWritten(); } - cacheManager.stopMonitorThread(); - cacheManager.clearDirectiveStats(); + if (cacheManager != null) { + cacheManager.stopMonitorThread(); + cacheManager.clearDirectiveStats(); + } blockManager.getDatanodeManager().clearPendingCachingCommands(); blockManager.getDatanodeManager().setShouldSendCachingCommands(false); // Don't want to keep replication queues when not in Active. @@ -7770,7 +7774,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, } void modifyAclEntries(String src, List aclSpec) throws IOException { - aclConfigFlag.checkForApiCall(); + nnConf.checkAclsConfigFlag(); HdfsFileStatus resultingStat = null; FSPermissionChecker pc = getPermissionChecker(); checkOperation(OperationCategory.WRITE); @@ -7792,7 +7796,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, } void removeAclEntries(String src, List aclSpec) throws IOException { - aclConfigFlag.checkForApiCall(); + nnConf.checkAclsConfigFlag(); HdfsFileStatus resultingStat = null; FSPermissionChecker pc = getPermissionChecker(); checkOperation(OperationCategory.WRITE); @@ -7814,7 +7818,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, } void removeDefaultAcl(String src) throws IOException { - aclConfigFlag.checkForApiCall(); + nnConf.checkAclsConfigFlag(); HdfsFileStatus resultingStat = null; FSPermissionChecker pc = getPermissionChecker(); checkOperation(OperationCategory.WRITE); @@ -7836,7 +7840,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, } void removeAcl(String src) throws IOException { - aclConfigFlag.checkForApiCall(); + nnConf.checkAclsConfigFlag(); HdfsFileStatus resultingStat = null; FSPermissionChecker pc = getPermissionChecker(); checkOperation(OperationCategory.WRITE); @@ -7858,7 +7862,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, } void setAcl(String src, List aclSpec) throws IOException { - aclConfigFlag.checkForApiCall(); + nnConf.checkAclsConfigFlag(); HdfsFileStatus resultingStat = null; FSPermissionChecker pc = getPermissionChecker(); checkOperation(OperationCategory.WRITE); @@ -7880,7 +7884,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, } AclStatus getAclStatus(String src) throws IOException { - aclConfigFlag.checkForApiCall(); + nnConf.checkAclsConfigFlag(); FSPermissionChecker pc = getPermissionChecker(); checkOperation(OperationCategory.READ); readLock(); @@ -7894,6 +7898,167 @@ public class FSNamesystem implements Namesystem, FSClusterStats, readUnlock(); } } + + /** + * Set xattr for a file or directory. 
+ * + * @param src + * - path on which it sets the xattr + * @param xAttr + * - xAttr details to set + * @param flag + * - xAttrs flags + * @throws AccessControlException + * @throws SafeModeException + * @throws UnresolvedLinkException + * @throws IOException + */ + void setXAttr(String src, XAttr xAttr, EnumSet flag) + throws AccessControlException, SafeModeException, + UnresolvedLinkException, IOException { + CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); + if (cacheEntry != null && cacheEntry.isSuccess()) { + return; // Return previous response + } + boolean success = false; + try { + setXAttrInt(src, xAttr, flag, cacheEntry != null); + success = true; + } catch (AccessControlException e) { + logAuditEvent(false, "setXAttr", src); + throw e; + } finally { + RetryCache.setState(cacheEntry, success); + } + } + + private void setXAttrInt(String src, XAttr xAttr, EnumSet flag, + boolean logRetryCache) throws IOException { + nnConf.checkXAttrsConfigFlag(); + checkXAttrSize(xAttr); + HdfsFileStatus resultingStat = null; + FSPermissionChecker pc = getPermissionChecker(); + XAttrPermissionFilter.checkPermissionForApi(pc, xAttr); + checkOperation(OperationCategory.WRITE); + byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); + writeLock(); + try { + checkOperation(OperationCategory.WRITE); + checkNameNodeSafeMode("Cannot set XAttr on " + src); + src = FSDirectory.resolvePath(src, pathComponents, dir); + if (isPermissionEnabled) { + checkOwner(pc, src); + checkPathAccess(pc, src, FsAction.WRITE); + } + dir.setXAttr(src, xAttr, flag, logRetryCache); + resultingStat = getAuditFileInfo(src, false); + } finally { + writeUnlock(); + } + getEditLog().logSync(); + logAuditEvent(true, "setXAttr", src, null, resultingStat); + } + + /** + * Verifies that the combined size of the name and value of an xattr is within + * the configured limit. Setting a limit of zero disables this check. + */ + private void checkXAttrSize(XAttr xAttr) { + if (nnConf.xattrMaxSize == 0) { + return; + } + int size = xAttr.getName().getBytes(Charsets.UTF_8).length; + if (xAttr.getValue() != null) { + size += xAttr.getValue().length; + } + if (size > nnConf.xattrMaxSize) { + throw new HadoopIllegalArgumentException( + "The XAttr is too big. The maximum combined size of the" + + " name and value is " + nnConf.xattrMaxSize + + ", but the total size is " + size); + } + } + + List getXAttrs(String src, List xAttrs) throws IOException { + nnConf.checkXAttrsConfigFlag(); + FSPermissionChecker pc = getPermissionChecker(); + boolean getAll = xAttrs == null || xAttrs.isEmpty(); + List filteredXAttrs = null; + if (!getAll) { + filteredXAttrs = XAttrPermissionFilter.filterXAttrsForApi(pc, xAttrs); + if (filteredXAttrs.isEmpty()) { + return filteredXAttrs; + } + } + checkOperation(OperationCategory.READ); + readLock(); + try { + checkOperation(OperationCategory.READ); + if (isPermissionEnabled) { + checkPathAccess(pc, src, FsAction.READ); + } + List all = dir.getXAttrs(src); + List filteredAll = XAttrPermissionFilter. 
+ filterXAttrsForApi(pc, all); + if (getAll) { + return filteredAll; + } else { + if (filteredAll == null || filteredAll.isEmpty()) { + return null; + } + List toGet = Lists.newArrayListWithCapacity(filteredXAttrs.size()); + for (XAttr xAttr : filteredXAttrs) { + for (XAttr a : filteredAll) { + if (xAttr.getNameSpace() == a.getNameSpace() + && xAttr.getName().equals(a.getName())) { + toGet.add(a); + break; + } + } + } + return toGet; + } + } catch (AccessControlException e) { + logAuditEvent(false, "getXAttrs", src); + throw e; + } finally { + readUnlock(); + } + } + + void removeXAttr(String src, XAttr xAttr) throws IOException { + nnConf.checkXAttrsConfigFlag(); + HdfsFileStatus resultingStat = null; + FSPermissionChecker pc = getPermissionChecker(); + try { + XAttrPermissionFilter.checkPermissionForApi(pc, xAttr); + } catch (AccessControlException e) { + logAuditEvent(false, "removeXAttr", src); + throw e; + } + checkOperation(OperationCategory.WRITE); + byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); + writeLock(); + try { + checkOperation(OperationCategory.WRITE); + checkNameNodeSafeMode("Cannot remove XAttr entry on " + src); + src = FSDirectory.resolvePath(src, pathComponents, dir); + if (isPermissionEnabled) { + checkOwner(pc, src); + checkPathAccess(pc, src, FsAction.WRITE); + } + + dir.removeXAttr(src, xAttr); + resultingStat = getAuditFileInfo(src, false); + } catch (AccessControlException e) { + logAuditEvent(false, "removeXAttr", src); + throw e; + } finally { + writeUnlock(); + } + getEditLog().logSync(); + logAuditEvent(true, "removeXAttr", src, null, resultingStat); + } /** * Default AuditLogger implementation; used when no access logger is @@ -7980,6 +8145,5 @@ public class FSNamesystem implements Namesystem, FSClusterStats, logger.addAppender(asyncAppender); } } - } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java index b62670842d7..b1e4982165e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java @@ -177,6 +177,44 @@ public abstract class INode implements INodeAttributes, Diff.Element { nodeToUpdate.removeAclFeature(); return nodeToUpdate; } + + /** + * @param snapshotId + * if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result + * from the given snapshot; otherwise, get the result from the + * current inode. 
+ * @return XAttrFeature + */ + abstract XAttrFeature getXAttrFeature(int snapshotId); + + @Override + public final XAttrFeature getXAttrFeature() { + return getXAttrFeature(Snapshot.CURRENT_STATE_ID); + } + + /** + * Set XAttrFeature. + */ + abstract void addXAttrFeature(XAttrFeature xAttrFeature); + + final INode addXAttrFeature(XAttrFeature xAttrFeature, int latestSnapshotId) + throws QuotaExceededException { + final INode nodeToUpdate = recordModification(latestSnapshotId); + nodeToUpdate.addXAttrFeature(xAttrFeature); + return nodeToUpdate; + } + + /** + * Remove XAttrFeature. + */ + abstract void removeXAttrFeature(); + + final INode removeXAttrFeature(int latestSnapshotId) + throws QuotaExceededException { + final INode nodeToUpdate = recordModification(latestSnapshotId); + nodeToUpdate.removeXAttrFeature(); + return nodeToUpdate; + } /** * @return if the given snapshot id is {@link Snapshot#CURRENT_STATE_ID}, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java index b6c9b981082..09abf272ccb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java @@ -21,6 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.server.namenode.INodeWithAdditionalFields.PermissionStatusFormat; +import org.apache.hadoop.hdfs.server.namenode.XAttrFeature; /** * The attributes of an inode. @@ -50,6 +51,9 @@ public interface INodeAttributes { /** @return the ACL feature. */ public AclFeature getAclFeature(); + + /** @return the XAttrs feature. */ + public XAttrFeature getXAttrFeature(); /** @return the modification time.
*/ public long getModificationTime(); @@ -64,14 +68,17 @@ public interface INodeAttributes { private final AclFeature aclFeature; private final long modificationTime; private final long accessTime; + private XAttrFeature xAttrFeature; SnapshotCopy(byte[] name, PermissionStatus permissions, - AclFeature aclFeature, long modificationTime, long accessTime) { + AclFeature aclFeature, long modificationTime, long accessTime, + XAttrFeature xAttrFeature) { this.name = name; this.permission = PermissionStatusFormat.toLong(permissions); this.aclFeature = aclFeature; this.modificationTime = modificationTime; this.accessTime = accessTime; + this.xAttrFeature = xAttrFeature; } SnapshotCopy(INode inode) { @@ -80,6 +87,7 @@ public interface INodeAttributes { this.aclFeature = inode.getAclFeature(); this.modificationTime = inode.getModificationTime(); this.accessTime = inode.getAccessTime(); + this.xAttrFeature = inode.getXAttrFeature(); } @Override @@ -128,5 +136,10 @@ public interface INodeAttributes { public final long getAccessTime() { return accessTime; } + + @Override + public final XAttrFeature getXAttrFeature() { + return xAttrFeature; + } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java index 861e85226d8..d95fa4651b2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.hdfs.server.namenode.XAttrFeature; import com.google.common.base.Preconditions; @@ -35,8 +36,9 @@ public interface INodeDirectoryAttributes extends INodeAttributes { public static class SnapshotCopy extends INodeAttributes.SnapshotCopy implements INodeDirectoryAttributes { public SnapshotCopy(byte[] name, PermissionStatus permissions, - AclFeature aclFeature, long modificationTime) { - super(name, permissions, aclFeature, modificationTime, 0L); + AclFeature aclFeature, long modificationTime, + XAttrFeature xAttrsFeature) { + super(name, permissions, aclFeature, modificationTime, 0L, xAttrsFeature); } public SnapshotCopy(INodeDirectory dir) { @@ -63,8 +65,8 @@ public interface INodeDirectoryAttributes extends INodeAttributes { public CopyWithQuota(byte[] name, PermissionStatus permissions, AclFeature aclFeature, long modificationTime, long nsQuota, - long dsQuota) { - super(name, permissions, aclFeature, modificationTime); + long dsQuota, XAttrFeature xAttrsFeature) { + super(name, permissions, aclFeature, modificationTime, xAttrsFeature); this.nsQuota = nsQuota; this.dsQuota = dsQuota; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java index 7e656f17f15..127642506ae 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java @@ -20,6 +20,7 @@ package 
org.apache.hadoop.hdfs.server.namenode; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.server.namenode.INodeFile.HeaderFormat; +import org.apache.hadoop.hdfs.server.namenode.XAttrFeature; /** * The attributes of a file. @@ -42,8 +43,9 @@ public interface INodeFileAttributes extends INodeAttributes { public SnapshotCopy(byte[] name, PermissionStatus permissions, AclFeature aclFeature, long modificationTime, long accessTime, - short replication, long preferredBlockSize) { - super(name, permissions, aclFeature, modificationTime, accessTime); + short replication, long preferredBlockSize, XAttrFeature xAttrsFeature) { + super(name, permissions, aclFeature, modificationTime, accessTime, + xAttrsFeature); final long h = HeaderFormat.combineReplication(0L, replication); header = HeaderFormat.combinePreferredBlockSize(h, preferredBlockSize); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java index eba7cd43033..461f075a0b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java @@ -28,6 +28,7 @@ import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; +import org.apache.hadoop.hdfs.server.namenode.XAttrFeature; import com.google.common.base.Preconditions; @@ -228,6 +229,21 @@ public abstract class INodeReference extends INode { final void removeAclFeature() { referred.removeAclFeature(); } + + @Override + final XAttrFeature getXAttrFeature(int snapshotId) { + return referred.getXAttrFeature(snapshotId); + } + + @Override + final void addXAttrFeature(XAttrFeature xAttrFeature) { + referred.addXAttrFeature(xAttrFeature); + } + + @Override + final void removeXAttrFeature() { + referred.removeXAttrFeature(); + } @Override public final short getFsPermissionShort() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java index 2b709b2c332..deb3ada16e7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.hdfs.server.namenode.AclFeature; +import org.apache.hadoop.hdfs.server.namenode.XAttrFeature; /** * An {@link INode} representing a symbolic link. 
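In the INodeReference hunk above, getXAttrFeature, addXAttrFeature and removeXAttrFeature all forward to the referred inode, so a file reachable through several paths (for example after a rename under a snapshot) has exactly one xattr store. A minimal sketch of that delegation pattern, using stand-in types rather than real HDFS classes:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Stand-in for an inode and its xattr feature; illustrative only.
interface XAttrHolder {
  List<String> getXAttrs();
  void setXAttrs(List<String> xattrs);
}

// Plays the role of INodeWithAdditionalFields: actually owns the xattrs.
class ConcreteHolder implements XAttrHolder {
  private List<String> xattrs = Collections.emptyList();
  public List<String> getXAttrs() { return xattrs; }
  public void setXAttrs(List<String> xattrs) { this.xattrs = xattrs; }
}

// Plays the role of INodeReference: owns nothing, forwards everything.
class ReferenceHolder implements XAttrHolder {
  private final XAttrHolder referred;
  ReferenceHolder(XAttrHolder referred) { this.referred = referred; }
  public List<String> getXAttrs() { return referred.getXAttrs(); }
  public void setXAttrs(List<String> xattrs) { referred.setXAttrs(xattrs); }
}

public class ReferenceDelegationSketch {
  public static void main(String[] args) {
    ConcreteHolder file = new ConcreteHolder();
    XAttrHolder viaSourcePath = new ReferenceHolder(file);
    XAttrHolder viaSnapshotPath = new ReferenceHolder(file);
    viaSourcePath.setXAttrs(Arrays.asList("user.a")); // write via one path
    System.out.println(viaSnapshotPath.getXAttrs());  // [user.a] via the other
  }
}

INodeSymlink, by contrast, rejects all three operations outright, as the next hunk shows.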
@@ -130,4 +131,19 @@ public class INodeSymlink extends INodeWithAdditionalFields { public void addAclFeature(AclFeature f) { throw new UnsupportedOperationException("ACLs are not supported on symlinks"); } + + @Override + final XAttrFeature getXAttrFeature(int snapshotId) { + throw new UnsupportedOperationException("XAttrs are not supported on symlinks"); + } + + @Override + public void removeXAttrFeature() { + throw new UnsupportedOperationException("XAttrs are not supported on symlinks"); + } + + @Override + public void addXAttrFeature(XAttrFeature f) { + throw new UnsupportedOperationException("XAttrs are not supported on symlinks"); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java index 77f9bde78d2..94ce12f65ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java @@ -23,6 +23,7 @@ import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.server.namenode.INode.Feature; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; +import org.apache.hadoop.hdfs.server.namenode.XAttrFeature; import org.apache.hadoop.util.LightWeightGSet.LinkedElement; import com.google.common.base.Preconditions; @@ -340,6 +341,30 @@ public abstract class INodeWithAdditionalFields extends INode addFeature(f); } + + @Override + XAttrFeature getXAttrFeature(int snapshotId) { + if (snapshotId != Snapshot.CURRENT_STATE_ID) { + return getSnapshotINode(snapshotId).getXAttrFeature(); + } + + return getFeature(XAttrFeature.class); + } + + @Override + public void removeXAttrFeature() { + XAttrFeature f = getXAttrFeature(); + Preconditions.checkNotNull(f); + removeFeature(f); + } + + @Override + public void addXAttrFeature(XAttrFeature f) { + XAttrFeature f1 = getXAttrFeature(); + Preconditions.checkState(f1 == null, "Duplicated XAttrFeature"); + + addFeature(f); + } public final Feature[] getFeatures() { return features; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNConf.java new file mode 100644 index 00000000000..94dc6b9bf30 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNConf.java @@ -0,0 +1,104 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; + +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.protocol.AclException; + +import com.google.common.base.Preconditions; + +/** + * This class is a common place for NN configuration. + */ +@InterfaceAudience.Private +final class NNConf { + /** + * Support for ACLs is controlled by a configuration flag. If the + * configuration flag is false, then the NameNode will reject all + * ACL-related operations. + */ + private final boolean aclsEnabled; + + /** + * Support for XAttrs is controlled by a configuration flag. If the + * configuration flag is false, then the NameNode will reject all + * XAttr-related operations. + */ + private final boolean xattrsEnabled; + + /** + * Maximum size of a single name-value extended attribute. + */ + final int xattrMaxSize; + + /** + * Creates a new NNConf from configuration. + * + * @param conf Configuration to check + */ + public NNConf(Configuration conf) { + aclsEnabled = conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, + DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_DEFAULT); + LogFactory.getLog(NNConf.class).info("ACLs enabled? " + aclsEnabled); + xattrsEnabled = conf.getBoolean( + DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, + DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_DEFAULT); + LogFactory.getLog(NNConf.class).info("XAttrs enabled? " + xattrsEnabled); + xattrMaxSize = conf.getInt( + DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, + DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT); + Preconditions.checkArgument(xattrMaxSize >= 0, + "Cannot set a negative value for the maximum size of an xattr (%s).", + DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY); + final String unlimited = xattrMaxSize == 0 ? " (unlimited)" : ""; + LogFactory.getLog(NNConf.class).info( + "Maximum size of an xattr: " + xattrMaxSize + unlimited); + } + + /** + * Checks the flag on behalf of an ACL API call. + * + * @throws AclException if ACLs are disabled + */ + public void checkAclsConfigFlag() throws AclException { + if (!aclsEnabled) { + throw new AclException(String.format( + "The ACL operation has been rejected. " + + "Support for ACLs has been disabled by setting %s to false.", + DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY)); + } + } + + /** + * Checks the flag on behalf of an XAttr API call. + * @throws IOException if XAttrs are disabled + */ + public void checkXAttrsConfigFlag() throws IOException { + if (!xattrsEnabled) { + throw new IOException(String.format( + "The XAttr operation has been rejected. 
" + + "Support for XAttrs has been disabled by setting %s to false.", + DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY)); + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java index 4700837d400..6ae2806d8f1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java @@ -64,7 +64,8 @@ public class NameNodeLayoutVersion { */ public static enum Feature implements LayoutFeature { ROLLING_UPGRADE(-55, -53, "Support rolling upgrade", false), - EDITLOG_LENGTH(-56, "Add length field to every edit log op"); + EDITLOG_LENGTH(-56, "Add length field to every edit log op"), + XATTRS(-57, "Extended attributes"); private final FeatureInfo info; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 3df579104d5..527c2e35da0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -49,6 +49,8 @@ import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnresolvedLinkException; +import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; @@ -1377,5 +1379,22 @@ class NameNodeRpcServer implements NamenodeProtocols { public AclStatus getAclStatus(String src) throws IOException { return namesystem.getAclStatus(src); } + + @Override + public void setXAttr(String src, XAttr xAttr, EnumSet flag) + throws IOException { + namesystem.setXAttr(src, xAttr, flag); + } + + @Override + public List getXAttrs(String src, List xAttrs) + throws IOException { + return namesystem.getXAttrs(src, xAttrs); + } + + @Override + public void removeXAttr(String src, XAttr xAttr) throws IOException { + namesystem.removeXAttr(src, xAttr); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFeature.java new file mode 100644 index 00000000000..2e12578525d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFeature.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.hdfs.server.namenode.INode; + +import com.google.common.collect.ImmutableList; + +/** + * Feature for extended attributes. + */ +@InterfaceAudience.Private +public class XAttrFeature implements INode.Feature { + public static final ImmutableList EMPTY_ENTRY_LIST = + ImmutableList.of(); + + private final ImmutableList xAttrs; + + public XAttrFeature(ImmutableList xAttrs) { + this.xAttrs = xAttrs; + } + + public ImmutableList getXAttrs() { + return xAttrs; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java new file mode 100644 index 00000000000..7fed362b9b2 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java @@ -0,0 +1,82 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.namenode; + +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.hdfs.XAttrHelper; +import org.apache.hadoop.security.AccessControlException; + +import com.google.common.collect.Lists; + +/** + * There are four types of extended attributes defined by the + * following namespaces: + *
+ * USER - extended user attributes: these can be assigned to files and + * directories to store arbitrary additional information. The access + * permissions for user attributes are defined by the file permission + * bits. + *
+ * TRUSTED - trusted extended attributes: these are visible/accessible + * only to/by the super user. + *
+ * SECURITY - extended security attributes: these are used by the HDFS + * core for security purposes and are not available through admin/user + * API. + *
+ * SYSTEM - extended system attributes: these are used by the HDFS + * core and are not available through admin/user API. + */ +@InterfaceAudience.Private +public class XAttrPermissionFilter { + + static void checkPermissionForApi(FSPermissionChecker pc, XAttr xAttr) + throws AccessControlException { + if (xAttr.getNameSpace() == XAttr.NameSpace.USER || + (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED && + pc.isSuperUser())) { + return; + } + throw new AccessControlException("User doesn't have permission for xattr: " + + XAttrHelper.getPrefixName(xAttr)); + } + + static List filterXAttrsForApi(FSPermissionChecker pc, + List xAttrs) { + assert xAttrs != null : "xAttrs can not be null"; + if (xAttrs == null || xAttrs.isEmpty()) { + return xAttrs; + } + + List filteredXAttrs = Lists.newArrayListWithCapacity(xAttrs.size()); + for (XAttr xAttr : xAttrs) { + if (xAttr.getNameSpace() == XAttr.NameSpace.USER) { + filteredXAttrs.add(xAttr); + } else if (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED && + pc.isSuperUser()) { + filteredXAttrs.add(xAttr); + } + } + + return filteredXAttrs; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java new file mode 100644 index 00000000000..fdb549648f2 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java @@ -0,0 +1,80 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.namenode; + +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.hdfs.protocol.QuotaExceededException; +import org.apache.hadoop.hdfs.server.namenode.INode; + +import com.google.common.collect.ImmutableList; + +/** + * XAttrStorage is used to read and set xattrs for an inode. + */ +@InterfaceAudience.Private +public class XAttrStorage { + + /** + * Reads the existing extended attributes of an inode. If the + * inode does not have an XAttr, then this method + * returns an empty list. + * @param inode INode to read + * @param snapshotId + * @return List XAttr list. + */ + public static List readINodeXAttrs(INode inode, int snapshotId) { + XAttrFeature f = inode.getXAttrFeature(snapshotId); + return f == null ? ImmutableList. of() : f.getXAttrs(); + } + + /** + * Reads the existing extended attributes of an inode. + * @param inode INode to read. + * @return List XAttr list. + */ + public static List readINodeXAttrs(INode inode) { + XAttrFeature f = inode.getXAttrFeature(); + return f == null ? ImmutableList. of() : f.getXAttrs(); + } + + /** + * Update xattrs of inode. 
+ * @param inode INode to update + * @param xAttrs to update xAttrs. + * @param snapshotId id of the latest snapshot of the inode + */ + public static void updateINodeXAttrs(INode inode, + List xAttrs, int snapshotId) throws QuotaExceededException { + if (xAttrs == null || xAttrs.isEmpty()) { + if (inode.getXAttrFeature() != null) { + inode.removeXAttrFeature(snapshotId); + } + return; + } + + ImmutableList newXAttrs = ImmutableList.copyOf(xAttrs); + if (inode.getXAttrFeature() != null) { + inode.removeXAttrFeature(snapshotId); + } + inode.addXAttrFeature(new XAttrFeature(newXAttrs), snapshotId); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java index 4f0e507d9c1..b36b4ab965b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java @@ -65,6 +65,7 @@ import org.apache.hadoop.hdfs.server.namenode.SaveNamespaceContext; import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff; import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root; +import org.apache.hadoop.hdfs.server.namenode.XAttrFeature; import org.apache.hadoop.hdfs.util.Diff.ListType; import com.google.common.base.Preconditions; @@ -215,11 +216,16 @@ public class FSImageFormatPBSnapshot { acl = new AclFeature(FSImageFormatPBINode.Loader.loadAclEntries( fileInPb.getAcl(), state.getStringTable())); } + XAttrFeature xAttrs = null; + if (fileInPb.hasXAttrs()) { + xAttrs = new XAttrFeature(FSImageFormatPBINode.Loader.loadXAttrs( + fileInPb.getXAttrs(), state.getStringTable())); + } copy = new INodeFileAttributes.SnapshotCopy(pbf.getName() .toByteArray(), permission, acl, fileInPb.getModificationTime(), fileInPb.getAccessTime(), (short) fileInPb.getReplication(), - fileInPb.getPreferredBlockSize()); + fileInPb.getPreferredBlockSize(), xAttrs); } FileDiff diff = new FileDiff(pbf.getSnapshotId(), copy, null, @@ -310,16 +316,21 @@ public class FSImageFormatPBSnapshot { acl = new AclFeature(FSImageFormatPBINode.Loader.loadAclEntries( dirCopyInPb.getAcl(), state.getStringTable())); } + XAttrFeature xAttrs = null; + if (dirCopyInPb.hasXAttrs()) { + xAttrs = new XAttrFeature(FSImageFormatPBINode.Loader.loadXAttrs( + dirCopyInPb.getXAttrs(), state.getStringTable())); + } long modTime = dirCopyInPb.getModificationTime(); boolean noQuota = dirCopyInPb.getNsQuota() == -1 && dirCopyInPb.getDsQuota() == -1; copy = noQuota ? 
new INodeDirectoryAttributes.SnapshotCopy(name, - permission, acl, modTime) + permission, acl, modTime, xAttrs) : new INodeDirectoryAttributes.CopyWithQuota(name, permission, acl, modTime, dirCopyInPb.getNsQuota(), - dirCopyInPb.getDsQuota()); + dirCopyInPb.getDsQuota(), xAttrs); } // load created list List clist = loadCreatedList(in, dir, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java index 59f73f3caef..c1450e02134 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java @@ -34,8 +34,10 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageFormat; import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization; import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; +import org.apache.hadoop.hdfs.server.namenode.XAttrFeature; import org.apache.hadoop.hdfs.util.ReadOnlyList; +import com.google.common.base.Predicate; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; @@ -144,9 +146,20 @@ public class Snapshot implements Comparable { /** The root directory of the snapshot. */ static public class Root extends INodeDirectory { Root(INodeDirectory other) { - // Always preserve ACL. + // Always preserve ACL, XAttr. super(other, false, Lists.newArrayList( - Iterables.filter(Arrays.asList(other.getFeatures()), AclFeature.class)) + Iterables.filter(Arrays.asList(other.getFeatures()), new Predicate() { + + @Override + public boolean apply(Feature input) { + if (AclFeature.class.isInstance(input) + || XAttrFeature.class.isInstance(input)) { + return true; + } + return false; + } + + })) .toArray(new Feature[0])); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java index eb34a165c2a..0ffb813611e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java @@ -28,6 +28,7 @@ import java.net.URISyntaxException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.EnumSet; +import java.util.List; import javax.servlet.ServletContext; import javax.servlet.http.HttpServletRequest; @@ -53,8 +54,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.hdfs.StorageType; +import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; @@ -103,6 +106,10 @@ import org.apache.hadoop.hdfs.web.resources.SnapshotNameParam; import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam; import 
org.apache.hadoop.hdfs.web.resources.UriFsPathParam; import org.apache.hadoop.hdfs.web.resources.UserParam; +import org.apache.hadoop.hdfs.web.resources.XAttrEncodingParam; +import org.apache.hadoop.hdfs.web.resources.XAttrNameParam; +import org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam; +import org.apache.hadoop.hdfs.web.resources.XAttrValueParam; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException; @@ -344,6 +351,12 @@ public class NamenodeWebHdfsMethods { final TokenArgumentParam delegationTokenArgument, @QueryParam(AclPermissionParam.NAME) @DefaultValue(AclPermissionParam.DEFAULT) final AclPermissionParam aclPermission, + @QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT) + final XAttrNameParam xattrName, + @QueryParam(XAttrValueParam.NAME) @DefaultValue(XAttrValueParam.DEFAULT) + final XAttrValueParam xattrValue, + @QueryParam(XAttrSetFlagParam.NAME) @DefaultValue(XAttrSetFlagParam.DEFAULT) + final XAttrSetFlagParam xattrSetFlag, @QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT) final SnapshotNameParam snapshotName, @QueryParam(OldSnapshotNameParam.NAME) @DefaultValue(OldSnapshotNameParam.DEFAULT) @@ -352,7 +365,8 @@ public class NamenodeWebHdfsMethods { return put(ugi, delegation, username, doAsUser, ROOT, op, destination, owner, group, permission, overwrite, bufferSize, replication, blockSize, modificationTime, accessTime, renameOptions, createParent, - delegationTokenArgument, aclPermission, snapshotName, oldSnapshotName); + delegationTokenArgument, aclPermission, xattrName, xattrValue, + xattrSetFlag, snapshotName, oldSnapshotName); } /** Handle HTTP PUT request. */ @@ -399,6 +413,12 @@ public class NamenodeWebHdfsMethods { final TokenArgumentParam delegationTokenArgument, @QueryParam(AclPermissionParam.NAME) @DefaultValue(AclPermissionParam.DEFAULT) final AclPermissionParam aclPermission, + @QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT) + final XAttrNameParam xattrName, + @QueryParam(XAttrValueParam.NAME) @DefaultValue(XAttrValueParam.DEFAULT) + final XAttrValueParam xattrValue, + @QueryParam(XAttrSetFlagParam.NAME) @DefaultValue(XAttrSetFlagParam.DEFAULT) + final XAttrSetFlagParam xattrSetFlag, @QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT) final SnapshotNameParam snapshotName, @QueryParam(OldSnapshotNameParam.NAME) @DefaultValue(OldSnapshotNameParam.DEFAULT) @@ -408,7 +428,8 @@ public class NamenodeWebHdfsMethods { init(ugi, delegation, username, doAsUser, path, op, destination, owner, group, permission, overwrite, bufferSize, replication, blockSize, modificationTime, accessTime, renameOptions, delegationTokenArgument, - aclPermission, snapshotName, oldSnapshotName); + aclPermission, xattrName, xattrValue, xattrSetFlag, snapshotName, + oldSnapshotName); return ugi.doAs(new PrivilegedExceptionAction() { @Override @@ -418,8 +439,8 @@ public class NamenodeWebHdfsMethods { path.getAbsolutePath(), op, destination, owner, group, permission, overwrite, bufferSize, replication, blockSize, modificationTime, accessTime, renameOptions, createParent, - delegationTokenArgument, aclPermission, snapshotName, - oldSnapshotName); + delegationTokenArgument, aclPermission, xattrName, xattrValue, + xattrSetFlag, snapshotName, oldSnapshotName); } finally { reset(); } @@ -448,6 +469,9 @@ public class NamenodeWebHdfsMethods { final CreateParentParam createParent, final TokenArgumentParam 
delegationTokenArgument, final AclPermissionParam aclPermission, + final XAttrNameParam xattrName, + final XAttrValueParam xattrValue, + final XAttrSetFlagParam xattrSetFlag, final SnapshotNameParam snapshotName, final OldSnapshotNameParam oldSnapshotName ) throws IOException, URISyntaxException { @@ -549,6 +573,17 @@ public class NamenodeWebHdfsMethods { np.setAcl(fullpath, aclPermission.getAclPermission(true)); return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build(); } + case SETXATTR: { + np.setXAttr( + fullpath, + XAttrHelper.buildXAttr(xattrName.getXAttrName(), + xattrValue.getXAttrValue()), xattrSetFlag.getFlag()); + return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build(); + } + case REMOVEXATTR: { + np.removeXAttr(fullpath, XAttrHelper.buildXAttr(xattrName.getXAttrName())); + return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build(); + } case CREATESNAPSHOT: { String snapshotPath = np.createSnapshot(fullpath, snapshotName.getValue()); final String js = JsonUtil.toJsonString( @@ -675,10 +710,14 @@ public class NamenodeWebHdfsMethods { @QueryParam(RenewerParam.NAME) @DefaultValue(RenewerParam.DEFAULT) final RenewerParam renewer, @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT) - final BufferSizeParam bufferSize + final BufferSizeParam bufferSize, + @QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT) + final XAttrNameParam xattrName, + @QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT) + final XAttrEncodingParam xattrEncoding ) throws IOException, InterruptedException { - return get(ugi, delegation, username, doAsUser, ROOT, op, - offset, length, renewer, bufferSize); + return get(ugi, delegation, username, doAsUser, ROOT, op, offset, length, + renewer, bufferSize, xattrName, xattrEncoding); } /** Handle HTTP GET request. 
*/ @@ -703,18 +742,23 @@ public class NamenodeWebHdfsMethods { @QueryParam(RenewerParam.NAME) @DefaultValue(RenewerParam.DEFAULT) final RenewerParam renewer, @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT) - final BufferSizeParam bufferSize + final BufferSizeParam bufferSize, + @QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT) + final XAttrNameParam xattrName, + @QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT) + final XAttrEncodingParam xattrEncoding ) throws IOException, InterruptedException { - init(ugi, delegation, username, doAsUser, path, op, - offset, length, renewer, bufferSize); + init(ugi, delegation, username, doAsUser, path, op, offset, length, + renewer, bufferSize, xattrName, xattrEncoding); return ugi.doAs(new PrivilegedExceptionAction() { @Override public Response run() throws IOException, URISyntaxException { try { return get(ugi, delegation, username, doAsUser, - path.getAbsolutePath(), op, offset, length, renewer, bufferSize); + path.getAbsolutePath(), op, offset, length, renewer, bufferSize, + xattrName, xattrEncoding); } finally { reset(); } @@ -732,7 +776,9 @@ public class NamenodeWebHdfsMethods { final OffsetParam offset, final LengthParam length, final RenewerParam renewer, - final BufferSizeParam bufferSize + final BufferSizeParam bufferSize, + final XAttrNameParam xattrName, + final XAttrEncodingParam xattrEncoding ) throws IOException, URISyntaxException { final NameNode namenode = (NameNode)context.getAttribute("name.node"); final NamenodeProtocols np = getRPCServer(namenode); @@ -807,6 +853,19 @@ public class NamenodeWebHdfsMethods { final String js = JsonUtil.toJsonString(status); return Response.ok(js).type(MediaType.APPLICATION_JSON).build(); } + case GETXATTR: { + XAttr xAttr = XAttrHelper.getFirstXAttr(np.getXAttrs(fullpath, + XAttrHelper.buildXAttrAsList(xattrName.getXAttrName()))); + final String js = JsonUtil.toJsonString(xAttr, + xattrEncoding.getEncoding()); + return Response.ok(js).type(MediaType.APPLICATION_JSON).build(); + } + case GETXATTRS: { + List xAttrs = np.getXAttrs(fullpath, null); + final String js = JsonUtil.toJsonString(xAttrs, + xattrEncoding.getEncoding()); + return Response.ok(js).type(MediaType.APPLICATION_JSON).build(); + } default: throw new UnsupportedOperationException(op + " is not supported"); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java index eda8006ebea..db541cb61a1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java @@ -22,6 +22,7 @@ import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.hdfs.protocol.*; import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; @@ -34,6 +35,8 @@ import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.StringUtils; import org.mortbay.util.ajax.JSON; +import com.google.common.collect.Maps; + import java.io.ByteArrayInputStream; import java.io.DataInputStream; import java.io.IOException; @@ -664,4 +667,125 @@ public class JsonUtil { 
aclStatusBuilder.addEntries(aclEntryList); return aclStatusBuilder.build(); } + + public static String toJsonString(final XAttr xAttr, + final XAttrCodec encoding) throws IOException { + if (xAttr == null) { + return "{}"; + } + final Map m = new TreeMap(); + m.put("name", XAttrHelper.getPrefixName(xAttr)); + m.put("value", xAttr.getValue() != null ? + XAttrCodec.encodeValue(xAttr.getValue(), encoding) : null); + final Map> finalMap = + new TreeMap>(); + finalMap.put(XAttr.class.getSimpleName(), m); + return JSON.toString(finalMap); + } + + private static Map toJsonMap(final XAttr xAttr, + final XAttrCodec encoding) throws IOException { + if (xAttr == null) { + return null; + } + + final Map m = new TreeMap(); + m.put("name", XAttrHelper.getPrefixName(xAttr)); + m.put("value", xAttr.getValue() != null ? + XAttrCodec.encodeValue(xAttr.getValue(), encoding) : null); + return m; + } + + private static Object[] toJsonArray(final List array, + final XAttrCodec encoding) throws IOException { + if (array == null) { + return null; + } else if (array.size() == 0) { + return EMPTY_OBJECT_ARRAY; + } else { + final Object[] a = new Object[array.size()]; + for(int i = 0; i < array.size(); i++) { + a[i] = toJsonMap(array.get(i), encoding); + } + return a; + } + } + + public static String toJsonString(final List xAttrs, + final XAttrCodec encoding) throws IOException { + final Map finalMap = new TreeMap(); + finalMap.put("XAttrs", toJsonArray(xAttrs, encoding)); + return JSON.toString(finalMap); + } + + public static XAttr toXAttr(final Map json) throws IOException { + if (json == null) { + return null; + } + + Map m = (Map) json.get(XAttr.class.getSimpleName()); + if (m == null) { + return null; + } + String name = (String) m.get("name"); + String value = (String) m.get("value"); + return XAttrHelper.buildXAttr(name, decodeXAttrValue(value)); + } + + public static Map toXAttrs(final Map json) + throws IOException { + if (json == null) { + return null; + } + + return toXAttrMap((Object[])json.get("XAttrs")); + } + + public static Map toXAttrs(final Map json, + List names) throws IOException { + if (json == null || names == null) { + return null; + } + if (names.isEmpty()) { + return Maps.newHashMap(); + } + Map xAttrs = toXAttrs(json); + if (xAttrs == null || xAttrs.isEmpty()) { + return xAttrs; + } + + Map result = Maps.newHashMap(); + for (String name : names) { + if (xAttrs.containsKey(name)) { + result.put(name, xAttrs.get(name)); + } + } + return result; + } + + private static Map toXAttrMap(final Object[] objects) + throws IOException { + if (objects == null) { + return null; + } else if (objects.length == 0) { + return Maps.newHashMap(); + } else { + final Map xAttrs = Maps.newHashMap(); + for(int i = 0; i < objects.length; i++) { + Map m = (Map) objects[i]; + String name = (String) m.get("name"); + String value = (String) m.get("value"); + xAttrs.put(name, decodeXAttrValue(value)); + } + return xAttrs; + } + } + + private static byte[] decodeXAttrValue(String value) throws IOException { + if (value != null) { + return XAttrCodec.decodeValue(value); + } else { + return new byte[0]; + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index 2f230d1dbac..fe36dafe12d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -30,6 +30,7 @@ import java.net.URI; import java.net.URL; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; +import java.util.EnumSet; import java.util.List; import java.util.Map; import java.util.StringTokenizer; @@ -49,6 +50,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum; import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.fs.XAttrCodec; +import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; @@ -813,6 +817,66 @@ public class WebHdfsFileSystem extends FileSystem new RenameOptionSetParam(options) ).run(); } + + @Override + public void setXAttr(Path p, String name, byte[] value, + EnumSet flag) throws IOException { + statistics.incrementWriteOps(1); + final HttpOpParam.Op op = PutOpParam.Op.SETXATTR; + if (value != null) { + new FsPathRunner(op, p, new XAttrNameParam(name), new XAttrValueParam( + XAttrCodec.encodeValue(value, XAttrCodec.HEX)), + new XAttrSetFlagParam(flag)).run(); + } else { + new FsPathRunner(op, p, new XAttrNameParam(name), + new XAttrSetFlagParam(flag)).run(); + } + } + + @Override + public byte[] getXAttr(Path p, String name) throws IOException { + final HttpOpParam.Op op = GetOpParam.Op.GETXATTR; + return new FsPathResponseRunner(op, p, new XAttrNameParam(name), + new XAttrEncodingParam(XAttrCodec.HEX)) { + @Override + byte[] decodeResponse(Map json) throws IOException { + XAttr xAttr = JsonUtil.toXAttr(json); + return xAttr != null ? xAttr.getValue() : null; + } + }.run(); + } + + @Override + public Map getXAttrs(Path p) throws IOException { + final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS; + return new FsPathResponseRunner>(op, p, + new XAttrEncodingParam(XAttrCodec.HEX)) { + @Override + Map decodeResponse(Map json) throws IOException { + return JsonUtil.toXAttrs(json); + } + }.run(); + } + + @Override + public Map getXAttrs(Path p, final List names) + throws IOException { + final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS; + return new FsPathResponseRunner>(op, p, + new XAttrEncodingParam(XAttrCodec.HEX)) { + @Override + Map decodeResponse(Map json) throws IOException { + return JsonUtil.toXAttrs(json, names); + } + }.run(); + } + + @Override + public void removeXAttr(Path p, String name) throws IOException { + statistics.incrementWriteOps(1); + final HttpOpParam.Op op = PutOpParam.Op.REMOVEXATTR; + new FsPathRunner(op, p, new XAttrNameParam(name)).run(); + } @Override public void setOwner(final Path p, final String owner, final String group diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java index 3c6d47b0f43..b8c740b47cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java @@ -36,6 +36,8 @@ public class GetOpParam extends HttpOpParam { /** GET_BLOCK_LOCATIONS is a private unstable op. 
*/ GET_BLOCK_LOCATIONS(false, HttpURLConnection.HTTP_OK), GETACLSTATUS(false, HttpURLConnection.HTTP_OK), + GETXATTR(false, HttpURLConnection.HTTP_OK), + GETXATTRS(false, HttpURLConnection.HTTP_OK), NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java index 6d80e44364f..7fd2b717cc1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java @@ -42,6 +42,9 @@ public class PutOpParam extends HttpOpParam { REMOVEDEFAULTACL(false, HttpURLConnection.HTTP_OK), REMOVEACL(false, HttpURLConnection.HTTP_OK), SETACL(false, HttpURLConnection.HTTP_OK), + + SETXATTR(false, HttpURLConnection.HTTP_OK), + REMOVEXATTR(false, HttpURLConnection.HTTP_OK), CREATESNAPSHOT(false, HttpURLConnection.HTTP_OK), RENAMESNAPSHOT(false, HttpURLConnection.HTTP_OK), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrEncodingParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrEncodingParam.java new file mode 100644 index 00000000000..36057c58f61 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrEncodingParam.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.web.resources; + +import org.apache.hadoop.fs.XAttrCodec; + +public class XAttrEncodingParam extends EnumParam { + /** Parameter name. */ + public static final String NAME = "encoding"; + /** Default parameter value. */ + public static final String DEFAULT = ""; + + private static final Domain DOMAIN = + new Domain(NAME, XAttrCodec.class); + + public XAttrEncodingParam(final XAttrCodec encoding) { + super(DOMAIN, encoding); + } + + /** + * Constructor. + * @param str a string representation of the parameter value. + */ + public XAttrEncodingParam(final String str) { + super(DOMAIN, str != null && !str.isEmpty() ? 
DOMAIN.parse(str) : null); + } + + @Override + public String getName() { + return NAME; + } + + @Override + public String getValueString() { + return value.toString(); + } + + public XAttrCodec getEncoding() { + return getValue(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java new file mode 100644 index 00000000000..3860f916e48 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.web.resources; + +import java.util.regex.Pattern; + +public class XAttrNameParam extends StringParam { + /** Parameter name. **/ + public static final String NAME = "xattr.name"; + /** Default parameter value. **/ + public static final String DEFAULT = ""; + + private static Domain DOMAIN = new Domain(NAME, + Pattern.compile("^(user\\.|trusted\\.|system\\.|security\\.).+")); + + public XAttrNameParam(final String str) { + super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str); + } + + @Override + public String getName() { + return NAME; + } + + public String getXAttrName() { + final String v = getValue(); + return v; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrSetFlagParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrSetFlagParam.java new file mode 100644 index 00000000000..7fa298299bb --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrSetFlagParam.java @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.web.resources; + +import java.util.EnumSet; + +import org.apache.hadoop.fs.XAttrSetFlag; + +public class XAttrSetFlagParam extends EnumSetParam { + /** Parameter name. */ + public static final String NAME = "flag"; + /** Default parameter value. */ + public static final String DEFAULT = ""; + + private static final Domain DOMAIN = new Domain( + NAME, XAttrSetFlag.class); + + public XAttrSetFlagParam(final EnumSet flag) { + super(DOMAIN, flag); + } + + /** + * Constructor. + * @param str a string representation of the parameter value. + */ + public XAttrSetFlagParam(final String str) { + super(DOMAIN, DOMAIN.parse(str)); + } + + @Override + public String getName() { + return NAME; + } + + public EnumSet getFlag() { + return getValue(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrValueParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrValueParam.java new file mode 100644 index 00000000000..60f86ae226c --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrValueParam.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.web.resources; + +import java.io.IOException; + +import org.apache.hadoop.fs.XAttrCodec; + +public class XAttrValueParam extends StringParam { + /** Parameter name. **/ + public static final String NAME = "xattr.value"; + /** Default parameter value. **/ + public static final String DEFAULT = ""; + + private static Domain DOMAIN = new Domain(NAME, null); + + public XAttrValueParam(final String str) { + super(DOMAIN, str == null || str.equals(DEFAULT) ? 
null : str);
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+
+  public byte[] getXAttrValue() throws IOException {
+    final String v = getValue();
+    return XAttrCodec.decodeValue(v);
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
index 17fecf1c1db..80b96f43173 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
@@ -31,6 +31,7 @@
 package hadoop.hdfs;
 
 import "Security.proto";
 import "hdfs.proto";
 import "acl.proto";
+import "xattr.proto";
 
 /**
  * The ClientNamenodeProtocol Service defines the interface between a client
 *
@@ -759,4 +760,10 @@ service ClientNamenodeProtocol {
       returns(SetAclResponseProto);
   rpc getAclStatus(GetAclStatusRequestProto)
       returns(GetAclStatusResponseProto);
+  rpc setXAttr(SetXAttrRequestProto)
+      returns(SetXAttrResponseProto);
+  rpc getXAttrs(GetXAttrsRequestProto)
+      returns(GetXAttrsResponseProto);
+  rpc removeXAttr(RemoveXAttrRequestProto)
+      returns(RemoveXAttrResponseProto);
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
index b6bf10032d5..1c8edfa0c16 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
@@ -23,6 +23,7 @@
 package hadoop.hdfs.fsimage;
 
 import "hdfs.proto";
 import "acl.proto";
+import "xattr.proto";
 
 /**
  * This file defines the on-disk layout of the file system image. The
 *
@@ -106,7 +107,23 @@ message INodeSection {
      */
     repeated fixed32 entries = 2 [packed = true];
   }
-
+
+  message XAttrCompactProto {
+    /**
+     *
+     * [0:2) -- the namespace of XAttr (XAttrNamespaceProto)
+     * [2:26) -- the name of the entry, which is an ID that points to a
+     *          string in the StringTableSection.
+     * [26:32) -- reserved for future uses.
+     */
+    required fixed32 name = 1;
+    optional bytes value = 2;
+  }
+
+  message XAttrFeatureProto {
+    repeated XAttrCompactProto xAttrs = 1;
+  }
+
   message INodeFile {
     optional uint32 replication = 1;
     optional uint64 modificationTime = 2;
@@ -116,6 +133,7 @@ message INodeSection {
     repeated BlockProto blocks = 6;
     optional FileUnderConstructionFeature fileUC = 7;
     optional AclFeatureProto acl = 8;
+    optional XAttrFeatureProto xAttrs = 9;
   }
 
   message INodeDirectory {
@@ -126,6 +144,7 @@ message INodeSection {
     optional uint64 dsQuota = 3;
     optional fixed64 permission = 4;
     optional AclFeatureProto acl = 5;
+    optional XAttrFeatureProto xAttrs = 6;
   }
 
   message INodeSymlink {
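For readers of the XAttrCompactProto comment above: the namespace and the string-table ID of the xattr name share a single fixed32. Below is a minimal sketch of that packing. It is not part of the patch; it assumes the bit ranges in the comment are counted from the most significant bit of the word, and the class and method names are hypothetical.

    // Illustrative only: assumed bit layout of XAttrCompactProto.name.
    // [0:2) = namespace (top two bits), [2:26) = string-table ID of the
    // xattr name, [26:32) = reserved (bottom six bits).
    public final class XAttrCompactLayoutSketch {
      private static final int NAMESPACE_MASK = 0x3;          // 2 bits
      private static final int NAMESPACE_SHIFT = 30;          // top of the word
      private static final int NAME_ID_MASK = (1 << 24) - 1;  // 24 bits
      private static final int NAME_ID_SHIFT = 6;             // above the reserved bits

      static int pack(int namespaceOrdinal, int nameId) {
        return ((namespaceOrdinal & NAMESPACE_MASK) << NAMESPACE_SHIFT)
            | ((nameId & NAME_ID_MASK) << NAME_ID_SHIFT);
      }

      static int namespaceOrdinal(int packed) {
        return (packed >>> NAMESPACE_SHIFT) & NAMESPACE_MASK;
      }

      static int nameId(int packed) {
        return (packed >>> NAME_ID_SHIFT) & NAME_ID_MASK;
      }
    }

Under these assumptions, a USER (ordinal 0) xattr whose name is string-table entry 5 packs to 0x140, and unpacking recovers both fields.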
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto
new file mode 100644
index 00000000000..eef5ae947f8
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+option java_outer_classname = "XAttrProtos";
+option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
+
+message XAttrProto {
+  enum XAttrNamespaceProto {
+    USER = 0;
+    TRUSTED = 1;
+    SECURITY = 2;
+    SYSTEM = 3;
+  }
+
+  required XAttrNamespaceProto namespace = 1;
+  required string name = 2;
+  optional bytes value = 3;
+}
+
+message XAttrEditLogProto {
+  required string src = 1;
+  optional XAttrProto xAttr = 2;
+}
+
+enum XAttrSetFlagProto {
+  XATTR_CREATE = 0x01;
+  XATTR_REPLACE = 0x02;
+}
+
+message SetXAttrRequestProto {
+  required string src = 1;
+  optional XAttrProto xAttr = 2;
+  optional uint32 flag = 3; //bits set using XAttrSetFlagProto
+}
+
+message SetXAttrResponseProto {
+}
+
+message GetXAttrsRequestProto {
+  required string src = 1;
+  repeated XAttrProto xAttrs = 2;
+}
+
+message GetXAttrsResponseProto {
+  repeated XAttrProto xAttrs = 1;
+}
+
+message RemoveXAttrRequestProto {
+  required string src = 1;
+  optional XAttrProto xAttr = 2;
+}
+
+message RemoveXAttrResponseProto {
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index dc75e209333..e781d91484c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1972,4 +1972,28 @@
 </property>
 
+<property>
+  <name>dfs.namenode.xattrs.enabled</name>
+  <value>true</value>
+  <description>
+    Whether support for extended attributes is enabled on the NameNode.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.fs-limits.max-xattrs-per-inode</name>
+  <value>32</value>
+  <description>
+    Maximum number of extended attributes per inode.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.fs-limits.max-xattr-size</name>
+  <value>16384</value>
+  <description>
+    The maximum combined size of the name and value of an extended attribute in bytes.
+  </description>
+</property>
+
 </configuration>
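As a hedged illustration of how these three keys are used, the snippet below sets them programmatically the way the tests later in this patch do before starting a MiniDFSCluster; it is not part of the patch, and the values are just the defaults restated.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class XAttrConfSketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Same properties as the hdfs-default.xml entries above.
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
        conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 32);
        conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, 16384);
        System.out.println(conf.get("dfs.namenode.fs-limits.max-xattr-size"));
      }
    }

The constant names are the DFSConfigKeys entries exercised by TestXAttrCLI and FSXAttrBaseTest further down in this patch.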
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm
new file mode 100644
index 00000000000..af6132bdef5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm
@@ -0,0 +1,98 @@
+~~ Licensed under the Apache License, Version 2.0 (the "License");
+~~ you may not use this file except in compliance with the License.
+~~ You may obtain a copy of the License at
+~~
+~~   http://www.apache.org/licenses/LICENSE-2.0
+~~
+~~ Unless required by applicable law or agreed to in writing, software
+~~ distributed under the License is distributed on an "AS IS" BASIS,
+~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~~ See the License for the specific language governing permissions and
+~~ limitations under the License. See accompanying LICENSE file.
+
+  ---
+  Hadoop Distributed File System-${project.version} - Extended Attributes
+  ---
+  ---
+  ${maven.build.timestamp}
+
+Extended Attributes in HDFS
+
+  \[ {{{../../index.html}Go Back}} \]
+
+%{toc|section=1|fromDepth=2|toDepth=4}
+
+* {Overview}
+
+  <Extended attributes> (abbreviated as <xattrs>) are a filesystem feature that allows user applications to associate additional metadata with a file or directory. Unlike system-level inode metadata such as file permissions or modification time, extended attributes are not interpreted by the system and are instead used by applications to store additional information about an inode. Extended attributes could be used, for instance, to specify the character encoding of a plain-text document.
+
+** {HDFS extended attributes}
+
+  Extended attributes in HDFS are modeled after extended attributes in Linux (see the Linux manpage for {{{http://www.bestbits.at/acl/man/man5/attr.txt}attr(5)}} and {{{http://www.bestbits.at/acl/}related documentation}}). An extended attribute is a <name-value pair>, with a string name and binary value. Xattr names must also be prefixed with a <namespace>. For example, an xattr named <myXattr> in the <user> namespace would be specified as <<user.myXattr>>. Multiple xattrs can be associated with a single inode.
+
+** {Namespaces and Permissions}
+
+  In HDFS, as in Linux, there are four valid namespaces: <<<user>>>, <<<trusted>>>, <<<system>>>, and <<<security>>>. Each of these namespaces has different access restrictions.
+
+  The <<<user>>> namespace is the namespace that will commonly be used by client applications. Access to extended attributes in the user namespace is controlled by the corresponding file permissions.
+
+  The <<<trusted>>> namespace is available only to HDFS superusers.
+
+  The <<<system>>> namespace is reserved for internal HDFS use. This namespace is not accessible through userspace methods, and is reserved for implementing internal HDFS features.
+
+  The <<<security>>> namespace is reserved for internal HDFS use. This namespace is not accessible through userspace methods. It is currently unused.
+
+* {Interacting with extended attributes}
+
+  The Hadoop shell has support for interacting with extended attributes via <<<hadoop fs -getfattr>>> and <<<hadoop fs -setfattr>>>. These commands are styled after the Linux {{{http://www.bestbits.at/acl/man/man1/getfattr.txt}getfattr(1)}} and {{{http://www.bestbits.at/acl/man/man1/setfattr.txt}setfattr(1)}} commands.
+
+** {getfattr}
+
+  <<<hadoop fs -getfattr [-R] {-n name | -d} [-e en] <path>>>>
+
+  Displays the extended attribute names and values (if any) for a file or directory.
+
+*--+--+
+-R | Recursively list the attributes for all files and directories.
+*--+--+
+-n name | Dump the named extended attribute value.
+*--+--+
+-d | Dump all extended attribute values associated with pathname.
+*--+--+
+-e \<encoding\> | Encode values after retrieving them. Valid encodings are "text", "hex", and "base64". Values encoded as text strings are enclosed in double quotes ("), and values encoded as hexadecimal and base64 are prefixed with 0x and 0s, respectively.
+*--+--+
+\<path\> | The file or directory.
+*--+--+
+
+** {setfattr}
+
+  <<<hadoop fs -setfattr {-n name [-v value] | -x name} <path>>>>
+
+  Sets an extended attribute name and value for a file or directory.
+
+*--+--+
+-n name | The extended attribute name.
+*--+--+
+-v value | The extended attribute value. There are three different encoding methods for the value. If the argument is enclosed in double quotes, then the value is the string inside the quotes. If the argument is prefixed with 0x or 0X, then it is taken as a hexadecimal number. If the argument begins with 0s or 0S, then it is taken as a base64 encoding.
+*--+--+
+-x name | Remove the extended attribute.
+*--+--+
+\<path\> | The file or directory.
+*--+--+
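To complement the shell commands above, here is a minimal sketch, not from the patch, of the same operations through the FileSystem xattr API that this feature adds; the path and attribute name are illustrative, and error handling is omitted.

    import java.util.Map;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class XAttrShellEquivalents {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path file = new Path("/tmp/file");  // hypothetical path

        // hadoop fs -setfattr -n user.myXattr -v someValue /tmp/file
        fs.setXAttr(file, "user.myXattr", "someValue".getBytes("UTF-8"));

        // hadoop fs -getfattr -n user.myXattr /tmp/file
        byte[] one = fs.getXAttr(file, "user.myXattr");
        System.out.println(new String(one, "UTF-8"));

        // hadoop fs -getfattr -d /tmp/file
        Map<String, byte[]> all = fs.getXAttrs(file);
        System.out.println(all.keySet());

        // hadoop fs -setfattr -x user.myXattr /tmp/file
        fs.removeXAttr(file, "user.myXattr");

        fs.close();
      }
    }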
+
+* {Configuration options}
+
+  HDFS supports extended attributes out of the box, without additional configuration. Administrators may want to tune the options that limit the number of xattrs per inode and the size of an xattr, since xattrs increase the on-disk and in-memory space consumption of an inode.
+
+  * <<<dfs.namenode.xattrs.enabled>>>
+
+  Whether support for extended attributes is enabled on the NameNode. By default, extended attributes are enabled.
+
+  * <<<dfs.namenode.fs-limits.max-xattrs-per-inode>>>
+
+  The maximum number of extended attributes per inode. By default, this limit is 32.
+
+  * <<<dfs.namenode.fs-limits.max-xattr-size>>>
+
+  The maximum combined size of the name and value of an extended attribute in bytes. By default, this limit is 16384 bytes.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java
new file mode 100644
index 00000000000..ce107ef56c1
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.cli;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.cli.util.CLICommand;
+import org.apache.hadoop.cli.util.CommandExecutor.Result;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HDFSPolicyProvider;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestXAttrCLI extends CLITestHelperDFS {
+  protected MiniDFSCluster dfsCluster = null;
+  protected FileSystem fs = null;
+  protected String namenode = null;
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
+    conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
+        HDFSPolicyProvider.class, PolicyProvider.class);
+    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+
+    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    dfsCluster.waitClusterUp();
+    namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
+
+    username = System.getProperty("user.name");
+
+    fs = dfsCluster.getFileSystem();
+    assertTrue("Not a HDFS: "+fs.getUri(),
+        fs instanceof DistributedFileSystem);
+  }
+
+  @Override
+  protected String getTestFile() {
+    return "testXAttrConf.xml";
+  }
+
+  @After
+  @Override
+  public void tearDown() throws Exception {
+    if (fs != null) {
+      fs.close();
+    }
+    if (dfsCluster != null) {
+      dfsCluster.shutdown();
+    }
+    Thread.sleep(2000);
+    super.tearDown();
+  }
+
+  @Override
+  protected String expandCommand(final String cmd) {
+    String expCmd = cmd;
+    expCmd = expCmd.replaceAll("NAMENODE", namenode);
+    expCmd = expCmd.replaceAll("#LF#",
+        System.getProperty("line.separator"));
+    expCmd = super.expandCommand(expCmd);
+    return expCmd;
+  }
+
+  @Override
+  protected Result
execute(CLICommand cmd) throws Exception { + return cmd.getExecutor(namenode).executeCommand(cmd.getCmd()); + } + + @Test + @Override + public void testAll () { + super.testAll(); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java new file mode 100644 index 00000000000..032a8dfead0 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java @@ -0,0 +1,85 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotSame; + +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * Tests for XAttr objects. + */ +public class TestXAttr { + private static XAttr XATTR, XATTR1, XATTR2, XATTR3, XATTR4; + + @BeforeClass + public static void setUp() throws Exception { + byte[] value = {0x31, 0x32, 0x33}; + XATTR = new XAttr.Builder() + .setName("name") + .setValue(value) + .build(); + XATTR1 = new XAttr.Builder() + .setNameSpace(XAttr.NameSpace.USER) + .setName("name") + .setValue(value) + .build(); + XATTR2 = new XAttr.Builder() + .setNameSpace(XAttr.NameSpace.TRUSTED) + .setName("name") + .setValue(value) + .build(); + XATTR3 = new XAttr.Builder() + .setNameSpace(XAttr.NameSpace.SYSTEM) + .setName("name") + .setValue(value) + .build(); + XATTR4 = new XAttr.Builder() + .setNameSpace(XAttr.NameSpace.SECURITY) + .setName("name") + .setValue(value) + .build(); + } + + @Test + public void testXAttrEquals() { + assertNotSame(XATTR1, XATTR2); + assertNotSame(XATTR2, XATTR3); + assertNotSame(XATTR3, XATTR4); + assertEquals(XATTR, XATTR1); + assertEquals(XATTR1, XATTR1); + assertEquals(XATTR2, XATTR2); + assertEquals(XATTR3, XATTR3); + assertEquals(XATTR4, XATTR4); + assertFalse(XATTR1.equals(XATTR2)); + assertFalse(XATTR2.equals(XATTR3)); + assertFalse(XATTR3.equals(XATTR4)); + } + + @Test + public void testXAttrHashCode() { + assertEquals(XATTR.hashCode(), XATTR1.hashCode()); + assertFalse(XATTR1.hashCode() == XATTR2.hashCode()); + assertFalse(XATTR2.hashCode() == XATTR3.hashCode()); + assertFalse(XATTR3.hashCode() == XATTR4.hashCode()); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 8ccc8fc82e9..ab4ba77ca1a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -1201,6 +1201,13 @@ public class DFSTestUtil { 
.setType(AclEntryType.OTHER) .build()); filesystem.setAcl(pathConcatTarget, aclEntryList); + // OP_SET_XATTR + filesystem.setXAttr(pathConcatTarget, "user.a1", + new byte[]{0x31, 0x32, 0x33}); + filesystem.setXAttr(pathConcatTarget, "user.a2", + new byte[]{0x37, 0x38, 0x39}); + // OP_REMOVE_XATTR + filesystem.removeXAttr(pathConcatTarget, "user.a2"); } public static void abortStream(DFSOutputStream out) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java index 52ae7c92330..c31b3a19364 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java @@ -155,7 +155,7 @@ public class TestDFSShell { } @Test (timeout = 30000) - public void testRecrusiveRm() throws IOException { + public void testRecursiveRm() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); FileSystem fs = cluster.getFileSystem(); @@ -1567,6 +1567,7 @@ public class TestDFSShell { cluster.shutdown(); } } + private static String runLsr(final FsShell shell, String root, int returnvalue ) throws Exception { System.out.println("root=" + root + ", returnvalue=" + returnvalue); @@ -1858,6 +1859,333 @@ public class TestDFSShell { cluster.shutdown(); } } + + @Test (timeout = 30000) + public void testSetXAttrPermission() throws Exception { + UserGroupInformation user = UserGroupInformation. + createUserForTesting("user", new String[] {"mygroup"}); + MiniDFSCluster cluster = null; + PrintStream bak = null; + try { + final Configuration conf = new HdfsConfiguration(); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + + FileSystem fs = cluster.getFileSystem(); + Path p = new Path("/foo"); + fs.mkdirs(p); + bak = System.err; + + final FsShell fshell = new FsShell(conf); + final ByteArrayOutputStream out = new ByteArrayOutputStream(); + System.setErr(new PrintStream(out)); + + // No permission to write xattr + fs.setPermission(p, new FsPermission((short) 0700)); + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + int ret = ToolRunner.run(fshell, new String[]{ + "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"}); + assertEquals("Returned should be 1", 1, ret); + String str = out.toString(); + assertTrue("Permission denied printed", + str.indexOf("Permission denied") != -1); + out.reset(); + return null; + } + }); + + int ret = ToolRunner.run(fshell, new String[]{ + "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"}); + assertEquals("Returned should be 0", 0, ret); + out.reset(); + + // No permission to read and remove + fs.setPermission(p, new FsPermission((short) 0750)); + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + // Read + int ret = ToolRunner.run(fshell, new String[]{ + "-getfattr", "-n", "user.a1", "/foo"}); + assertEquals("Returned should be 1", 1, ret); + String str = out.toString(); + assertTrue("Permission denied printed", + str.indexOf("Permission denied") != -1); + out.reset(); + // Remove + ret = ToolRunner.run(fshell, new String[]{ + "-setfattr", "-x", "user.a1", "/foo"}); + assertEquals("Returned should be 1", 1, ret); + str = out.toString(); + assertTrue("Permission denied printed", + 
str.indexOf("Permission denied") != -1); + out.reset(); + return null; + } + }); + } finally { + if (bak != null) { + System.setErr(bak); + } + if (cluster != null) { + cluster.shutdown(); + } + } + } + + /* HDFS-6413 xattr names erroneously handled as case-insensitive */ + @Test (timeout = 30000) + public void testSetXAttrCaseSensitivity() throws Exception { + UserGroupInformation user = UserGroupInformation. + createUserForTesting("user", new String[] {"mygroup"}); + MiniDFSCluster cluster = null; + PrintStream bak = null; + try { + final Configuration conf = new HdfsConfiguration(); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + + FileSystem fs = cluster.getFileSystem(); + Path p = new Path("/mydir"); + fs.mkdirs(p); + bak = System.err; + + final FsShell fshell = new FsShell(conf); + final ByteArrayOutputStream out = new ByteArrayOutputStream(); + System.setOut(new PrintStream(out)); + + doSetXattr(out, fshell, + new String[] {"-setfattr", "-n", "User.Foo", "/mydir"}, + new String[] {"-getfattr", "-d", "/mydir"}, + new String[] {"user.Foo"}, + new String[] {}); + + doSetXattr(out, fshell, + new String[] {"-setfattr", "-n", "user.FOO", "/mydir"}, + new String[] {"-getfattr", "-d", "/mydir"}, + new String[] {"user.Foo", "user.FOO"}, + new String[] {}); + + doSetXattr(out, fshell, + new String[] {"-setfattr", "-n", "USER.foo", "/mydir"}, + new String[] {"-getfattr", "-d", "/mydir"}, + new String[] {"user.Foo", "user.FOO", "user.foo"}, + new String[] {}); + + doSetXattr(out, fshell, + new String[] {"-setfattr", "-n", "USER.fOo", "-v", "myval", "/mydir"}, + new String[] {"-getfattr", "-d", "/mydir"}, + new String[] {"user.Foo", "user.FOO", "user.foo", "user.fOo=\"myval\""}, + new String[] {"user.Foo=", "user.FOO=", "user.foo="}); + + doSetXattr(out, fshell, + new String[] {"-setfattr", "-x", "useR.foo", "/mydir"}, + new String[] {"-getfattr", "-d", "/mydir"}, + new String[] {"user.Foo", "user.FOO"}, + new String[] {"foo"}); + + doSetXattr(out, fshell, + new String[] {"-setfattr", "-x", "USER.FOO", "/mydir"}, + new String[] {"-getfattr", "-d", "/mydir"}, + new String[] {"user.Foo"}, + new String[] {"FOO"}); + + doSetXattr(out, fshell, + new String[] {"-setfattr", "-x", "useR.Foo", "/mydir"}, + new String[] {"-getfattr", "-n", "User.Foo", "/mydir"}, + new String[] {}, + new String[] {"Foo"}); + + } finally { + if (bak != null) { + System.setOut(bak); + } + if (cluster != null) { + cluster.shutdown(); + } + } + } + + private void doSetXattr(ByteArrayOutputStream out, FsShell fshell, + String[] setOp, String[] getOp, String[] expectArr, + String[] dontExpectArr) throws Exception { + int ret = ToolRunner.run(fshell, setOp); + out.reset(); + ret = ToolRunner.run(fshell, getOp); + final String str = out.toString(); + for (int i = 0; i < expectArr.length; i++) { + final String expect = expectArr[i]; + final StringBuilder sb = new StringBuilder + ("Incorrect results from getfattr. Expected: "); + sb.append(expect).append(" Full Result: "); + sb.append(str); + assertTrue(sb.toString(), + str.indexOf(expect) != -1); + } + + for (int i = 0; i < dontExpectArr.length; i++) { + String dontExpect = dontExpectArr[i]; + final StringBuilder sb = new StringBuilder + ("Incorrect results from getfattr. 
Didn't Expect: "); + sb.append(dontExpect).append(" Full Result: "); + sb.append(str); + assertTrue(sb.toString(), + str.indexOf(dontExpect) == -1); + } + out.reset(); + } + + /** + * HDFS-6374 setXAttr should require the user to be the owner of the file + * or directory. + * + * Test to make sure that only the owner of a file or directory can set + * or remove the xattrs. + * + * As user1: + * Create a directory (/foo) as user1, chown it to user1 (and user1's group), + * grant rwx to "other". + * + * As user2: + * Set an xattr (should fail). + * + * As user1: + * Set an xattr (should pass). + * + * As user2: + * Read the xattr (should pass). + * Remove the xattr (should fail). + * + * As user1: + * Read the xattr (should pass). + * Remove the xattr (should pass). + */ + @Test (timeout = 30000) + public void testSetXAttrPermissionAsDifferentOwner() throws Exception { + final String USER1 = "user1"; + final String GROUP1 = "mygroup1"; + final UserGroupInformation user1 = UserGroupInformation. + createUserForTesting(USER1, new String[] {GROUP1}); + final UserGroupInformation user2 = UserGroupInformation. + createUserForTesting("user2", new String[] {"mygroup2"}); + MiniDFSCluster cluster = null; + PrintStream bak = null; + try { + final Configuration conf = new HdfsConfiguration(); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + + final FileSystem fs = cluster.getFileSystem(); + fs.setOwner(new Path("/"), USER1, GROUP1); + bak = System.err; + + final FsShell fshell = new FsShell(conf); + final ByteArrayOutputStream out = new ByteArrayOutputStream(); + System.setErr(new PrintStream(out)); + + // mkdir foo as user1 + user1.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final int ret = ToolRunner.run(fshell, new String[]{ + "-mkdir", "/foo"}); + assertEquals("Return should be 0", 0, ret); + out.reset(); + return null; + } + }); + + user1.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + // Give access to "other" + final int ret = ToolRunner.run(fshell, new String[]{ + "-chmod", "707", "/foo"}); + assertEquals("Return should be 0", 0, ret); + out.reset(); + return null; + } + }); + + // No permission to write xattr for non-owning user (user2). + user2.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final int ret = ToolRunner.run(fshell, new String[]{ + "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"}); + assertEquals("Returned should be 1", 1, ret); + final String str = out.toString(); + assertTrue("Permission denied printed", + str.indexOf("Permission denied") != -1); + out.reset(); + return null; + } + }); + + // But there should be permission to write xattr for + // the owning user. + user1.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final int ret = ToolRunner.run(fshell, new String[]{ + "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"}); + assertEquals("Returned should be 0", 0, ret); + out.reset(); + return null; + } + }); + + // There should be permission to read,but not to remove for + // non-owning user (user2). 
+ user2.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + // Read + int ret = ToolRunner.run(fshell, new String[]{ + "-getfattr", "-n", "user.a1", "/foo"}); + assertEquals("Returned should be 0", 0, ret); + out.reset(); + // Remove + ret = ToolRunner.run(fshell, new String[]{ + "-setfattr", "-x", "user.a1", "/foo"}); + assertEquals("Returned should be 1", 1, ret); + final String str = out.toString(); + assertTrue("Permission denied printed", + str.indexOf("Permission denied") != -1); + out.reset(); + return null; + } + }); + + // But there should be permission to read/remove for + // the owning user. + user1.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + // Read + int ret = ToolRunner.run(fshell, new String[]{ + "-getfattr", "-n", "user.a1", "/foo"}); + assertEquals("Returned should be 0", 0, ret); + out.reset(); + // Remove + ret = ToolRunner.run(fshell, new String[]{ + "-setfattr", "-x", "user.a1", "/foo"}); + assertEquals("Returned should be 0", 0, ret); + out.reset(); + return null; + } + }); + } finally { + if (bak != null) { + System.setErr(bak); + } + if (cluster != null) { + cluster.shutdown(); + } + } + } /** * Test that the server trash configuration is respected when diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java index 8e1b92ccde4..25ec8c9eb0e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java @@ -73,6 +73,7 @@ public class TestSafeMode { conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); fs = cluster.getFileSystem(); @@ -381,7 +382,19 @@ public class TestSafeMode { public void run(FileSystem fs) throws IOException { fs.setAcl(file1, Lists.newArrayList()); }}); - + + runFsFun("setXAttr while in SM", new FSRun() { + @Override + public void run(FileSystem fs) throws IOException { + fs.setXAttr(file1, "user.a1", null); + }}); + + runFsFun("removeXAttr while in SM", new FSRun() { + @Override + public void run(FileSystem fs) throws IOException { + fs.removeXAttr(file1, "user.a1"); + }}); + try { DFSTestUtil.readFile(fs, file1); } catch (IOException ioe) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java new file mode 100644 index 00000000000..b2a090e20e9 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java @@ -0,0 +1,475 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.HadoopIllegalArgumentException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.XAttrSetFlag; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import com.google.common.collect.Lists; + +/** + * Tests NameNode interaction for all XAttr APIs. + * This test suite covers restarting the NN, saving a new checkpoint. + */ +public class FSXAttrBaseTest { + + private static final int MAX_SIZE = 16; + + protected static MiniDFSCluster dfsCluster; + protected static Configuration conf; + private static int pathCount = 0; + private static Path path; + + // XAttrs + protected static final String name1 = "user.a1"; + protected static final byte[] value1 = {0x31, 0x32, 0x33}; + protected static final byte[] newValue1 = {0x31, 0x31, 0x31}; + protected static final String name2 = "user.a2"; + protected static final byte[] value2 = {0x37, 0x38, 0x39}; + protected static final String name3 = "user.a3"; + protected static final String name4 = "user.a4"; + + protected FileSystem fs; + + @BeforeClass + public static void init() throws Exception { + conf = new HdfsConfiguration(); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true); + conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 3); + conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, MAX_SIZE); + initCluster(true); + } + + @AfterClass + public static void shutdown() { + if (dfsCluster != null) { + dfsCluster.shutdown(); + } + } + + @Before + public void setUp() throws Exception { + pathCount += 1; + path = new Path("/p" + pathCount); + initFileSystem(); + } + + @After + public void destroyFileSystems() { + IOUtils.cleanup(null, fs); + fs = null; + } + + /** + * Tests for creating xattr + * 1. Create an xattr using XAttrSetFlag.CREATE. + * 2. Create an xattr which already exists and expect an exception. + * 3. Create multiple xattrs. + * 4. Restart NN and save checkpoint scenarios. + */ + @Test(timeout = 120000) + public void testCreateXAttr() throws Exception { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); + + Map xattrs = fs.getXAttrs(path); + Assert.assertEquals(xattrs.size(), 1); + Assert.assertArrayEquals(value1, xattrs.get(name1)); + + fs.removeXAttr(path, name1); + + xattrs = fs.getXAttrs(path); + Assert.assertEquals(xattrs.size(), 0); + + // Create xattr which already exists. 
+ fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); + try { + fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); + Assert.fail("Creating xattr which already exists should fail."); + } catch (IOException e) { + } + fs.removeXAttr(path, name1); + + // Create two xattrs + fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); + fs.setXAttr(path, name2, null, EnumSet.of(XAttrSetFlag.CREATE)); + xattrs = fs.getXAttrs(path); + Assert.assertEquals(xattrs.size(), 2); + Assert.assertArrayEquals(value1, xattrs.get(name1)); + Assert.assertArrayEquals(new byte[0], xattrs.get(name2)); + + restart(false); + initFileSystem(); + xattrs = fs.getXAttrs(path); + Assert.assertEquals(xattrs.size(), 2); + Assert.assertArrayEquals(value1, xattrs.get(name1)); + Assert.assertArrayEquals(new byte[0], xattrs.get(name2)); + + restart(true); + initFileSystem(); + xattrs = fs.getXAttrs(path); + Assert.assertEquals(xattrs.size(), 2); + Assert.assertArrayEquals(value1, xattrs.get(name1)); + Assert.assertArrayEquals(new byte[0], xattrs.get(name2)); + + fs.removeXAttr(path, name1); + fs.removeXAttr(path, name2); + } + + /** + * Tests for replacing xattr + * 1. Replace an xattr using XAttrSetFlag.REPLACE. + * 2. Replace an xattr which doesn't exist and expect an exception. + * 3. Create multiple xattrs and replace some. + * 4. Restart NN and save checkpoint scenarios. + */ + @Test(timeout = 120000) + public void testReplaceXAttr() throws Exception { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); + fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.REPLACE)); + + Map xattrs = fs.getXAttrs(path); + Assert.assertEquals(xattrs.size(), 1); + Assert.assertArrayEquals(newValue1, xattrs.get(name1)); + + fs.removeXAttr(path, name1); + + // Replace xattr which does not exist. + try { + fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.REPLACE)); + Assert.fail("Replacing xattr which does not exist should fail."); + } catch (IOException e) { + } + + // Create two xattrs, then replace one + fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); + fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE)); + fs.setXAttr(path, name2, null, EnumSet.of(XAttrSetFlag.REPLACE)); + xattrs = fs.getXAttrs(path); + Assert.assertEquals(xattrs.size(), 2); + Assert.assertArrayEquals(value1, xattrs.get(name1)); + Assert.assertArrayEquals(new byte[0], xattrs.get(name2)); + + restart(false); + initFileSystem(); + xattrs = fs.getXAttrs(path); + Assert.assertEquals(xattrs.size(), 2); + Assert.assertArrayEquals(value1, xattrs.get(name1)); + Assert.assertArrayEquals(new byte[0], xattrs.get(name2)); + + restart(true); + initFileSystem(); + xattrs = fs.getXAttrs(path); + Assert.assertEquals(xattrs.size(), 2); + Assert.assertArrayEquals(value1, xattrs.get(name1)); + Assert.assertArrayEquals(new byte[0], xattrs.get(name2)); + + fs.removeXAttr(path, name1); + fs.removeXAttr(path, name2); + } + + /** + * Tests for setting xattr + * 1. Set xattr with XAttrSetFlag.CREATE|XAttrSetFlag.REPLACE flag. + * 2. Set xattr with illegal name. + * 3. Set xattr without XAttrSetFlag. + * 4. Set xattr and total number exceeds max limit. + * 5. Set xattr and name is too long. + * 6. Set xattr and value is too long. 
+ */ + @Test(timeout = 120000) + public void testSetXAttr() throws Exception { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE, + XAttrSetFlag.REPLACE)); + + Map xattrs = fs.getXAttrs(path); + Assert.assertEquals(xattrs.size(), 1); + Assert.assertArrayEquals(value1, xattrs.get(name1)); + fs.removeXAttr(path, name1); + + // Set xattr with null name + try { + fs.setXAttr(path, null, value1, EnumSet.of(XAttrSetFlag.CREATE, + XAttrSetFlag.REPLACE)); + Assert.fail("Setting xattr with null name should fail."); + } catch (NullPointerException e) { + GenericTestUtils.assertExceptionContains("XAttr name cannot be null", e); + } catch (RemoteException e) { + GenericTestUtils.assertExceptionContains("XAttr name cannot be null", e); + } + + // Set xattr with empty name: "user." + try { + fs.setXAttr(path, "user.", value1, EnumSet.of(XAttrSetFlag.CREATE, + XAttrSetFlag.REPLACE)); + Assert.fail("Setting xattr with empty name should fail."); + } catch (HadoopIllegalArgumentException e) { + GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e); + } catch (IllegalArgumentException e) { + GenericTestUtils.assertExceptionContains("Invalid value: \"user.\" does " + + "not belong to the domain ^(user\\.|trusted\\.|system\\.|security\\.).+", e); + } + + // Set xattr with invalid name: "a1" + try { + fs.setXAttr(path, "a1", value1, EnumSet.of(XAttrSetFlag.CREATE, + XAttrSetFlag.REPLACE)); + Assert.fail("Setting xattr with invalid name prefix or without " + + "name prefix should fail."); + } catch (HadoopIllegalArgumentException e) { + GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e); + } catch (IllegalArgumentException e) { + GenericTestUtils.assertExceptionContains("Invalid value: \"a1\" does " + + "not belong to the domain ^(user\\.|trusted\\.|system\\.|security\\.).+", e); + } + + // Set xattr without XAttrSetFlag + fs.setXAttr(path, name1, value1); + xattrs = fs.getXAttrs(path); + Assert.assertEquals(xattrs.size(), 1); + Assert.assertArrayEquals(value1, xattrs.get(name1)); + fs.removeXAttr(path, name1); + + // XAttr exists, and replace it using CREATE|REPLACE flag. 
+ fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); + fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.CREATE, + XAttrSetFlag.REPLACE)); + + xattrs = fs.getXAttrs(path); + Assert.assertEquals(xattrs.size(), 1); + Assert.assertArrayEquals(newValue1, xattrs.get(name1)); + + fs.removeXAttr(path, name1); + + // Total number exceeds max limit + fs.setXAttr(path, name1, value1); + fs.setXAttr(path, name2, value2); + fs.setXAttr(path, name3, null); + try { + fs.setXAttr(path, name4, null); + Assert.fail("Setting xattr should fail if total number of xattrs " + + "for inode exceeds max limit."); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("Cannot add additional XAttr", e); + } + fs.removeXAttr(path, name1); + fs.removeXAttr(path, name2); + fs.removeXAttr(path, name3); + + // Name length exceeds max limit + String longName = "user.0123456789abcdefX"; + try { + fs.setXAttr(path, longName, null); + Assert.fail("Setting xattr should fail if name is too long."); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("XAttr is too big", e); + GenericTestUtils.assertExceptionContains("total size is 17", e); + } + + // Value length exceeds max limit + byte[] longValue = new byte[MAX_SIZE]; + try { + fs.setXAttr(path, "user.a", longValue); + Assert.fail("Setting xattr should fail if value is too long."); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("XAttr is too big", e); + GenericTestUtils.assertExceptionContains("total size is 17", e); + } + + // Name + value exactly equal the limit + String name = "user.111"; + byte[] value = new byte[MAX_SIZE-3]; + fs.setXAttr(path, name, value); + } + + /** + * Tests for getting xattr + * 1. To get xattr which does not exist. + * 2. To get multiple xattrs. + */ + @Test(timeout = 120000) + public void testGetXAttrs() throws Exception { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); + fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE)); + + // XAttr does not exist. + byte[] value = fs.getXAttr(path, name3); + Assert.assertEquals(value, null); + + List names = Lists.newArrayList(); + names.add(name1); + names.add(name2); + names.add(name3); + Map xattrs = fs.getXAttrs(path, names); + Assert.assertEquals(xattrs.size(), 2); + Assert.assertArrayEquals(value1, xattrs.get(name1)); + Assert.assertArrayEquals(value2, xattrs.get(name2)); + + fs.removeXAttr(path, name1); + fs.removeXAttr(path, name2); + } + + /** + * Tests for removing xattr + * 1. Remove xattr. + * 2. Restart NN and save checkpoint scenarios. 
+   */
+  @Test(timeout = 120000)
+  public void testRemoveXAttr() throws Exception {
+    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
+    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
+    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
+    fs.setXAttr(path, name3, null, EnumSet.of(XAttrSetFlag.CREATE));
+
+    fs.removeXAttr(path, name1);
+    fs.removeXAttr(path, name2);
+
+    Map<String, byte[]> xattrs = fs.getXAttrs(path);
+    Assert.assertEquals(xattrs.size(), 1);
+    Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
+
+    restart(false);
+    initFileSystem();
+    xattrs = fs.getXAttrs(path);
+    Assert.assertEquals(xattrs.size(), 1);
+    Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
+
+    restart(true);
+    initFileSystem();
+    xattrs = fs.getXAttrs(path);
+    Assert.assertEquals(xattrs.size(), 1);
+    Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
+
+    fs.removeXAttr(path, name3);
+  }
+
+  /**
+   * Steps:
+   * 1) Set xattrs on a file.
+   * 2) Remove xattrs from that file.
+   * 3) Save a checkpoint and restart NN.
+   * 4) Set xattrs again on the same file.
+   * 5) Remove xattrs from that file.
+   * 6) Restart NN without saving a checkpoint.
+   * 7) Set xattrs again on the same file.
+   */
+  @Test(timeout = 120000)
+  public void testCleanupXAttrs() throws Exception {
+    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
+    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
+    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
+    fs.removeXAttr(path, name1);
+    fs.removeXAttr(path, name2);
+
+    restart(true);
+    initFileSystem();
+
+    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
+    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
+    fs.removeXAttr(path, name1);
+    fs.removeXAttr(path, name2);
+
+    restart(false);
+    initFileSystem();
+
+    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
+    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
+    fs.removeXAttr(path, name1);
+    fs.removeXAttr(path, name2);
+
+    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
+    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
+
+    Map<String, byte[]> xattrs = fs.getXAttrs(path);
+    Assert.assertEquals(xattrs.size(), 2);
+    Assert.assertArrayEquals(value1, xattrs.get(name1));
+    Assert.assertArrayEquals(value2, xattrs.get(name2));
+  }
+
+  /**
+   * Creates a FileSystem for the super-user.
+   *
+   * @return FileSystem for super-user
+   * @throws Exception if creation fails
+   */
+  protected FileSystem createFileSystem() throws Exception {
+    return dfsCluster.getFileSystem();
+  }
+
+  /**
+   * Initializes all FileSystem instances used in the tests.
+   *
+   * @throws Exception if initialization fails
+   */
+  private void initFileSystem() throws Exception {
+    fs = createFileSystem();
+  }
+
+  /**
+   * Initialize the cluster, wait for it to become active, and get FileSystem
+   * instances for our test users.
+   *
+   * @param format if true, format the NameNode and DataNodes before starting up
+   * @throws Exception if any step fails
+   */
+  protected static void initCluster(boolean format) throws Exception {
+    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format)
+      .build();
+    dfsCluster.waitActive();
+  }
+
+  /**
+   * Restart the cluster, optionally saving a new checkpoint.
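+   * Saving a checkpoint exercises the fsimage save/load path for xattrs;
+   * restarting without one replays them from the edit log.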
+ * + * @param checkpoint boolean true to save a new checkpoint + * @throws Exception if restart fails + */ + protected static void restart(boolean checkpoint) throws Exception { + NameNode nameNode = dfsCluster.getNameNode(); + if (checkpoint) { + NameNodeAdapter.enterSafeMode(nameNode, false); + NameNodeAdapter.saveNamespace(nameNode); + } + shutdown(); + initCluster(false); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java new file mode 100644 index 00000000000..3240c5df2b9 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java @@ -0,0 +1,127 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.XAttrSetFlag; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * 1) save xattrs, restart NN, assert xattrs reloaded from edit log, + * 2) save xattrs, create new checkpoint, restart NN, assert xattrs + * reloaded from fsimage + */ +public class TestFSImageWithXAttr { + private static Configuration conf; + private static MiniDFSCluster cluster; + + //xattrs + private static final String name1 = "user.a1"; + private static final byte[] value1 = {0x31, 0x32, 0x33}; + private static final byte[] newValue1 = {0x31, 0x31, 0x31}; + private static final String name2 = "user.a2"; + private static final byte[] value2 = {0x37, 0x38, 0x39}; + + @BeforeClass + public static void setUp() throws IOException { + conf = new Configuration(); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + } + + @AfterClass + public static void tearDown() { + cluster.shutdown(); + } + + private void testXAttr(boolean persistNamespace) throws IOException { + Path path = new Path("/p"); + DistributedFileSystem fs = cluster.getFileSystem(); + fs.create(path).close(); + + fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); + fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE)); + + restart(fs, persistNamespace); + + Map xattrs = fs.getXAttrs(path); + 
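// Both xattrs should survive the restart, whether replayed from the edit log or loaded back from the saved fsimage. +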
Assert.assertEquals(xattrs.size(), 2); + Assert.assertArrayEquals(value1, xattrs.get(name1)); + Assert.assertArrayEquals(value2, xattrs.get(name2)); + + fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.REPLACE)); + + restart(fs, persistNamespace); + + xattrs = fs.getXAttrs(path); + Assert.assertEquals(xattrs.size(), 2); + Assert.assertArrayEquals(newValue1, xattrs.get(name1)); + Assert.assertArrayEquals(value2, xattrs.get(name2)); + + fs.removeXAttr(path, name1); + fs.removeXAttr(path, name2); + + restart(fs, persistNamespace); + xattrs = fs.getXAttrs(path); + Assert.assertEquals(xattrs.size(), 0); + } + + @Test + public void testPersistXAttr() throws IOException { + testXAttr(true); + } + + @Test + public void testXAttrEditLog() throws IOException { + testXAttr(false); + } + + /** + * Restart the NameNode, optionally saving a new checkpoint. + * + * @param fs DistributedFileSystem used for saving namespace + * @param persistNamespace boolean true to save a new checkpoint + * @throws IOException if restart fails + */ + private void restart(DistributedFileSystem fs, boolean persistNamespace) + throws IOException { + if (persistNamespace) { + fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); + fs.saveNamespace(); + fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); + } + + cluster.restartNameNode(); + cluster.waitActive(); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextXAttr.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextXAttr.java new file mode 100644 index 00000000000..da09298b777 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextXAttr.java @@ -0,0 +1,96 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; +import java.net.URI; +import java.util.EnumSet; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileContext; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.XAttrSetFlag; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.junit.BeforeClass; + +/** + * Tests of XAttr operations using FileContext APIs. 
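+ * All test cases from FSXAttrBaseTest are exercised here through the FileContext entry point.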
+ */
+public class TestFileContextXAttr extends FSXAttrBaseTest {
+
+  @Override
+  protected FileSystem createFileSystem() throws Exception {
+    FileContextFS fcFs = new FileContextFS();
+    fcFs.initialize(FileSystem.getDefaultUri(conf), conf);
+    return fcFs;
+  }
+
+  /**
+   * This reuses FSXAttrBaseTest's test cases by creating a FileSystem
+   * implementation that overrides only the xattr-related methods to go
+   * through FileContext. All other operations use the normal FileSystem.
+   */
+  public static class FileContextFS extends DistributedFileSystem {
+
+    private FileContext fc;
+
+    @Override
+    public void initialize(URI uri, Configuration conf) throws IOException {
+      super.initialize(uri, conf);
+      fc = FileContext.getFileContext(conf);
+    }
+
+    @Override
+    public void setXAttr(Path path, final String name, final byte[] value)
+        throws IOException {
+      fc.setXAttr(path, name, value);
+    }
+
+    @Override
+    public void setXAttr(Path path, final String name, final byte[] value,
+        final EnumSet<XAttrSetFlag> flag) throws IOException {
+      fc.setXAttr(path, name, value, flag);
+    }
+
+    @Override
+    public byte[] getXAttr(Path path, final String name) throws IOException {
+      return fc.getXAttr(path, name);
+    }
+
+    @Override
+    public Map<String, byte[]> getXAttrs(Path path) throws IOException {
+      return fc.getXAttrs(path);
+    }
+
+    @Override
+    public Map<String, byte[]> getXAttrs(Path path, final List<String> names)
+        throws IOException {
+      return fc.getXAttrs(path, names);
+    }
+
+    @Override
+    public void removeXAttr(Path path, final String name) throws IOException {
+      fc.removeXAttr(path, name);
+    }
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index b81970663c6..6ab86db4914 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSClient;
@@ -66,6 +67,8 @@ import org.apache.hadoop.util.Time;
 import org.junit.Test;
 import org.mockito.Mockito;
 
+import com.google.common.collect.ImmutableList;
+
 public class TestINodeFile {
   // Re-enable symlinks for tests, see HADOOP-10020 and HADOOP-10052
   static {
@@ -1041,4 +1044,22 @@ public class TestINodeFile {
     file.toCompleteFile(Time.now());
     assertFalse(file.isUnderConstruction());
   }
+
+  @Test
+  public void testXAttrFeature() {
+    replication = 3;
+    preferredBlockSize = 128*1024*1024;
+    INodeFile inf = createINodeFile(replication, preferredBlockSize);
+    ImmutableList.Builder<XAttr> builder = new ImmutableList.Builder<XAttr>();
+    XAttr xAttr = new XAttr.Builder().setNameSpace(XAttr.NameSpace.USER).
+ setName("a1").setValue(new byte[]{0x31, 0x32, 0x33}).build(); + builder.add(xAttr); + XAttrFeature f = new XAttrFeature(builder.build()); + inf.addXAttrFeature(f); + XAttrFeature f1 = inf.getXAttrFeature(); + assertEquals(xAttr, f1.getXAttrs().get(0)); + inf.removeXAttrFeature(); + f1 = inf.getXAttrFeature(); + assertEquals(f1, null); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeXAttr.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeXAttr.java new file mode 100644 index 00000000000..1722e12089a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeXAttr.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.util.Map; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.junit.Assert; +import org.junit.Test; + +/** + * Tests NameNode interaction for all XAttr APIs. + * This test suite covers restarting NN, saving new checkpoint, + * and also includes test of xattrs for symlinks. 
+ */ +public class TestNameNodeXAttr extends FSXAttrBaseTest { + + private static final Path linkParent = new Path("/symdir1"); + private static final Path targetParent = new Path("/symdir2"); + private static final Path link = new Path(linkParent, "link"); + private static final Path target = new Path(targetParent, "target"); + + @Test(timeout = 120000) + public void testXAttrSymlinks() throws Exception { + fs.mkdirs(linkParent); + fs.mkdirs(targetParent); + DFSTestUtil.createFile(fs, target, 1024, (short)3, 0xBEEFl); + fs.createSymlink(target, link, false); + + fs.setXAttr(target, name1, value1); + fs.setXAttr(target, name2, value2); + + Map xattrs = fs.getXAttrs(link); + Assert.assertEquals(xattrs.size(), 2); + Assert.assertArrayEquals(value1, xattrs.get(name1)); + Assert.assertArrayEquals(value2, xattrs.get(name2)); + + fs.setXAttr(link, name3, null); + xattrs = fs.getXAttrs(target); + Assert.assertEquals(xattrs.size(), 3); + Assert.assertArrayEquals(value1, xattrs.get(name1)); + Assert.assertArrayEquals(value2, xattrs.get(name2)); + Assert.assertArrayEquals(new byte[0], xattrs.get(name3)); + + fs.removeXAttr(link, name1); + xattrs = fs.getXAttrs(target); + Assert.assertEquals(xattrs.size(), 2); + Assert.assertArrayEquals(value2, xattrs.get(name2)); + Assert.assertArrayEquals(new byte[0], xattrs.get(name3)); + + fs.removeXAttr(target, name3); + xattrs = fs.getXAttrs(link); + Assert.assertEquals(xattrs.size(), 1); + Assert.assertArrayEquals(value2, xattrs.get(name2)); + + fs.delete(linkParent, true); + fs.delete(targetParent, true); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java index 5e1c1925ee7..d6f38853474 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java @@ -415,7 +415,7 @@ public class TestNamenodeRetryCache { LightWeightCache cacheSet = (LightWeightCache) namesystem.getRetryCache().getCacheSet(); - assertEquals(20, cacheSet.size()); + assertEquals(22, cacheSet.size()); Map oldEntries = new HashMap(); @@ -434,7 +434,7 @@ public class TestNamenodeRetryCache { assertTrue(namesystem.hasRetryCache()); cacheSet = (LightWeightCache) namesystem .getRetryCache().getCacheSet(); - assertEquals(20, cacheSet.size()); + assertEquals(22, cacheSet.size()); iter = cacheSet.iterator(); while (iter.hasNext()) { CacheEntry entry = iter.next(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java index fa24856c840..3ebe46eab79 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java @@ -635,4 +635,68 @@ public class TestStartup { fileSys.delete(name, true); assertTrue(!fileSys.exists(name)); } + + + @Test(timeout = 120000) + public void testXattrConfiguration() throws Exception { + Configuration conf = new HdfsConfiguration(); + MiniDFSCluster cluster = null; + + try { + conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, -1); + cluster = + new 
MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build(); + fail("Expected exception with negative xattr size"); + } catch (IllegalArgumentException e) { + GenericTestUtils.assertExceptionContains( + "Cannot set a negative value for the maximum size of an xattr", e); + } finally { + conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, + DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT); + if (cluster != null) { + cluster.shutdown(); + } + } + + try { + conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, -1); + cluster = + new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build(); + fail("Expected exception with negative # xattrs per inode"); + } catch (IllegalArgumentException e) { + GenericTestUtils.assertExceptionContains( + "Cannot set a negative limit on the number of xattrs per inode", e); + } finally { + conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, + DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT); + if (cluster != null) { + cluster.shutdown(); + } + } + + try { + // Set up a logger to check log message + final LogVerificationAppender appender = new LogVerificationAppender(); + final Logger logger = Logger.getRootLogger(); + logger.addAppender(appender); + int count = appender.countLinesWithMessage( + "Maximum size of an xattr: 0 (unlimited)"); + assertEquals("Expected no messages about unlimited xattr size", 0, count); + + conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, 0); + cluster = + new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build(); + + count = appender.countLinesWithMessage( + "Maximum size of an xattr: 0 (unlimited)"); + // happens twice because we format then run + assertEquals("Expected unlimited xattr size", 2, count); + } finally { + conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, + DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT); + if (cluster != null) { + cluster.shutdown(); + } + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrConfigFlag.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrConfigFlag.java new file mode 100644 index 00000000000..e4fb4bbad9b --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrConfigFlag.java @@ -0,0 +1,150 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; +import org.apache.hadoop.io.IOUtils; +import org.junit.After; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +/** + * Tests that the configuration flag that controls support for XAttrs is off + * and causes all attempted operations related to XAttrs to fail. The + * NameNode can still load XAttrs from fsimage or edits. + */ +public class TestXAttrConfigFlag { + private static final Path PATH = new Path("/path"); + + private MiniDFSCluster cluster; + private DistributedFileSystem fs; + + @Rule + public ExpectedException exception = ExpectedException.none(); + + @After + public void shutdown() throws Exception { + IOUtils.cleanup(null, fs); + if (cluster != null) { + cluster.shutdown(); + } + } + + @Test + public void testSetXAttr() throws Exception { + initCluster(true, false); + fs.mkdirs(PATH); + expectException(); + fs.setXAttr(PATH, "user.foo", null); + } + + @Test + public void testGetXAttrs() throws Exception { + initCluster(true, false); + fs.mkdirs(PATH); + expectException(); + fs.getXAttrs(PATH); + } + + @Test + public void testRemoveXAttr() throws Exception { + initCluster(true, false); + fs.mkdirs(PATH); + expectException(); + fs.removeXAttr(PATH, "user.foo"); + } + + @Test + public void testEditLog() throws Exception { + // With XAttrs enabled, set an XAttr. + initCluster(true, true); + fs.mkdirs(PATH); + fs.setXAttr(PATH, "user.foo", null); + + // Restart with XAttrs disabled. Expect successful restart. + restart(false, false); + } + + @Test + public void testFsImage() throws Exception { + // With XAttrs enabled, set an XAttr. + initCluster(true, true); + fs.mkdirs(PATH); + fs.setXAttr(PATH, "user.foo", null); + + // Save a new checkpoint and restart with XAttrs still enabled. + restart(true, true); + + // Restart with XAttrs disabled. Expect successful restart. + restart(false, false); + } + + /** + * We expect an IOException, and we want the exception text to state the + * configuration key that controls XAttr support. + */ + private void expectException() { + exception.expect(IOException.class); + exception.expectMessage(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY); + } + + /** + * Initialize the cluster, wait for it to become active, and get FileSystem. + * + * @param format if true, format the NameNode and DataNodes before starting up + * @param xattrsEnabled if true, XAttr support is enabled + * @throws Exception if any step fails + */ + private void initCluster(boolean format, boolean xattrsEnabled) + throws Exception { + Configuration conf = new Configuration(); + // not explicitly setting to false, should be false by default + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, xattrsEnabled); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format) + .build(); + cluster.waitActive(); + fs = cluster.getFileSystem(); + } + + /** + * Restart the cluster, optionally saving a new checkpoint. 
+ * + * @param checkpoint boolean true to save a new checkpoint + * @param xattrsEnabled if true, XAttr support is enabled + * @throws Exception if restart fails + */ + private void restart(boolean checkpoint, boolean xattrsEnabled) + throws Exception { + NameNode nameNode = cluster.getNameNode(); + if (checkpoint) { + NameNodeAdapter.enterSafeMode(nameNode, false); + NameNodeAdapter.saveNamespace(nameNode); + } + shutdown(); + initCluster(false, xattrsEnabled); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java index 95e55cc2b41..a34a0365f99 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java @@ -33,6 +33,7 @@ import java.util.HashSet; import java.util.Iterator; import java.util.Map; import java.util.Random; +import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.logging.Log; @@ -45,6 +46,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -126,6 +128,7 @@ public class TestRetryCacheWithHA { conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, ResponseSize); conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, ResponseSize); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true); cluster = new MiniDFSCluster.Builder(conf) .nnTopology(MiniDFSNNTopology.simpleHATopology()) .numDataNodes(DataNodes).build(); @@ -157,7 +160,7 @@ public class TestRetryCacheWithHA { FSNamesystem fsn0 = cluster.getNamesystem(0); LightWeightCache cacheSet = (LightWeightCache) fsn0.getRetryCache().getCacheSet(); - assertEquals(20, cacheSet.size()); + assertEquals(22, cacheSet.size()); Map oldEntries = new HashMap(); @@ -178,7 +181,7 @@ public class TestRetryCacheWithHA { FSNamesystem fsn1 = cluster.getNamesystem(1); cacheSet = (LightWeightCache) fsn1 .getRetryCache().getCacheSet(); - assertEquals(20, cacheSet.size()); + assertEquals(22, cacheSet.size()); iter = cacheSet.iterator(); while (iter.hasNext()) { CacheEntry entry = iter.next(); @@ -1001,6 +1004,48 @@ public class TestRetryCacheWithHA { return null; } } + + /** setXAttr */ + class SetXAttrOp extends AtMostOnceOp { + private final String src; + + SetXAttrOp(DFSClient client, String src) { + super("setXAttr", client); + this.src = src; + } + + @Override + void prepare() throws Exception { + Path p = new Path(src); + if (!dfs.exists(p)) { + DFSTestUtil.createFile(dfs, p, BlockSize, DataNodes, 0); + } + } + + @Override + void invoke() throws Exception { + client.setXAttr(src, "user.key", "value".getBytes(), + EnumSet.of(XAttrSetFlag.CREATE)); + } + + @Override + boolean checkNamenodeBeforeReturn() throws Exception { + for (int i = 0; i < CHECKTIMES; i++) { + Map iter = dfs.getXAttrs(new Path(src)); + Set keySet = iter.keySet(); + if (keySet.contains("user.key")) { + return true; + } + Thread.sleep(1000); + } + 
return false; + } + + @Override + Object getResult() { + return null; + } + } @Test (timeout=60000) public void testCreateSnapshot() throws Exception { @@ -1130,6 +1175,13 @@ public class TestRetryCacheWithHA { AtMostOnceOp op = new RemoveCachePoolOp(client, "pool"); testClientRetryWithFailover(op); } + + @Test (timeout=60000) + public void testSetXAttr() throws Exception { + DFSClient client = genClientWithDummyHandler(); + AtMostOnceOp op = new SetXAttrOp(client, "/setxattr"); + testClientRetryWithFailover(op); + } /** * When NN failover happens, if the client did not receive the response and diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestXAttrsWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestXAttrsWithHA.java new file mode 100644 index 00000000000..db92238a3e1 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestXAttrsWithHA.java @@ -0,0 +1,114 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode.ha; + +import static org.junit.Assert.assertEquals; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.fs.XAttrSetFlag; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.HAUtil; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.MiniDFSNNTopology; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Tests interaction of XAttrs with HA failover. 
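+ * It checks that xattr edits are tailed by the standby and remain readable after a failover.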
+ */
+public class TestXAttrsWithHA {
+  private static final Path path = new Path("/file");
+
+  // XAttrs
+  protected static final String name1 = "user.a1";
+  protected static final byte[] value1 = {0x31, 0x32, 0x33};
+  protected static final byte[] newValue1 = {0x31, 0x31, 0x31};
+  protected static final String name2 = "user.a2";
+  protected static final byte[] value2 = {0x37, 0x38, 0x39};
+  protected static final String name3 = "user.a3";
+
+  private MiniDFSCluster cluster;
+  private NameNode nn0;
+  private NameNode nn1;
+  private FileSystem fs;
+
+  @Before
+  public void setupCluster() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+    HAUtil.setAllowStandbyReads(conf, true);
+
+    cluster = new MiniDFSCluster.Builder(conf)
+      .nnTopology(MiniDFSNNTopology.simpleHATopology())
+      .numDataNodes(1)
+      .waitSafeMode(false)
+      .build();
+    cluster.waitActive();
+
+    nn0 = cluster.getNameNode(0);
+    nn1 = cluster.getNameNode(1);
+    fs = HATestUtil.configureFailoverFs(cluster, conf);
+
+    cluster.transitionToActive(0);
+  }
+
+  @After
+  public void shutdownCluster() throws IOException {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Test that xattrs are properly tracked by the standby.
+   */
+  @Test(timeout = 60000)
+  public void testXAttrsTrackedOnStandby() throws Exception {
+    fs.create(path).close();
+    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
+    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
+
+    HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
+    List<XAttr> xAttrs = nn1.getRpcServer().getXAttrs("/file", null);
+    assertEquals(2, xAttrs.size());
+
+    // Failover the current standby to active.
+    cluster.shutdownNameNode(0);
+    cluster.transitionToActive(1);
+
+    Map<String, byte[]> xattrs = fs.getXAttrs(path);
+    Assert.assertEquals(xattrs.size(), 2);
+    Assert.assertArrayEquals(value1, xattrs.get(name1));
+    Assert.assertArrayEquals(value2, xattrs.get(name2));
+
+    fs.delete(path, true);
+  }
+
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java
new file mode 100644
index 00000000000..87b856e2216
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java
@@ -0,0 +1,371 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.hadoop.hdfs.server.namenode.snapshot; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; + +import java.util.EnumSet; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.XAttrSetFlag; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException; +import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; +import org.apache.hadoop.io.IOUtils; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +/** + * Tests interaction of XAttrs with snapshots. + */ +public class TestXAttrWithSnapshot { + + private static MiniDFSCluster cluster; + private static Configuration conf; + private static DistributedFileSystem hdfs; + private static int pathCount = 0; + private static Path path, snapshotPath; + private static String snapshotName; + // XAttrs + private static final String name1 = "user.a1"; + private static final byte[] value1 = { 0x31, 0x32, 0x33 }; + private static final byte[] newValue1 = { 0x31, 0x31, 0x31 }; + private static final String name2 = "user.a2"; + private static final byte[] value2 = { 0x37, 0x38, 0x39 }; + + @Rule + public ExpectedException exception = ExpectedException.none(); + + @BeforeClass + public static void init() throws Exception { + conf = new Configuration(); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true); + initCluster(true); + } + + @AfterClass + public static void shutdown() throws Exception { + IOUtils.cleanup(null, hdfs); + if (cluster != null) { + cluster.shutdown(); + } + } + + @Before + public void setUp() { + ++pathCount; + path = new Path("/p" + pathCount); + snapshotName = "snapshot" + pathCount; + snapshotPath = new Path(path, new Path(".snapshot", snapshotName)); + } + + /** + * Tests modifying xattrs on a directory that has been snapshotted + */ + @Test (timeout = 120000) + public void testModifyReadsCurrentState() throws Exception { + // Init + FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700)); + SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName); + hdfs.setXAttr(path, name1, value1); + hdfs.setXAttr(path, name2, value2); + + // Verify that current path reflects xattrs, snapshot doesn't + Map xattrs = hdfs.getXAttrs(path); + assertEquals(xattrs.size(), 2); + assertArrayEquals(value1, xattrs.get(name1)); + assertArrayEquals(value2, xattrs.get(name2)); + + xattrs = hdfs.getXAttrs(snapshotPath); + assertEquals(xattrs.size(), 0); + + // Modify each xattr and make sure it's reflected + hdfs.setXAttr(path, name1, value2, EnumSet.of(XAttrSetFlag.REPLACE)); + xattrs = hdfs.getXAttrs(path); + assertEquals(xattrs.size(), 2); + assertArrayEquals(value2, xattrs.get(name1)); + assertArrayEquals(value2, xattrs.get(name2)); + + hdfs.setXAttr(path, name2, value1, EnumSet.of(XAttrSetFlag.REPLACE)); + xattrs = hdfs.getXAttrs(path); + assertEquals(xattrs.size(), 2); + 
assertArrayEquals(value2, xattrs.get(name1)); + assertArrayEquals(value1, xattrs.get(name2)); + + // Paranoia checks + xattrs = hdfs.getXAttrs(snapshotPath); + assertEquals(xattrs.size(), 0); + + hdfs.removeXAttr(path, name1); + hdfs.removeXAttr(path, name2); + xattrs = hdfs.getXAttrs(path); + assertEquals(xattrs.size(), 0); + } + + /** + * Tests removing xattrs on a directory that has been snapshotted + */ + @Test (timeout = 120000) + public void testRemoveReadsCurrentState() throws Exception { + // Init + FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700)); + SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName); + hdfs.setXAttr(path, name1, value1); + hdfs.setXAttr(path, name2, value2); + + // Verify that current path reflects xattrs, snapshot doesn't + Map xattrs = hdfs.getXAttrs(path); + assertEquals(xattrs.size(), 2); + assertArrayEquals(value1, xattrs.get(name1)); + assertArrayEquals(value2, xattrs.get(name2)); + + xattrs = hdfs.getXAttrs(snapshotPath); + assertEquals(xattrs.size(), 0); + + // Remove xattrs and verify one-by-one + hdfs.removeXAttr(path, name2); + xattrs = hdfs.getXAttrs(path); + assertEquals(xattrs.size(), 1); + assertArrayEquals(value1, xattrs.get(name1)); + + hdfs.removeXAttr(path, name1); + xattrs = hdfs.getXAttrs(path); + assertEquals(xattrs.size(), 0); + } + + /** + * 1) Save xattrs, then create snapshot. Assert that inode of original and + * snapshot have same xattrs. 2) Change the original xattrs, assert snapshot + * still has old xattrs. + */ + @Test + public void testXAttrForSnapshotRootAfterChange() throws Exception { + FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700)); + hdfs.setXAttr(path, name1, value1); + hdfs.setXAttr(path, name2, value2); + + SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName); + + // Both original and snapshot have same XAttrs. + Map xattrs = hdfs.getXAttrs(path); + Assert.assertEquals(xattrs.size(), 2); + Assert.assertArrayEquals(value1, xattrs.get(name1)); + Assert.assertArrayEquals(value2, xattrs.get(name2)); + + xattrs = hdfs.getXAttrs(snapshotPath); + Assert.assertEquals(xattrs.size(), 2); + Assert.assertArrayEquals(value1, xattrs.get(name1)); + Assert.assertArrayEquals(value2, xattrs.get(name2)); + + // Original XAttrs have changed, but snapshot still has old XAttrs. + hdfs.setXAttr(path, name1, newValue1); + + doSnapshotRootChangeAssertions(path, snapshotPath); + restart(false); + doSnapshotRootChangeAssertions(path, snapshotPath); + restart(true); + doSnapshotRootChangeAssertions(path, snapshotPath); + } + + private static void doSnapshotRootChangeAssertions(Path path, + Path snapshotPath) throws Exception { + Map xattrs = hdfs.getXAttrs(path); + Assert.assertEquals(xattrs.size(), 2); + Assert.assertArrayEquals(newValue1, xattrs.get(name1)); + Assert.assertArrayEquals(value2, xattrs.get(name2)); + + xattrs = hdfs.getXAttrs(snapshotPath); + Assert.assertEquals(xattrs.size(), 2); + Assert.assertArrayEquals(value1, xattrs.get(name1)); + Assert.assertArrayEquals(value2, xattrs.get(name2)); + } + + /** + * 1) Save xattrs, then create snapshot. Assert that inode of original and + * snapshot have same xattrs. 2) Remove some original xattrs, assert snapshot + * still has old xattrs. 
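+ * The assertions are repeated across NameNode restarts, with and without saving a checkpoint.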
+ */ + @Test + public void testXAttrForSnapshotRootAfterRemove() throws Exception { + FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700)); + hdfs.setXAttr(path, name1, value1); + hdfs.setXAttr(path, name2, value2); + + SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName); + + // Both original and snapshot have same XAttrs. + Map xattrs = hdfs.getXAttrs(path); + Assert.assertEquals(xattrs.size(), 2); + Assert.assertArrayEquals(value1, xattrs.get(name1)); + Assert.assertArrayEquals(value2, xattrs.get(name2)); + + xattrs = hdfs.getXAttrs(snapshotPath); + Assert.assertEquals(xattrs.size(), 2); + Assert.assertArrayEquals(value1, xattrs.get(name1)); + Assert.assertArrayEquals(value2, xattrs.get(name2)); + + // Original XAttrs have been removed, but snapshot still has old XAttrs. + hdfs.removeXAttr(path, name1); + hdfs.removeXAttr(path, name2); + + doSnapshotRootRemovalAssertions(path, snapshotPath); + restart(false); + doSnapshotRootRemovalAssertions(path, snapshotPath); + restart(true); + doSnapshotRootRemovalAssertions(path, snapshotPath); + } + + private static void doSnapshotRootRemovalAssertions(Path path, + Path snapshotPath) throws Exception { + Map xattrs = hdfs.getXAttrs(path); + Assert.assertEquals(xattrs.size(), 0); + + xattrs = hdfs.getXAttrs(snapshotPath); + Assert.assertEquals(xattrs.size(), 2); + Assert.assertArrayEquals(value1, xattrs.get(name1)); + Assert.assertArrayEquals(value2, xattrs.get(name2)); + } + + /** + * Assert exception of setting xattr on read-only snapshot. + */ + @Test + public void testSetXAttrSnapshotPath() throws Exception { + FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700)); + SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName); + exception.expect(SnapshotAccessControlException.class); + hdfs.setXAttr(snapshotPath, name1, value1); + } + + /** + * Assert exception of setting xattr when exceeding quota. + */ + @Test + public void testSetXAttrExceedsQuota() throws Exception { + Path filePath = new Path(path, "file1"); + Path fileSnapshotPath = new Path(snapshotPath, "file1"); + FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0755)); + hdfs.allowSnapshot(path); + hdfs.setQuota(path, 3, HdfsConstants.QUOTA_DONT_SET); + FileSystem.create(hdfs, filePath, + FsPermission.createImmutable((short) 0600)).close(); + hdfs.setXAttr(filePath, name1, value1); + + hdfs.createSnapshot(path, snapshotName); + + byte[] value = hdfs.getXAttr(filePath, name1); + Assert.assertArrayEquals(value, value1); + + value = hdfs.getXAttr(fileSnapshotPath, name1); + Assert.assertArrayEquals(value, value1); + + exception.expect(NSQuotaExceededException.class); + hdfs.setXAttr(filePath, name2, value2); + } + + + /** + * Test that an exception is thrown when adding an XAttr Feature to + * a snapshotted path + */ + @Test + public void testSetXAttrAfterSnapshotExceedsQuota() throws Exception { + Path filePath = new Path(path, "file1"); + FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0755)); + hdfs.allowSnapshot(path); + hdfs.setQuota(path, 3, HdfsConstants.QUOTA_DONT_SET); + FileSystem.create(hdfs, filePath, + FsPermission.createImmutable((short) 0600)).close(); + hdfs.createSnapshot(path, snapshotName); + // This adds an XAttr feature, which can throw an exception + exception.expect(NSQuotaExceededException.class); + hdfs.setXAttr(filePath, name1, value1); + } + + /** + * Assert exception of removing xattr when exceeding quota. 
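+ * (Removing an xattr on a snapshotted file records the prior inode state in the snapshot diff, which can consume namespace quota.)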
+ */ + @Test + public void testRemoveXAttrExceedsQuota() throws Exception { + Path filePath = new Path(path, "file1"); + Path fileSnapshotPath = new Path(snapshotPath, "file1"); + FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0755)); + hdfs.allowSnapshot(path); + hdfs.setQuota(path, 3, HdfsConstants.QUOTA_DONT_SET); + FileSystem.create(hdfs, filePath, + FsPermission.createImmutable((short) 0600)).close(); + hdfs.setXAttr(filePath, name1, value1); + + hdfs.createSnapshot(path, snapshotName); + + byte[] value = hdfs.getXAttr(filePath, name1); + Assert.assertArrayEquals(value, value1); + + value = hdfs.getXAttr(fileSnapshotPath, name1); + Assert.assertArrayEquals(value, value1); + + exception.expect(NSQuotaExceededException.class); + hdfs.removeXAttr(filePath, name1); + } + + /** + * Initialize the cluster, wait for it to become active, and get FileSystem + * instances for our test users. + * + * @param format if true, format the NameNode and DataNodes before starting up + * @throws Exception if any step fails + */ + private static void initCluster(boolean format) throws Exception { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format) + .build(); + cluster.waitActive(); + hdfs = cluster.getFileSystem(); + } + + /** + * Restart the cluster, optionally saving a new checkpoint. + * + * @param checkpoint boolean true to save a new checkpoint + * @throws Exception if restart fails + */ + private static void restart(boolean checkpoint) throws Exception { + NameNode nameNode = cluster.getNameNode(); + if (checkpoint) { + NameNodeAdapter.enterSafeMode(nameNode, false); + NameNodeAdapter.saveNamespace(nameNode); + } + shutdown(); + initCluster(false); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java index d161a1d353a..ffaf193d863 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java @@ -22,16 +22,22 @@ import static org.apache.hadoop.fs.permission.AclEntryType.*; import static org.apache.hadoop.fs.permission.FsAction.*; import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*; +import java.io.IOException; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.fs.XAttrCodec; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.server.namenode.INodeId; @@ -186,6 +192,48 @@ public class TestJsonUtil { JsonUtil.toJsonString(aclStatusBuilder.build())); } + + @Test + public void testToJsonFromXAttrs() throws IOException { + String jsonString = + "{\"XAttrs\":[{\"name\":\"user.a1\",\"value\":\"0x313233\"}," + + "{\"name\":\"user.a2\",\"value\":\"0x313131\"}]}"; + XAttr xAttr1 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER). 
+ setName("a1").setValue(XAttrCodec.decodeValue("0x313233")).build(); + XAttr xAttr2 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER). + setName("a2").setValue(XAttrCodec.decodeValue("0x313131")).build(); + List xAttrs = Lists.newArrayList(); + xAttrs.add(xAttr1); + xAttrs.add(xAttr2); + + Assert.assertEquals(jsonString, JsonUtil.toJsonString(xAttrs, + XAttrCodec.HEX)); + } + + @Test + public void testToXAttrMap() throws IOException { + String jsonString = + "{\"XAttrs\":[{\"name\":\"user.a1\",\"value\":\"0x313233\"}," + + "{\"name\":\"user.a2\",\"value\":\"0x313131\"}]}"; + Map json = (Map)JSON.parse(jsonString); + XAttr xAttr1 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER). + setName("a1").setValue(XAttrCodec.decodeValue("0x313233")).build(); + XAttr xAttr2 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER). + setName("a2").setValue(XAttrCodec.decodeValue("0x313131")).build(); + List xAttrs = Lists.newArrayList(); + xAttrs.add(xAttr1); + xAttrs.add(xAttr2); + Map xAttrMap = XAttrHelper.buildXAttrMap(xAttrs); + Map parsedXAttrMap = JsonUtil.toXAttrs(json); + + Assert.assertEquals(xAttrMap.size(), parsedXAttrMap.size()); + Iterator> iter = xAttrMap.entrySet().iterator(); + while(iter.hasNext()) { + Entry entry = iter.next(); + Assert.assertArrayEquals(entry.getValue(), + parsedXAttrMap.get(entry.getKey())); + } + } private void checkDecodeFailure(Map map) { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSXAttr.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSXAttr.java new file mode 100644 index 00000000000..8b829734ef7 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSXAttr.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.web; + +import org.apache.hadoop.hdfs.server.namenode.FSXAttrBaseTest; + +/** + * Tests XAttr APIs via WebHDFS. + */ +public class TestWebHDFSXAttr extends FSXAttrBaseTest { + /** + * Overridden to provide a WebHdfsFileSystem wrapper for the super-user. 
+ * + * @return WebHdfsFileSystem for super-user + * @throws Exception if creation fails + */ + @Override + protected WebHdfsFileSystem createFileSystem() throws Exception { + return WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java index 4117d9d8a18..1a20739a712 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.web.resources; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; +import java.io.IOException; import java.util.Arrays; import java.util.EnumSet; import java.util.List; @@ -30,6 +31,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.XAttrCodec; +import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -348,6 +351,43 @@ public class TestParam { } } + @Test + public void testXAttrNameParam() { + final XAttrNameParam p = new XAttrNameParam("user.a1"); + Assert.assertEquals(p.getXAttrName(), "user.a1"); + try { + new XAttrNameParam("a1"); + Assert.fail(); + } catch (IllegalArgumentException e) { + LOG.info("EXPECTED: " + e); + } + } + + @Test + public void testXAttrValueParam() throws IOException { + final XAttrValueParam p = new XAttrValueParam("0x313233"); + Assert.assertArrayEquals(p.getXAttrValue(), + XAttrCodec.decodeValue("0x313233")); + } + + @Test + public void testXAttrEncodingParam() { + final XAttrEncodingParam p = new XAttrEncodingParam(XAttrCodec.BASE64); + Assert.assertEquals(p.getEncoding(), XAttrCodec.BASE64); + final XAttrEncodingParam p1 = new XAttrEncodingParam(p.getValueString()); + Assert.assertEquals(p1.getEncoding(), XAttrCodec.BASE64); + } + + @Test + public void testXAttrSetFlagParam() { + EnumSet flag = EnumSet.of( + XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE); + final XAttrSetFlagParam p = new XAttrSetFlagParam(flag); + Assert.assertEquals(p.getFlag(), flag); + final XAttrSetFlagParam p1 = new XAttrSetFlagParam(p.getValueString()); + Assert.assertEquals(p1.getFlag(), flag); + } + @Test public void testRenameOptionSetParam() { final RenameOptionSetParam p = new RenameOptionSetParam( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored index 0b29a5a15ba6620aba5ff45f9776105823eeb814..c41b65be4cd3b3da524866ca03e7e696e96d5a43 100644 GIT binary patch delta 104 zcmX@3a#WS&|NsBTH?o)u@o6(KFt`FS2zV<^{vasIE5yygAjFhtD8+1OWIQ>LSyVWH oOT*Gt2dF{|td4=fM;4?~R)`IvGEyd{L>wr^1C{pGh}5tI0I}>6u>b%7 delta 31 jcmX@AdPari|Ns9dHnNxt35qi?Fz^5|2zVc~>g53dvqcEC diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml index 6b6f298ed01..54f98764908 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml @@ -1,6 +1,6 @@ - -56 + -57 OP_START_LOG_SEGMENT 
@@ -938,9 +938,34 @@ - OP_END_LOG_SEGMENT + OP_SET_XATTR 75 + /file_concat_target + + USER + a1 + 0x313233 + + 9b85a845-bbfa-42f6-8a16-c433614b8eb9 + 80 + + + + OP_REMOVE_XATTR + + 76 + /file_concat_target + + USER + a1 + + + + + OP_END_LOG_SEGMENT + + 77 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml new file mode 100644 index 00000000000..7b7f866ac38 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml @@ -0,0 +1,409 @@ + + + + + + + + test + + + + + setfattr : Add an xattr + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfattr -n user.a1 -v 123456 /file1 + -fs NAMENODE -getfattr -d /file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + # file: /file1 + + + SubstringComparator + user.a1="123456" + + + + + + setfattr : Add an xattr which has wrong prefix + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfattr -n uuu.a1 -v 123456 /file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + name must be prefixed with user/trusted/security/system, followed by a '.' + + + + + + setfattr : Add an xattr of trusted namespace + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfattr -n trusted.a1 -v 123456 /file1 + -fs NAMENODE -getfattr -d /file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + # file: /file1 + + + SubstringComparator + trusted.a1="123456" + + + + + + setfattr : Add an xattr of system namespace + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfattr -n system.a1 -v 123456 /file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + setfattr: User doesn't have permission for xattr: system.a1 + + + + + + setfattr : Add an xattr of security namespace + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfattr -n security.a1 -v 123456 /file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + setfattr: User doesn't have permission for xattr: security.a1 + + + + + + setfattr : Add an xattr, and encode is text + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfattr -n user.a1 -v "123456" /file1 + -fs NAMENODE -getfattr -d /file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + # file: /file1 + + + SubstringComparator + user.a1="123456" + + + + + + setfattr : Add an xattr, and encode is hex + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfattr -n user.a1 -v 0x313233343536 /file1 + -fs NAMENODE -getfattr -d /file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + # file: /file1 + + + SubstringComparator + user.a1="123456" + + + + + + setfattr : Add an xattr, and encode is base64 + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfattr -n user.a1 -v 0sMTIzNDU2 /file1 + -fs NAMENODE -getfattr -d /file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + # file: /file1 + + + SubstringComparator + user.a1="123456" + + + + + + setfattr : Add multiple xattrs + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfattr -n user.a1 -v 123456 /file1 + -fs NAMENODE -setfattr -n user.a2 -v abc /file1 + -fs NAMENODE -getfattr -d /file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + # file: /file1 + + + SubstringComparator + user.a1="123456" + + + SubstringComparator + user.a2="abc" + + + + + + setfattr : Remove an xattr + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfattr -n user.a1 -v 123456 /file1 + -fs NAMENODE -setfattr -n user.a2 -v abc /file1 + -fs NAMENODE -setfattr -x user.a1 /file1 + -fs NAMENODE -getfattr -d /file1 + + + -fs NAMENODE -rm /file1 + + + + 
ExactComparator + # file: /file1#LF#user.a2="abc"#LF# + + + + + + setfattr : Remove an xattr which doesn't exist + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfattr -n user.a1 -v 123456 /file1 + -fs NAMENODE -setfattr -x user.a2 /file1 + -fs NAMENODE -getfattr -d /file1 + + + -fs NAMENODE -rm /file1 + + + + ExactComparator + # file: /file1#LF#user.a1="123456"#LF# + + + + + + getfattr : Get an xattr + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfattr -n user.a1 -v 123456 /file1 + -fs NAMENODE -setfattr -n user.a2 -v abc /file1 + -fs NAMENODE -getfattr -n user.a1 /file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + # file: /file1 + + + SubstringComparator + user.a1="123456" + + + + + + getfattr : Get an xattr which doesn't exist + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -getfattr -n user.a1 /file1 + + + -fs NAMENODE -rm /file1 + + + + ExactComparator + # file: /file1#LF# + + + + + + getfattr : Get an xattr, and encode is text + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfattr -n user.a1 -v 123456 /file1 + -fs NAMENODE -setfattr -n user.a2 -v abc /file1 + -fs NAMENODE -getfattr -n user.a1 -e text /file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + # file: /file1 + + + SubstringComparator + user.a1="123456" + + + + + + getfattr : Get an xattr, and encode is hex + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfattr -n user.a1 -v 123456 /file1 + -fs NAMENODE -setfattr -n user.a2 -v abc /file1 + -fs NAMENODE -getfattr -n user.a1 -e hex /file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + # file: /file1 + + + SubstringComparator + user.a1=0x313233343536 + + + + + + getfattr : Get an xattr, and encode is base64 + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfattr -n user.a1 -v 123456 /file1 + -fs NAMENODE -setfattr -n user.a2 -v abc /file1 + -fs NAMENODE -getfattr -n user.a1 -e base64 /file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + # file: /file1 + + + SubstringComparator + user.a1=0sMTIzNDU2 + + + + + + getfattr : Get an xattr, and encode is invalid + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfattr -n user.a1 -v 123456 /file1 + -fs NAMENODE -setfattr -n user.a2 -v abc /file1 + -fs NAMENODE -getfattr -n user.a1 -e invalid /file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + Invalid/unsupported encoding option specified: invalid + + + + + + getfattr -R : recursive + + -fs NAMENODE -mkdir /dir1 + -fs NAMENODE -setfattr -n user.a1 -v 123456 /dir1 + -fs NAMENODE -mkdir /dir1/dir2 + -fs NAMENODE -setfattr -n user.a2 -v abc /dir1/dir2 + -fs NAMENODE -getfattr -R -d /dir1 + + + -fs NAMENODE -rm -R /dir1 + + + + ExactComparator + # file: /dir1#LF#user.a1="123456"#LF## file: /dir1/dir2#LF#user.a2="abc"#LF# + + + + + + \ No newline at end of file diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml index 953167d6ab1..a4e8799a454 100644 --- a/hadoop-project/src/site/site.xml +++ b/hadoop-project/src/site/site.xml @@ -85,6 +85,7 @@ +