Merge HDFS-2006 HDFS XAttrs branch to Trunk
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1596575 13f79535-47bb-0310-9956-ffa450edef68
parent 011b53be7f
commit ac23a55547
@@ -340,6 +340,24 @@ Trunk (Unreleased)

    HADOOP-8589. ViewFs tests fail when tests and home dirs are nested (sanjay Radia)

  BREAKDOWN OF HADOOP-10514 SUBTASKS AND RELATED JIRAS

    HADOOP-10520. Extended attributes definition and FileSystem APIs for
    extended attributes. (Yi Liu via wang)

    HADOOP-10546. Javadoc and other small fixes for extended attributes in
    hadoop-common. (Charles Lamb via wang)

    HADOOP-10521. FsShell commands for extended attributes. (Yi Liu via wang)

    HADOOP-10548. Improve FsShell xattr error handling and other fixes. (Charles Lamb via umamahesh)

    HADOOP-10567. Shift XAttr value encoding code out for reuse. (Yi Liu via umamahesh)

    HADOOP-10621. Remove CRLF for xattr value base64 encoding for better display. (Yi Liu via umamahesh)

    HADOOP-10575. Small fixes for XAttrCommands and test. (Yi Liu via umamahesh)

Release 2.5.0 - UNRELEASED

  INCOMPATIBLE CHANGES
@@ -17,7 +17,6 @@
 */
package org.apache.hadoop.fs;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.reflect.Constructor;
@@ -1039,6 +1038,163 @@ public abstract class AbstractFileSystem {
        + " doesn't support getAclStatus");
  }

  /**
   * Set an xattr of a file or directory.
   * The name must be prefixed with one of the namespaces
   * user/trusted/security/system, followed by ".". For example, "user.attr".
   * <p/>
   * A regular user can only set an xattr for the "user" namespace.
   * The super user can set an xattr of either the "user" or "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * The access permissions of an xattr in the "user" namespace are
   * defined by the file and directory permission bits.
   * An xattr can only be set when the logged-in user has the correct permissions.
   * If the xattr exists, it will be replaced.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to modify
   * @param name xattr name.
   * @param value xattr value.
   * @throws IOException
   */
  public void setXAttr(Path path, String name, byte[] value)
      throws IOException {
    setXAttr(path, name, value, EnumSet.of(XAttrSetFlag.CREATE,
        XAttrSetFlag.REPLACE));
  }

  /**
   * Set an xattr of a file or directory.
   * The name must be prefixed with one of the namespaces
   * user/trusted/security/system, followed by ".". For example, "user.attr".
   * <p/>
   * A regular user can only set an xattr for the "user" namespace.
   * The super user can set an xattr of either the "user" or "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * The access permissions of an xattr in the "user" namespace are
   * defined by the file and directory permission bits.
   * An xattr can only be set when the logged-in user has the correct permissions.
   * If the xattr exists, it will be replaced.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to modify
   * @param name xattr name.
   * @param value xattr value.
   * @param flag xattr set flag
   * @throws IOException
   */
  public void setXAttr(Path path, String name, byte[] value,
      EnumSet<XAttrSetFlag> flag) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support setXAttr");
  }

  /**
   * Get an xattr for a file or directory.
   * The name must be prefixed with one of the namespaces
   * user/trusted/security/system, followed by ".". For example, "user.attr".
   * <p/>
   * A regular user can only get an xattr for the "user" namespace.
   * The super user can get an xattr of either the "user" or "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * An xattr will only be returned when the logged-in user has the correct permissions.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to get extended attribute
   * @param name xattr name.
   * @return byte[] xattr value.
   * @throws IOException
   */
  public byte[] getXAttr(Path path, String name) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getXAttr");
  }

  /**
   * Get all of the xattrs for a file or directory.
   * Only those xattrs for which the logged-in user has permissions to view
   * are returned.
   * <p/>
   * A regular user can only get xattrs for the "user" namespace.
   * The super user can only get xattrs for the "user" and "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to get extended attributes
   * @return Map<String, byte[]> describing the XAttrs of the file or directory
   * @throws IOException
   */
  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getXAttrs");
  }

  /**
   * Get all of the xattrs for a file or directory.
   * Only those xattrs for which the logged-in user has permissions to view
   * are returned.
   * <p/>
   * A regular user can only get xattrs for the "user" namespace.
   * The super user can only get xattrs for the "user" and "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to get extended attributes
   * @param names XAttr names.
   * @return Map<String, byte[]> describing the XAttrs of the file or directory
   * @throws IOException
   */
  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getXAttrs");
  }

  /**
   * Remove an xattr of a file or directory.
   * The name must be prefixed with one of the namespaces
   * user/trusted/security/system, followed by ".". For example, "user.attr".
   * <p/>
   * A regular user can only remove an xattr for the "user" namespace.
   * The super user can remove an xattr of either the "user" or "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * The access permissions of an xattr in the "user" namespace are
   * defined by the file and directory permission bits.
   * An xattr can only be removed when the logged-in user has the correct permissions.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to remove extended attribute
   * @param name xattr name
   * @throws IOException
   */
  public void removeXAttr(Path path, String name) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support removeXAttr");
  }

  @Override //Object
  public int hashCode() {
    return myUri.hashCode();
@@ -2294,4 +2294,194 @@ public final class FileContext {
      }
    }.resolve(this, absF);
  }

  /**
   * Set an xattr of a file or directory.
   * The name must be prefixed with one of the namespaces
   * user/trusted/security/system, followed by ".". For example, "user.attr".
   * <p/>
   * A regular user can only set an xattr for the "user" namespace.
   * The super user can set an xattr of either the "user" or "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * The access permissions of an xattr in the "user" namespace are
   * defined by the file and directory permission bits.
   * An xattr can only be set when the logged-in user has the correct permissions.
   * If the xattr exists, it will be replaced.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to modify
   * @param name xattr name.
   * @param value xattr value.
   * @throws IOException
   */
  public void setXAttr(Path path, String name, byte[] value)
      throws IOException {
    setXAttr(path, name, value, EnumSet.of(XAttrSetFlag.CREATE,
        XAttrSetFlag.REPLACE));
  }

  /**
   * Set an xattr of a file or directory.
   * The name must be prefixed with one of the namespaces
   * user/trusted/security/system, followed by ".". For example, "user.attr".
   * <p/>
   * A regular user can only set an xattr for the "user" namespace.
   * The super user can set an xattr of either the "user" or "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * The access permissions of an xattr in the "user" namespace are
   * defined by the file and directory permission bits.
   * An xattr can only be set when the logged-in user has the correct permissions.
   * If the xattr exists, it will be replaced.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to modify
   * @param name xattr name.
   * @param value xattr value.
   * @param flag xattr set flag
   * @throws IOException
   */
  public void setXAttr(Path path, final String name, final byte[] value,
      final EnumSet<XAttrSetFlag> flag) throws IOException {
    final Path absF = fixRelativePart(path);
    new FSLinkResolver<Void>() {
      @Override
      public Void next(final AbstractFileSystem fs, final Path p)
          throws IOException {
        fs.setXAttr(p, name, value, flag);
        return null;
      }
    }.resolve(this, absF);
  }

  /**
   * Get an xattr for a file or directory.
   * The name must be prefixed with one of the namespaces
   * user/trusted/security/system, followed by ".". For example, "user.attr".
   * <p/>
   * A regular user can only get an xattr for the "user" namespace.
   * The super user can get an xattr of either the "user" or "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * An xattr will only be returned when the logged-in user has the correct permissions.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to get extended attribute
   * @param name xattr name.
   * @return byte[] xattr value.
   * @throws IOException
   */
  public byte[] getXAttr(Path path, final String name) throws IOException {
    final Path absF = fixRelativePart(path);
    return new FSLinkResolver<byte[]>() {
      @Override
      public byte[] next(final AbstractFileSystem fs, final Path p)
          throws IOException {
        return fs.getXAttr(p, name);
      }
    }.resolve(this, absF);
  }

  /**
   * Get all of the xattrs for a file or directory.
   * Only those xattrs for which the logged-in user has permissions to view
   * are returned.
   * <p/>
   * A regular user can only get xattrs for the "user" namespace.
   * The super user can only get xattrs for the "user" and "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to get extended attributes
   * @return Map<String, byte[]> describing the XAttrs of the file or directory
   * @throws IOException
   */
  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
    final Path absF = fixRelativePart(path);
    return new FSLinkResolver<Map<String, byte[]>>() {
      @Override
      public Map<String, byte[]> next(final AbstractFileSystem fs, final Path p)
          throws IOException {
        return fs.getXAttrs(p);
      }
    }.resolve(this, absF);
  }

  /**
   * Get all of the xattrs for a file or directory.
   * Only those xattrs for which the logged-in user has permissions to view
   * are returned.
   * <p/>
   * A regular user can only get xattrs for the "user" namespace.
   * The super user can only get xattrs for the "user" and "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to get extended attributes
   * @param names XAttr names.
   * @return Map<String, byte[]> describing the XAttrs of the file or directory
   * @throws IOException
   */
  public Map<String, byte[]> getXAttrs(Path path, final List<String> names)
      throws IOException {
    final Path absF = fixRelativePart(path);
    return new FSLinkResolver<Map<String, byte[]>>() {
      @Override
      public Map<String, byte[]> next(final AbstractFileSystem fs, final Path p)
          throws IOException {
        return fs.getXAttrs(p, names);
      }
    }.resolve(this, absF);
  }

  /**
   * Remove an xattr of a file or directory.
   * The name must be prefixed with one of the namespaces
   * user/trusted/security/system, followed by ".". For example, "user.attr".
   * <p/>
   * A regular user can only remove an xattr for the "user" namespace.
   * The super user can remove an xattr of either the "user" or "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * The access permissions of an xattr in the "user" namespace are
   * defined by the file and directory permission bits.
   * An xattr can only be removed when the logged-in user has the correct permissions.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to remove extended attribute
   * @param name xattr name
   * @throws IOException
   */
  public void removeXAttr(Path path, final String name) throws IOException {
    final Path absF = fixRelativePart(path);
    new FSLinkResolver<Void>() {
      @Override
      public Void next(final AbstractFileSystem fs, final Path p)
          throws IOException {
        fs.removeXAttr(p, name);
        return null;
      }
    }.resolve(this, absF);
  }
}
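For reviewers, a minimal usage sketch of the FileContext API added in the hunk above. This assumes an underlying AbstractFileSystem that actually supports xattrs (such as the Hdfs implementation later in this commit); the path, attribute name, and value are illustrative, not part of the commit:

import java.util.EnumSet;
import java.util.Map;

import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;

public class FileContextXAttrSketch {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext();
    Path file = new Path("/tmp/example");  // illustrative path

    // Explicit flags; CREATE|REPLACE is what the 3-arg overload defaults to.
    fc.setXAttr(file, "user.checksum", "abc123".getBytes("utf-8"),
        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));

    byte[] value = fc.getXAttr(file, "user.checksum");  // one attribute
    Map<String, byte[]> all = fc.getXAttrs(file);       // all visible attributes
    fc.removeXAttr(file, "user.checksum");
  }
}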
@@ -2350,6 +2350,164 @@ public abstract class FileSystem extends Configured implements Closeable {
        + " doesn't support getAclStatus");
  }

  /**
   * Set an xattr of a file or directory.
   * The name must be prefixed with one of the namespaces
   * user/trusted/security/system, followed by ".". For example, "user.attr".
   * <p/>
   * A regular user can only set an xattr for the "user" namespace.
   * The super user can set an xattr of either the "user" or "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * The access permissions of an xattr in the "user" namespace are
   * defined by the file and directory permission bits.
   * An xattr can only be set when the logged-in user has the correct permissions.
   * If the xattr exists, it will be replaced.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to modify
   * @param name xattr name.
   * @param value xattr value.
   * @throws IOException
   */
  public void setXAttr(Path path, String name, byte[] value)
      throws IOException {
    setXAttr(path, name, value, EnumSet.of(XAttrSetFlag.CREATE,
        XAttrSetFlag.REPLACE));
  }

  /**
   * Set an xattr of a file or directory.
   * The name must be prefixed with one of the namespaces
   * user/trusted/security/system, followed by ".". For example, "user.attr".
   * <p/>
   * A regular user can only set an xattr for the "user" namespace.
   * The super user can set an xattr of either the "user" or "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * The access permissions of an xattr in the "user" namespace are
   * defined by the file and directory permission bits.
   * An xattr can only be set when the logged-in user has the correct permissions.
   * If the xattr exists, it will be replaced.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to modify
   * @param name xattr name.
   * @param value xattr value.
   * @param flag xattr set flag
   * @throws IOException
   */
  public void setXAttr(Path path, String name, byte[] value,
      EnumSet<XAttrSetFlag> flag) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support setXAttr");
  }

  /**
   * Get an xattr for a file or directory.
   * The name must be prefixed with one of the namespaces
   * user/trusted/security/system, followed by ".". For example, "user.attr".
   * <p/>
   * A regular user can only get an xattr for the "user" namespace.
   * The super user can get an xattr of either the "user" or "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * An xattr will only be returned when the logged-in user has the correct permissions.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to get extended attribute
   * @param name xattr name.
   * @return byte[] xattr value.
   * @throws IOException
   */
  public byte[] getXAttr(Path path, String name) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getXAttr");
  }

  /**
   * Get all of the xattrs for a file or directory.
   * Only those xattrs for which the logged-in user has permissions to view
   * are returned.
   * <p/>
   * A regular user can only get xattrs for the "user" namespace.
   * The super user can only get xattrs for the "user" and "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to get extended attributes
   * @return Map<String, byte[]> describing the XAttrs of the file or directory
   * @throws IOException
   */
  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getXAttrs");
  }

  /**
   * Get all of the xattrs for a file or directory.
   * Only those xattrs for which the logged-in user has permissions to view
   * are returned.
   * <p/>
   * A regular user can only get xattrs for the "user" namespace.
   * The super user can only get xattrs for the "user" and "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to get extended attributes
   * @param names XAttr names.
   * @return Map<String, byte[]> describing the XAttrs of the file or directory
   * @throws IOException
   */
  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getXAttrs");
  }

  /**
   * Remove an xattr of a file or directory.
   * The name must be prefixed with one of the namespaces
   * user/trusted/security/system, followed by ".". For example, "user.attr".
   * <p/>
   * A regular user can only remove an xattr for the "user" namespace.
   * The super user can remove an xattr of either the "user" or "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * The access permissions of an xattr in the "user" namespace are
   * defined by the file and directory permission bits.
   * An xattr can only be removed when the logged-in user has the correct permissions.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to remove extended attribute
   * @param name xattr name
   * @throws IOException
   */
  public void removeXAttr(Path path, String name) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support removeXAttr");
  }

  // making it volatile to be able to do a double checked locking
  private volatile static boolean FILE_SYSTEMS_LOADED = false;
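A corresponding sketch against the FileSystem API added above. Note that the base-class methods in this hunk throw UnsupportedOperationException, so this only works against an implementation that overrides them (DistributedFileSystem after this merge); the path and attribute name are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemXAttrSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/tmp/example");  // illustrative path

    // The 3-arg overload defaults to CREATE|REPLACE, i.e. create-or-replace.
    fs.setXAttr(file, "user.myAttr", "myValue".getBytes("utf-8"));
    byte[] value = fs.getXAttr(file, "user.myAttr");
    fs.removeXAttr(file, "user.myAttr");
  }
}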
@@ -23,6 +23,7 @@ import java.net.URI;
import java.net.URISyntaxException;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -538,4 +539,37 @@ public class FilterFileSystem extends FileSystem {
  public AclStatus getAclStatus(Path path) throws IOException {
    return fs.getAclStatus(path);
  }

  @Override
  public void setXAttr(Path path, String name, byte[] value)
      throws IOException {
    fs.setXAttr(path, name, value);
  }

  @Override
  public void setXAttr(Path path, String name, byte[] value,
      EnumSet<XAttrSetFlag> flag) throws IOException {
    fs.setXAttr(path, name, value, flag);
  }

  @Override
  public byte[] getXAttr(Path path, String name) throws IOException {
    return fs.getXAttr(path, name);
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
    return fs.getXAttrs(path);
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
      throws IOException {
    return fs.getXAttrs(path, names);
  }

  @Override
  public void removeXAttr(Path path, String name) throws IOException {
    fs.removeXAttr(path, name);
  }
}
@@ -22,6 +22,7 @@ import java.net.URI;
import java.net.URISyntaxException;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -316,4 +317,37 @@ public abstract class FilterFs extends AbstractFileSystem {
  public AclStatus getAclStatus(Path path) throws IOException {
    return myFs.getAclStatus(path);
  }

  @Override
  public void setXAttr(Path path, String name, byte[] value)
      throws IOException {
    myFs.setXAttr(path, name, value);
  }

  @Override
  public void setXAttr(Path path, String name, byte[] value,
      EnumSet<XAttrSetFlag> flag) throws IOException {
    myFs.setXAttr(path, name, value, flag);
  }

  @Override
  public byte[] getXAttr(Path path, String name) throws IOException {
    return myFs.getXAttr(path, name);
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
    return myFs.getXAttrs(path);
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
      throws IOException {
    return myFs.getXAttrs(path, names);
  }

  @Override
  public void removeXAttr(Path path, String name) throws IOException {
    myFs.removeXAttr(path, name);
  }
}
@@ -0,0 +1,121 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs;

import java.io.IOException;

import org.apache.commons.codec.DecoderException;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.codec.binary.Hex;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

import com.google.common.base.Preconditions;

/**
 * The value of an <code>XAttr</code> is a byte[]. This class converts a
 * byte[] to a string representation and back. The string representation is
 * convenient for display and input, for example on screen as a shell or
 * JSON response, or as an HTTP or shell parameter.
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public enum XAttrCodec {
  /**
   * Value encoded as text
   * string is enclosed in double quotes (\").
   */
  TEXT,

  /**
   * Value encoded as hexadecimal string
   * is prefixed with 0x.
   */
  HEX,

  /**
   * Value encoded as base64 string
   * is prefixed with 0s.
   */
  BASE64;

  private static final String HEX_PREFIX = "0x";
  private static final String BASE64_PREFIX = "0s";
  private static final Base64 base64 = new Base64(0);

  /**
   * Decode string representation of a value and check whether it's
   * encoded. If the given string begins with 0x or 0X, it expresses
   * a hexadecimal number. If the given string begins with 0s or 0S,
   * base64 encoding is expected. If the given string is enclosed in
   * double quotes, the inner string is treated as text. Otherwise
   * the given string is treated as text.
   * @param value string representation of the value.
   * @return byte[] the value
   * @throws IOException
   */
  public static byte[] decodeValue(String value) throws IOException {
    byte[] result = null;
    if (value != null) {
      if (value.length() >= 2) {
        String en = value.substring(0, 2);
        if (value.startsWith("\"") && value.endsWith("\"")) {
          value = value.substring(1, value.length()-1);
          result = value.getBytes("utf-8");
        } else if (en.equalsIgnoreCase(HEX_PREFIX)) {
          value = value.substring(2, value.length());
          try {
            result = Hex.decodeHex(value.toCharArray());
          } catch (DecoderException e) {
            throw new IOException(e);
          }
        } else if (en.equalsIgnoreCase(BASE64_PREFIX)) {
          value = value.substring(2, value.length());
          result = base64.decode(value);
        }
      }
      if (result == null) {
        result = value.getBytes("utf-8");
      }
    }
    return result;
  }

  /**
   * Encode byte[] value to string representation with encoding.
   * Values encoded as text strings are enclosed in double quotes (\"),
   * while strings encoded as hexadecimal and base64 are prefixed with
   * 0x and 0s, respectively.
   * @param value byte[] value
   * @param encoding the encoding to use
   * @return String string representation of value
   * @throws IOException
   */
  public static String encodeValue(byte[] value, XAttrCodec encoding)
      throws IOException {
    Preconditions.checkNotNull(value, "Value can not be null.");
    if (encoding == HEX) {
      return HEX_PREFIX + Hex.encodeHexString(value);
    } else if (encoding == BASE64) {
      return BASE64_PREFIX + base64.encodeToString(value);
    } else {
      return "\"" + new String(value, "utf-8") + "\"";
    }
  }
}
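A quick round-trip sketch of the XAttrCodec class above; the literal "hello" is illustrative:

import org.apache.hadoop.fs.XAttrCodec;

public class XAttrCodecSketch {
  public static void main(String[] args) throws Exception {
    byte[] raw = "hello".getBytes("utf-8");

    String hex = XAttrCodec.encodeValue(raw, XAttrCodec.HEX);     // "0x68656c6c6f"
    String b64 = XAttrCodec.encodeValue(raw, XAttrCodec.BASE64);  // "0saGVsbG8="
    String txt = XAttrCodec.encodeValue(raw, XAttrCodec.TEXT);    // "\"hello\""

    // decodeValue detects the 0x/0s prefix or surrounding quotes,
    // so all three representations round-trip to the same bytes.
    System.out.println(new String(XAttrCodec.decodeValue(hex), "utf-8"));  // hello
    System.out.println(new String(XAttrCodec.decodeValue(b64), "utf-8"));  // hello
    System.out.println(new String(XAttrCodec.decodeValue(txt), "utf-8"));  // hello
  }
}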
@@ -0,0 +1,71 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs;

import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

@InterfaceAudience.Public
@InterfaceStability.Stable
public enum XAttrSetFlag {
  /**
   * Create a new xattr.
   * If the xattr already exists, an exception will be thrown.
   */
  CREATE((short) 0x01),

  /**
   * Replace an existing xattr.
   * If the xattr does not exist, an exception will be thrown.
   */
  REPLACE((short) 0x02);

  private final short flag;

  private XAttrSetFlag(short flag) {
    this.flag = flag;
  }

  short getFlag() {
    return flag;
  }

  public static void validate(String xAttrName, boolean xAttrExists,
      EnumSet<XAttrSetFlag> flag) throws IOException {
    if (flag == null || flag.isEmpty()) {
      throw new HadoopIllegalArgumentException("A flag must be specified.");
    }

    if (xAttrExists) {
      if (!flag.contains(REPLACE)) {
        throw new IOException("XAttr: " + xAttrName +
            " already exists. The REPLACE flag must be specified.");
      }
    } else {
      if (!flag.contains(CREATE)) {
        throw new IOException("XAttr: " + xAttrName +
            " does not exist. The CREATE flag must be specified.");
      }
    }
  }
}
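A small sketch of the validate() contract above; the attribute name is illustrative:

import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.fs.XAttrSetFlag;

public class XAttrSetFlagSketch {
  public static void main(String[] args) throws IOException {
    // The xattr already exists but only CREATE was requested:
    // validate throws, telling the caller REPLACE must be specified.
    try {
      XAttrSetFlag.validate("user.a1", true, EnumSet.of(XAttrSetFlag.CREATE));
    } catch (IOException expected) {
      System.out.println(expected.getMessage());
    }

    // Passing both flags is create-or-replace: it validates whether or
    // not the xattr exists, which is what the 3-arg setXAttr overloads use.
    XAttrSetFlag.validate("user.a1", true,
        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
    XAttrSetFlag.validate("user.a1", false,
        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
  }
}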
@@ -59,6 +59,7 @@ abstract public class FsCommand extends Command {
    factory.registerCommands(Test.class);
    factory.registerCommands(Touch.class);
    factory.registerCommands(SnapshotCommands.class);
    factory.registerCommands(XAttrCommands.class);
  }

  protected FsCommand() {}
@@ -0,0 +1,188 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.shell;

import java.io.IOException;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
import java.util.Map.Entry;

import com.google.common.base.Enums;
import com.google.common.base.Function;
import com.google.common.base.Preconditions;

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.util.StringUtils;

/**
 * XAttr related operations.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
class XAttrCommands extends FsCommand {
  private static final String GET_FATTR = "getfattr";
  private static final String SET_FATTR = "setfattr";

  public static void registerCommands(CommandFactory factory) {
    factory.addClass(GetfattrCommand.class, "-" + GET_FATTR);
    factory.addClass(SetfattrCommand.class, "-" + SET_FATTR);
  }

  /**
   * Implements the '-getfattr' command for the FsShell.
   */
  public static class GetfattrCommand extends FsCommand {
    public static final String NAME = GET_FATTR;
    public static final String USAGE = "[-R] {-n name | -d} [-e en] <path>";
    public static final String DESCRIPTION =
        "Displays the extended attribute names and values (if any) for a " +
        "file or directory.\n" +
        "-R: Recursively list the attributes for all files and directories.\n" +
        "-n name: Dump the named extended attribute value.\n" +
        "-d: Dump all extended attribute values associated with pathname.\n" +
        "-e <encoding>: Encode values after retrieving them.\n" +
        "Valid encodings are \"text\", \"hex\", and \"base64\".\n" +
        "Values encoded as text strings are enclosed in double quotes (\"),\n" +
        "and values encoded as hexadecimal and base64 are prefixed with\n" +
        "0x and 0s, respectively.\n" +
        "<path>: The file or directory.\n";
    private final static Function<String, XAttrCodec> enValueOfFunc =
        Enums.valueOfFunction(XAttrCodec.class);

    private String name = null;
    private boolean dump = false;
    private XAttrCodec encoding = XAttrCodec.TEXT;

    @Override
    protected void processOptions(LinkedList<String> args) throws IOException {
      name = StringUtils.popOptionWithArgument("-n", args);
      String en = StringUtils.popOptionWithArgument("-e", args);
      if (en != null) {
        encoding = enValueOfFunc.apply(en.toUpperCase());
        Preconditions.checkArgument(encoding != null,
            "Invalid/unsupported encoding option specified: " + en);
      }

      boolean r = StringUtils.popOption("-R", args);
      setRecursive(r);
      dump = StringUtils.popOption("-d", args);

      if (!dump && name == null) {
        throw new HadoopIllegalArgumentException(
            "Must specify '-n name' or '-d' option.");
      }

      if (args.isEmpty()) {
        throw new HadoopIllegalArgumentException("<path> is missing.");
      }
      if (args.size() > 1) {
        throw new HadoopIllegalArgumentException("Too many arguments.");
      }
    }

    @Override
    protected void processPath(PathData item) throws IOException {
      out.println("# file: " + item);
      if (dump) {
        Map<String, byte[]> xattrs = item.fs.getXAttrs(item.path);
        if (xattrs != null) {
          Iterator<Entry<String, byte[]>> iter = xattrs.entrySet().iterator();
          while (iter.hasNext()) {
            Entry<String, byte[]> entry = iter.next();
            printXAttr(entry.getKey(), entry.getValue());
          }
        }
      } else {
        byte[] value = item.fs.getXAttr(item.path, name);
        printXAttr(name, value);
      }
    }

    private void printXAttr(String name, byte[] value) throws IOException {
      if (value != null) {
        if (value.length != 0) {
          out.println(name + "=" + XAttrCodec.encodeValue(value, encoding));
        } else {
          out.println(name);
        }
      }
    }
  }

  /**
   * Implements the '-setfattr' command for the FsShell.
   */
  public static class SetfattrCommand extends FsCommand {
    public static final String NAME = SET_FATTR;
    public static final String USAGE = "{-n name [-v value] | -x name} <path>";
    public static final String DESCRIPTION =
        "Sets an extended attribute name and value for a file or directory.\n" +
        "-n name: The extended attribute name.\n" +
        "-v value: The extended attribute value. There are three different\n" +
        "encoding methods for the value. If the argument is enclosed in double\n" +
        "quotes, then the value is the string inside the quotes. If the\n" +
        "argument is prefixed with 0x or 0X, then it is taken as a hexadecimal\n" +
        "number. If the argument begins with 0s or 0S, then it is taken as a\n" +
        "base64 encoding.\n" +
        "-x name: Remove the extended attribute.\n" +
        "<path>: The file or directory.\n";

    private String name = null;
    private byte[] value = null;
    private String xname = null;

    @Override
    protected void processOptions(LinkedList<String> args) throws IOException {
      name = StringUtils.popOptionWithArgument("-n", args);
      String v = StringUtils.popOptionWithArgument("-v", args);
      if (v != null) {
        value = XAttrCodec.decodeValue(v);
      }
      xname = StringUtils.popOptionWithArgument("-x", args);

      if (name != null && xname != null) {
        throw new HadoopIllegalArgumentException(
            "Can not specify both '-n name' and '-x name' option.");
      }
      if (name == null && xname == null) {
        throw new HadoopIllegalArgumentException(
            "Must specify '-n name' or '-x name' option.");
      }

      if (args.isEmpty()) {
        throw new HadoopIllegalArgumentException("<path> is missing.");
      }
      if (args.size() > 1) {
        throw new HadoopIllegalArgumentException("Too many arguments.");
      }
    }

    @Override
    protected void processPath(PathData item) throws IOException {
      if (name != null) {
        item.fs.setXAttr(item.path, name, value);
      } else if (xname != null) {
        item.fs.removeXAttr(item.path, xname);
      }
    }
  }
}
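The new shell commands can also be driven programmatically via ToolRunner, the same way TestXAttrCommands later in this commit does; the paths here are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class XAttrShellSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Equivalent to: hdfs dfs -setfattr -n user.myAttr -v myValue /file
    int rc = ToolRunner.run(conf, new FsShell(), new String[] {
        "-setfattr", "-n", "user.myAttr", "-v", "myValue", "/file" });
    // Equivalent to: hdfs dfs -getfattr -d /file
    rc = ToolRunner.run(conf, new FsShell(), new String[] {
        "-getfattr", "-d", "/file" });
    System.exit(rc);
  }
}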
@@ -21,6 +21,7 @@ import java.io.IOException;
import java.net.URI;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -37,6 +38,7 @@ import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -313,6 +315,33 @@ class ChRootedFileSystem extends FilterFileSystem {
    return super.getAclStatus(fullPath(path));
  }

  @Override
  public void setXAttr(Path path, String name, byte[] value,
      EnumSet<XAttrSetFlag> flag) throws IOException {
    super.setXAttr(fullPath(path), name, value, flag);
  }

  @Override
  public byte[] getXAttr(Path path, String name) throws IOException {
    return super.getXAttr(fullPath(path), name);
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
    return super.getXAttrs(fullPath(path));
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
      throws IOException {
    return super.getXAttrs(fullPath(path), names);
  }

  @Override
  public void removeXAttr(Path path, String name) throws IOException {
    super.removeXAttr(fullPath(path), name);
  }

  @Override
  public Path resolvePath(final Path p) throws IOException {
    return super.resolvePath(fullPath(p));
@@ -27,6 +27,7 @@ import java.util.Arrays;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Map.Entry;
@@ -46,6 +47,7 @@ import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -519,6 +521,43 @@ public class ViewFileSystem extends FileSystem {
    return res.targetFileSystem.getAclStatus(res.remainingPath);
  }

  @Override
  public void setXAttr(Path path, String name, byte[] value,
      EnumSet<XAttrSetFlag> flag) throws IOException {
    InodeTree.ResolveResult<FileSystem> res =
        fsState.resolve(getUriPath(path), true);
    res.targetFileSystem.setXAttr(res.remainingPath, name, value, flag);
  }

  @Override
  public byte[] getXAttr(Path path, String name) throws IOException {
    InodeTree.ResolveResult<FileSystem> res =
        fsState.resolve(getUriPath(path), true);
    return res.targetFileSystem.getXAttr(res.remainingPath, name);
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
    InodeTree.ResolveResult<FileSystem> res =
        fsState.resolve(getUriPath(path), true);
    return res.targetFileSystem.getXAttrs(res.remainingPath);
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
      throws IOException {
    InodeTree.ResolveResult<FileSystem> res =
        fsState.resolve(getUriPath(path), true);
    return res.targetFileSystem.getXAttrs(res.remainingPath, names);
  }

  @Override
  public void removeXAttr(Path path, String name) throws IOException {
    InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(path),
        true);
    res.targetFileSystem.removeXAttr(res.remainingPath, name);
  }

  @Override
  public void setVerifyChecksum(final boolean verifyChecksum) {
    List<InodeTree.MountPoint<FileSystem>> mountPoints =
@@ -254,6 +254,35 @@ getfacl

   Returns 0 on success and non-zero on error.

getfattr

   Usage: <<<hdfs dfs -getfattr [-R] {-n name | -d} [-e en] <path> >>>

   Displays the extended attribute names and values (if any) for a file or
   directory.

   Options:

     * -R: Recursively list the attributes for all files and directories.

     * -n name: Dump the named extended attribute value.

     * -d: Dump all extended attribute values associated with pathname.

     * -e <encoding>: Encode values after retrieving them. Valid encodings are "text", "hex", and "base64". Values encoded as text strings are enclosed in double quotes ("), and values encoded as hexadecimal and base64 are prefixed with 0x and 0s, respectively.

     * <path>: The file or directory.

   Examples:

     * <<<hdfs dfs -getfattr -d /file>>>

     * <<<hdfs dfs -getfattr -R -n user.myAttr /dir>>>

   Exit Code:

   Returns 0 on success and non-zero on error.

getmerge

   Usage: <<<hdfs dfs -getmerge <src> <localdst> [addnl]>>>
@@ -450,6 +479,36 @@ setfacl

   Returns 0 on success and non-zero on error.

setfattr

   Usage: <<<hdfs dfs -setfattr {-n name [-v value] | -x name} <path> >>>

   Sets an extended attribute name and value for a file or directory.

   Options:

     * -n name: The extended attribute name.

     * -v value: The extended attribute value. There are three different encoding methods for the value. If the argument is enclosed in double quotes, then the value is the string inside the quotes. If the argument is prefixed with 0x or 0X, then it is taken as a hexadecimal number. If the argument begins with 0s or 0S, then it is taken as a base64 encoding.

     * -x name: Remove the extended attribute.

     * <path>: The file or directory.

   Examples:

     * <<<hdfs dfs -setfattr -n user.myAttr -v myValue /file>>>

     * <<<hdfs dfs -setfattr -n user.noValue /file>>>

     * <<<hdfs dfs -setfattr -x user.myAttr /file>>>

   Exit Code:

   Returns 0 on success and non-zero on error.

setrep

   Usage: <<<hdfs dfs -setrep [-R] [-w] <numReplicas> <path> >>>
@@ -36,6 +36,7 @@ import java.lang.reflect.Modifier;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import static org.apache.hadoop.fs.Options.ChecksumOpt;
import static org.apache.hadoop.fs.Options.CreateOpts;
@@ -181,6 +182,21 @@ public class TestHarFileSystem {

    public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException;

    public void setXAttr(Path path, String name, byte[] value)
        throws IOException;

    public void setXAttr(Path path, String name, byte[] value,
        EnumSet<XAttrSetFlag> flag) throws IOException;

    public byte[] getXAttr(Path path, String name) throws IOException;

    public Map<String, byte[]> getXAttrs(Path path) throws IOException;

    public Map<String, byte[]> getXAttrs(Path path, List<String> names)
        throws IOException;

    public void removeXAttr(Path path, String name) throws IOException;

    public AclStatus getAclStatus(Path path) throws IOException;
  }
@@ -0,0 +1,98 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.shell;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class TestXAttrCommands {
  private final ByteArrayOutputStream errContent =
      new ByteArrayOutputStream();
  private Configuration conf = null;
  private PrintStream initialStdErr;

  @Before
  public void setup() throws IOException {
    errContent.reset();
    initialStdErr = System.err;
    System.setErr(new PrintStream(errContent));
    conf = new Configuration();
  }

  @After
  public void cleanUp() throws Exception {
    errContent.reset();
    System.setErr(initialStdErr);
  }

  @Test
  public void testGetfattrValidations() throws Exception {
    errContent.reset();
    assertFalse("getfattr should fail without path",
        0 == runCommand(new String[] { "-getfattr", "-d" }));
    assertTrue(errContent.toString().contains("<path> is missing"));

    errContent.reset();
    assertFalse("getfattr should fail with extra argument",
        0 == runCommand(new String[] { "-getfattr", "extra", "-d", "/test" }));
    assertTrue(errContent.toString().contains("Too many arguments"));

    errContent.reset();
    assertFalse("getfattr should fail without \"-n name\" or \"-d\"",
        0 == runCommand(new String[] { "-getfattr", "/test" }));
    assertTrue(errContent.toString().contains("Must specify '-n name' or '-d' option"));

    errContent.reset();
    assertFalse("getfattr should fail with invalid encoding",
        0 == runCommand(new String[] { "-getfattr", "-d", "-e", "aaa", "/test" }));
    assertTrue(errContent.toString().contains("Invalid/unsupported encoding option specified: aaa"));
  }

  @Test
  public void testSetfattrValidations() throws Exception {
    errContent.reset();
    assertFalse("setfattr should fail without path",
        0 == runCommand(new String[] { "-setfattr", "-n", "user.a1" }));
    assertTrue(errContent.toString().contains("<path> is missing"));

    errContent.reset();
    assertFalse("setfattr should fail with extra arguments",
        0 == runCommand(new String[] { "-setfattr", "extra", "-n", "user.a1", "/test" }));
    assertTrue(errContent.toString().contains("Too many arguments"));

    errContent.reset();
    assertFalse("setfattr should fail without \"-n name\" or \"-x name\"",
        0 == runCommand(new String[] { "-setfattr", "/test" }));
    assertTrue(errContent.toString().contains("Must specify '-n name' or '-x name' option"));
  }

  private int runCommand(String[] commands) throws Exception {
    return ToolRunner.run(conf, new FsShell(), commands);
  }
}
@@ -254,6 +254,69 @@ Trunk (Unreleased)

    HDFS-5794. Fix the inconsistency of layout version number of
    ADD_DATANODE_AND_STORAGE_UUIDS between trunk and branch-2. (jing9)

  BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS

    HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)

    HDFS-6302. Implement XAttr as an INode feature. (Yi Liu via umamahesh)

    HDFS-6309. Javadocs for Xattrs apis in DFSClient and other minor fixups. (Charles Lamb via umamahesh)

    HDFS-6258. Namenode server-side storage for XAttrs. (Yi Liu via umamahesh)

    HDFS-6303. HDFS implementation of FileContext API for XAttrs. (Yi Liu and Charles Lamb via umamahesh)

    HDFS-6324. Shift XAttr helper code out for reuse. (Yi Liu via umamahesh)

    HDFS-6301. NameNode: persist XAttrs in fsimage and record XAttrs modifications to edit log.
    (Yi Liu via umamahesh)

    HDFS-6298. XML based End-to-End test for getfattr and setfattr commands. (Yi Liu via umamahesh)

    HDFS-6314. Test cases for XAttrs. (Yi Liu via umamahesh)

    HDFS-6344. Maximum limit on the size of an xattr. (Yi Liu via umamahesh)

    HDFS-6377. Unify xattr name and value limits into a single limit. (wang)

    HDFS-6373. Remove support for extended attributes on symlinks. (Charles Lamb via wang)

    HDFS-6283. Write end user documentation for xattrs. (wang)

    HDFS-6412. Interface audience and stability annotations missing from
    several new classes related to xattrs. (wang)

    HDFS-6259. Support extended attributes via WebHDFS. (yliu)

    HDFS-6346. Optimize OP_SET_XATTRS by persisting single Xattr entry per setXattr/removeXattr api call.
    (Yi Liu via umamahesh)

    HDFS-6331. ClientProtocol#setXattr should not be annotated idempotent.
    (umamahesh via wang)

    HDFS-6335. TestOfflineEditsViewer for XAttr. (Yi Liu via umamahesh)

    HDFS-6343. Fix TestNamenodeRetryCache and TestRetryCacheWithHA failures. (umamahesh)

    HDFS-6366. FsImage loading failed with RemoveXattr op. (umamahesh)

    HDFS-6357. SetXattr should persist rpcIDs for handling retrycache with Namenode restart and HA.
    (umamahesh)

    HDFS-6372. Handle setXattr rpcIDs for OfflineEditsViewer. (umamahesh)

    HDFS-6410. DFSClient unwraps AclException in xattr methods, but those
    methods cannot throw AclException. (wang)

    HDFS-6413. xattr names erroneously handled as case-insensitive.
    (Charles Lamb via cnauroth)

    HDFS-6414. xattr modification operations are based on state of latest
    snapshot instead of current version of inode. (Andrew Wang via cnauroth)

    HDFS-6374. setXAttr should require the user to be the owner of the file
    or directory. (Charles Lamb via wang)

Release 2.5.0 - UNRELEASED

  INCOMPATIBLE CHANGES
@@ -290,6 +290,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                  <include>NamenodeProtocol.proto</include>
                  <include>QJournalProtocol.proto</include>
                  <include>acl.proto</include>
                  <include>xattr.proto</include>
                  <include>datatransfer.proto</include>
                  <include>fsimage.proto</include>
                  <include>hdfs.proto</include>

@@ -25,6 +25,7 @@ import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;

import org.apache.hadoop.classification.InterfaceAudience;

@@ -414,6 +415,33 @@ public class Hdfs extends AbstractFileSystem {
  public AclStatus getAclStatus(Path path) throws IOException {
    return dfs.getAclStatus(getUriPath(path));
  }

  @Override
  public void setXAttr(Path path, String name, byte[] value,
      EnumSet<XAttrSetFlag> flag) throws IOException {
    dfs.setXAttr(getUriPath(path), name, value, flag);
  }

  @Override
  public byte[] getXAttr(Path path, String name) throws IOException {
    return dfs.getXAttr(getUriPath(path), name);
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
    return dfs.getXAttrs(getUriPath(path));
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
      throws IOException {
    return dfs.getXAttrs(getUriPath(path), names);
  }

  @Override
  public void removeXAttr(Path path, String name) throws IOException {
    dfs.removeXAttr(getUriPath(path), name);
  }

  /**
   * Renew an existing delegation token.

@@ -0,0 +1,149 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs;

import java.util.Arrays;

import org.apache.hadoop.classification.InterfaceAudience;

/**
 * XAttr is the POSIX Extended Attribute model similar to that found in
 * traditional Operating Systems. Extended Attributes consist of one
 * or more name/value pairs associated with a file or directory. Four
 * namespaces are defined: user, trusted, security and system.
 *   1) USER namespace attributes may be used by any user to store
 *   arbitrary information. Access permissions in this namespace are
 *   defined by a file directory's permission bits.
 * <br>
 *   2) TRUSTED namespace attributes are only visible and accessible to
 *   privileged users (a file or directory's owner or the fs
 *   admin). This namespace is available from both user space
 *   (filesystem API) and fs kernel.
 * <br>
 *   3) SYSTEM namespace attributes are used by the fs kernel to store
 *   system objects. This namespace is only available in the fs
 *   kernel. It is not visible to users.
 * <br>
 *   4) SECURITY namespace attributes are used by the fs kernel for
 *   security features. It is not visible to users.
 * <p/>
 * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
 * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
 *
 */
@InterfaceAudience.Private
public class XAttr {

  public static enum NameSpace {
    USER,
    TRUSTED,
    SECURITY,
    SYSTEM;
  }

  private final NameSpace ns;
  private final String name;
  private final byte[] value;

  public static class Builder {
    private NameSpace ns = NameSpace.USER;
    private String name;
    private byte[] value;

    public Builder setNameSpace(NameSpace ns) {
      this.ns = ns;
      return this;
    }

    public Builder setName(String name) {
      this.name = name;
      return this;
    }

    public Builder setValue(byte[] value) {
      this.value = value;
      return this;
    }

    public XAttr build() {
      return new XAttr(ns, name, value);
    }
  }

  private XAttr(NameSpace ns, String name, byte[] value) {
    this.ns = ns;
    this.name = name;
    this.value = value;
  }

  public NameSpace getNameSpace() {
    return ns;
  }

  public String getName() {
    return name;
  }

  public byte[] getValue() {
    return value;
  }

  @Override
  public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + ((name == null) ? 0 : name.hashCode());
    result = prime * result + ((ns == null) ? 0 : ns.hashCode());
    result = prime * result + Arrays.hashCode(value);
    return result;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null) {
      return false;
    }
    if (getClass() != obj.getClass()) {
      return false;
    }
    XAttr other = (XAttr) obj;
    if (name == null) {
      if (other.name != null) {
        return false;
      }
    } else if (!name.equals(other.name)) {
      return false;
    }
    if (ns != other.ns) {
      return false;
    }
    if (!Arrays.equals(value, other.value)) {
      return false;
    }
    return true;
  }

  @Override
  public String toString() {
    return "XAttr [ns=" + ns + ", name=" + name + ", value="
        + Arrays.toString(value) + "]";
  }
}
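
A hypothetical sketch (not part of this patch) of constructing an XAttr with the Builder above; the attribute name and value are made up for illustration.

import org.apache.hadoop.fs.XAttr;

public class XAttrBuilderSketch {
  public static void main(String[] args) {
    // Higher layers split a prefixed name like "user.note" into namespace
    // USER and raw name "note"; the Builder stores them separately.
    XAttr attr = new XAttr.Builder()
        .setNameSpace(XAttr.NameSpace.USER)
        .setName("note")
        .setValue(new byte[] { 0x68, 0x69 }) // "hi"
        .build();
    System.out.println(attr); // XAttr [ns=USER, name=note, value=[104, 105]]
  }
}
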
@@ -109,6 +109,8 @@ import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;

@@ -2757,6 +2759,72 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
          UnresolvedPathException.class);
    }
  }

  public void setXAttr(String src, String name, byte[] value,
      EnumSet<XAttrSetFlag> flag) throws IOException {
    checkOpen();
    try {
      namenode.setXAttr(src, XAttrHelper.buildXAttr(name, value), flag);
    } catch (RemoteException re) {
      throw re.unwrapRemoteException(AccessControlException.class,
          FileNotFoundException.class,
          NSQuotaExceededException.class,
          SafeModeException.class,
          SnapshotAccessControlException.class,
          UnresolvedPathException.class);
    }
  }

  public byte[] getXAttr(String src, String name) throws IOException {
    checkOpen();
    try {
      final List<XAttr> xAttrs = XAttrHelper.buildXAttrAsList(name);
      final List<XAttr> result = namenode.getXAttrs(src, xAttrs);
      return XAttrHelper.getFirstXAttrValue(result);
    } catch(RemoteException re) {
      throw re.unwrapRemoteException(AccessControlException.class,
          FileNotFoundException.class,
          UnresolvedPathException.class);
    }
  }

  public Map<String, byte[]> getXAttrs(String src) throws IOException {
    checkOpen();
    try {
      return XAttrHelper.buildXAttrMap(namenode.getXAttrs(src, null));
    } catch(RemoteException re) {
      throw re.unwrapRemoteException(AccessControlException.class,
          FileNotFoundException.class,
          UnresolvedPathException.class);
    }
  }

  public Map<String, byte[]> getXAttrs(String src, List<String> names)
      throws IOException {
    checkOpen();
    try {
      return XAttrHelper.buildXAttrMap(namenode.getXAttrs(
          src, XAttrHelper.buildXAttrs(names)));
    } catch(RemoteException re) {
      throw re.unwrapRemoteException(AccessControlException.class,
          FileNotFoundException.class,
          UnresolvedPathException.class);
    }
  }

  public void removeXAttr(String src, String name) throws IOException {
    checkOpen();
    try {
      namenode.removeXAttr(src, XAttrHelper.buildXAttr(name));
    } catch(RemoteException re) {
      throw re.unwrapRemoteException(AccessControlException.class,
          FileNotFoundException.class,
          NSQuotaExceededException.class,
          SafeModeException.class,
          SnapshotAccessControlException.class,
          UnresolvedPathException.class);
    }
  }

  @Override // RemotePeerFactory
  public Peer newConnectedPeer(InetSocketAddress addr) throws IOException {

@@ -192,6 +192,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
  public static final String DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT = "supergroup";
  public static final String DFS_NAMENODE_ACLS_ENABLED_KEY = "dfs.namenode.acls.enabled";
  public static final boolean DFS_NAMENODE_ACLS_ENABLED_DEFAULT = false;
  public static final String DFS_NAMENODE_XATTRS_ENABLED_KEY = "dfs.namenode.xattrs.enabled";
  public static final boolean DFS_NAMENODE_XATTRS_ENABLED_DEFAULT = true;
  public static final String DFS_ADMIN = "dfs.cluster.administrators";
  public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.https.server.keystore.resource";
  public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-server.xml";

@@ -295,6 +297,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
  public static final long DFS_NAMENODE_MIN_BLOCK_SIZE_DEFAULT = 1024*1024;
  public static final String DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY = "dfs.namenode.fs-limits.max-blocks-per-file";
  public static final long DFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT = 1024*1024;
  public static final String DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY = "dfs.namenode.fs-limits.max-xattrs-per-inode";
  public static final int DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT = 32;
  public static final String DFS_NAMENODE_MAX_XATTR_SIZE_KEY = "dfs.namenode.fs-limits.max-xattr-size";
  public static final int DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT = 16384;


  //Following keys have no defaults
  public static final String DFS_DATANODE_DATA_DIR_KEY = "dfs.datanode.data.dir";

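
The new keys above gate the feature and bound per-inode usage. A hedged sketch of overriding them in an embedded or test Configuration follows; the chosen values are arbitrary assumptions, not recommendations from this patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class XAttrLimitsSketch {
  public static Configuration withTightXAttrLimits() {
    Configuration conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
    // Allow at most 8 xattrs per inode, each at most 4 KB.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 8);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, 4096);
    return conf;
  }
}
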
@@ -25,6 +25,7 @@ import java.net.URI;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

@@ -46,6 +47,7 @@ import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;

@@ -1769,4 +1771,91 @@ public class DistributedFileSystem extends FileSystem {
      }
    }.resolve(this, absF);
  }

  @Override
  public void setXAttr(Path path, final String name, final byte[] value,
      final EnumSet<XAttrSetFlag> flag) throws IOException {
    Path absF = fixRelativePart(path);
    new FileSystemLinkResolver<Void>() {

      @Override
      public Void doCall(final Path p) throws IOException {
        dfs.setXAttr(getPathName(p), name, value, flag);
        return null;
      }

      @Override
      public Void next(final FileSystem fs, final Path p) throws IOException {
        fs.setXAttr(p, name, value, flag);
        return null;
      }
    }.resolve(this, absF);
  }

  @Override
  public byte[] getXAttr(Path path, final String name) throws IOException {
    final Path absF = fixRelativePart(path);
    return new FileSystemLinkResolver<byte[]>() {
      @Override
      public byte[] doCall(final Path p) throws IOException {
        return dfs.getXAttr(getPathName(p), name);
      }
      @Override
      public byte[] next(final FileSystem fs, final Path p)
          throws IOException, UnresolvedLinkException {
        return fs.getXAttr(p, name);
      }
    }.resolve(this, absF);
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
    final Path absF = fixRelativePart(path);
    return new FileSystemLinkResolver<Map<String, byte[]>>() {
      @Override
      public Map<String, byte[]> doCall(final Path p) throws IOException {
        return dfs.getXAttrs(getPathName(p));
      }
      @Override
      public Map<String, byte[]> next(final FileSystem fs, final Path p)
          throws IOException, UnresolvedLinkException {
        return fs.getXAttrs(p);
      }
    }.resolve(this, absF);
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path, final List<String> names)
      throws IOException {
    final Path absF = fixRelativePart(path);
    return new FileSystemLinkResolver<Map<String, byte[]>>() {
      @Override
      public Map<String, byte[]> doCall(final Path p) throws IOException {
        return dfs.getXAttrs(getPathName(p), names);
      }
      @Override
      public Map<String, byte[]> next(final FileSystem fs, final Path p)
          throws IOException, UnresolvedLinkException {
        return fs.getXAttrs(p, names);
      }
    }.resolve(this, absF);
  }

  @Override
  public void removeXAttr(Path path, final String name) throws IOException {
    Path absF = fixRelativePart(path);
    new FileSystemLinkResolver<Void>() {
      @Override
      public Void doCall(final Path p) throws IOException {
        dfs.removeXAttr(getPathName(p), name);
        return null;
      }

      @Override
      public Void next(final FileSystem fs, final Path p) throws IOException {
        fs.removeXAttr(p, name);
        return null;
      }
    }.resolve(this, absF);
  }
}
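
End to end, the client surface is the FileSystem API. A minimal usage sketch, assuming fs.defaultFS points at an HDFS cluster and that /test already exists; the path and attribute names are illustrative.

import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class XAttrClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path p = new Path("/test");
    // Set, read back, enumerate, then remove a "user" namespace xattr.
    fs.setXAttr(p, "user.origin", "imported".getBytes("UTF-8"));
    byte[] v = fs.getXAttr(p, "user.origin");
    for (Map.Entry<String, byte[]> e : fs.getXAttrs(p).entrySet()) {
      System.out.println(e.getKey() + " = " + new String(e.getValue(), "UTF-8"));
    }
    fs.removeXAttr(p, "user.origin");
  }
}
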
@@ -0,0 +1,164 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs;

import java.util.List;
import java.util.Map;

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttr.NameSpace;

import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;

@InterfaceAudience.Private
public class XAttrHelper {

  /**
   * Build <code>XAttr</code> from xattr name with prefix.
   */
  public static XAttr buildXAttr(String name) {
    return buildXAttr(name, null);
  }

  /**
   * Build <code>XAttr</code> from name with prefix and value.
   * Name can not be null. Value can be null. The name and prefix
   * are validated.
   * Both name and namespace are case sensitive.
   */
  public static XAttr buildXAttr(String name, byte[] value) {
    Preconditions.checkNotNull(name, "XAttr name cannot be null.");

    final int prefixIndex = name.indexOf(".");
    if (prefixIndex < 4) { // Prefix length is at least 4.
      throw new HadoopIllegalArgumentException("An XAttr name must be " +
          "prefixed with user/trusted/security/system, followed by a '.'");
    } else if (prefixIndex == name.length() - 1) {
      throw new HadoopIllegalArgumentException("XAttr name cannot be empty.");
    }

    NameSpace ns;
    final String prefix = name.substring(0, prefixIndex).toLowerCase();
    if (prefix.equals(NameSpace.USER.toString().toLowerCase())) {
      ns = NameSpace.USER;
    } else if (prefix.equals(NameSpace.TRUSTED.toString().toLowerCase())) {
      ns = NameSpace.TRUSTED;
    } else if (prefix.equals(NameSpace.SYSTEM.toString().toLowerCase())) {
      ns = NameSpace.SYSTEM;
    } else if (prefix.equals(NameSpace.SECURITY.toString().toLowerCase())) {
      ns = NameSpace.SECURITY;
    } else {
      throw new HadoopIllegalArgumentException("An XAttr name must be " +
          "prefixed with user/trusted/security/system, followed by a '.'");
    }
    XAttr xAttr = (new XAttr.Builder()).setNameSpace(ns).setName(name.
        substring(prefixIndex + 1)).setValue(value).build();

    return xAttr;
  }

  /**
   * Build xattr name with prefix as <code>XAttr</code> list.
   */
  public static List<XAttr> buildXAttrAsList(String name) {
    XAttr xAttr = buildXAttr(name);
    List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
    xAttrs.add(xAttr);

    return xAttrs;
  }

  /**
   * Get value of first xattr from <code>XAttr</code> list.
   */
  public static byte[] getFirstXAttrValue(List<XAttr> xAttrs) {
    byte[] value = null;
    XAttr xAttr = getFirstXAttr(xAttrs);
    if (xAttr != null) {
      value = xAttr.getValue();
      if (value == null) {
        value = new byte[0]; // xattr exists, but no value.
      }
    }
    return value;
  }

  /**
   * Get first xattr from <code>XAttr</code> list.
   */
  public static XAttr getFirstXAttr(List<XAttr> xAttrs) {
    if (xAttrs != null && !xAttrs.isEmpty()) {
      return xAttrs.get(0);
    }

    return null;
  }

  /**
   * Build xattr map from <code>XAttr</code> list; the key is the
   * xattr name with prefix, and the value is the xattr value.
   */
  public static Map<String, byte[]> buildXAttrMap(List<XAttr> xAttrs) {
    if (xAttrs == null) {
      return null;
    }
    Map<String, byte[]> xAttrMap = Maps.newHashMap();
    for (XAttr xAttr : xAttrs) {
      String name = getPrefixName(xAttr);
      byte[] value = xAttr.getValue();
      if (value == null) {
        value = new byte[0];
      }
      xAttrMap.put(name, value);
    }

    return xAttrMap;
  }

  /**
   * Get name with prefix from <code>XAttr</code>.
   */
  public static String getPrefixName(XAttr xAttr) {
    if (xAttr == null) {
      return null;
    }

    String namespace = xAttr.getNameSpace().toString();
    return namespace.toLowerCase() + "." + xAttr.getName();
  }

  /**
   * Build <code>XAttr</code> list from xattr name list.
   */
  public static List<XAttr> buildXAttrs(List<String> names) {
    if (names == null || names.isEmpty()) {
      throw new HadoopIllegalArgumentException("XAttr names can not be " +
          "null or empty.");
    }

    List<XAttr> xAttrs = Lists.newArrayListWithCapacity(names.size());
    for (String name : names) {
      xAttrs.add(buildXAttr(name, null));
    }
    return xAttrs;
  }
}
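
A hypothetical round trip through the helper above: buildXAttr() splits the prefixed name, getPrefixName() reassembles it, and an unrecognized prefix is rejected. Names here are made up for illustration.

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.XAttrHelper;

public class XAttrHelperSketch {
  public static void main(String[] args) {
    XAttr a = XAttrHelper.buildXAttr("user.a1", new byte[] { 1 });
    System.out.println(a.getNameSpace() + " / " + a.getName()); // USER / a1
    System.out.println(XAttrHelper.getPrefixName(a));           // user.a1
    try {
      XAttrHelper.buildXAttr("bogus.a1"); // unknown namespace prefix
    } catch (HadoopIllegalArgumentException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}
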
@@ -31,10 +31,12 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;

@@ -1254,4 +1256,66 @@ public interface ClientProtocol {
   */
  @Idempotent
  public AclStatus getAclStatus(String src) throws IOException;

  /**
   * Set an xattr of a file or directory.
   * A regular user can only set an xattr for the "user" namespace.
   * The super user can set an xattr of either the "user" or "trusted"
   * namespaces. XAttrs of the "security" and "system" namespaces are only
   * used/exposed internally by/to the FS impl.
   * <p/>
   * For an xattr of the "user" namespace, its access permissions are
   * defined by the file or directory permission bits.
   * The xattr will be set only when the logged-in user has the correct
   * permissions.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   * @param src file or directory
   * @param xAttr <code>XAttr</code> to set
   * @param flag set flag
   * @throws IOException
   */
  @AtMostOnce
  public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
      throws IOException;

  /**
   * Get xattrs of a file or directory. Values in the xAttrs parameter are
   * ignored. If xAttrs is null or empty, this is equivalent to getting all
   * xattrs of the file or directory.
   * Only xattrs for which the logged-in user has the correct permissions
   * will be returned.
   * <p/>
   * A regular user can only get xattrs for the "user" namespace.
   * The super user can get xattrs of either the "user" or "trusted"
   * namespaces. XAttrs of the "security" and "system" namespaces are only
   * used/exposed internally by/to the FS impl.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   * @param src file or directory
   * @param xAttrs xAttrs to get
   * @return List<XAttr> <code>XAttr</code> list
   * @throws IOException
   */
  @Idempotent
  public List<XAttr> getXAttrs(String src, List<XAttr> xAttrs)
      throws IOException;

  /**
   * Remove an xattr of a file or directory. The value in the xAttr
   * parameter is ignored. The name must be prefixed with
   * user/trusted/security/system.
   * <p/>
   * A regular user can only remove an xattr for the "user" namespace.
   * The super user can remove an xattr of either the "user" or "trusted"
   * namespaces. XAttrs of the "security" and "system" namespaces are only
   * used/exposed internally by/to the FS impl.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   * @param src file or directory
   * @param xAttr <code>XAttr</code> to remove
   * @throws IOException
   */
  @Idempotent
  public void removeXAttr(String src, XAttr xAttr) throws IOException;
}

@@ -174,6 +174,12 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrResponseProto;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.INodeId;

@@ -302,6 +308,12 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements

  private static final RemoveAclResponseProto
      VOID_REMOVEACL_RESPONSE = RemoveAclResponseProto.getDefaultInstance();

  private static final SetXAttrResponseProto
      VOID_SETXATTR_RESPONSE = SetXAttrResponseProto.getDefaultInstance();

  private static final RemoveXAttrResponseProto
      VOID_REMOVEXATTR_RESPONSE = RemoveXAttrResponseProto.getDefaultInstance();

  /**
   * Constructor

@@ -1262,4 +1274,38 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
      throw new ServiceException(e);
    }
  }

  @Override
  public SetXAttrResponseProto setXAttr(RpcController controller,
      SetXAttrRequestProto req) throws ServiceException {
    try {
      server.setXAttr(req.getSrc(), PBHelper.convertXAttr(req.getXAttr()),
          PBHelper.convert(req.getFlag()));
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return VOID_SETXATTR_RESPONSE;
  }

  @Override
  public GetXAttrsResponseProto getXAttrs(RpcController controller,
      GetXAttrsRequestProto req) throws ServiceException {
    try {
      return PBHelper.convertXAttrsResponse(server.getXAttrs(req.getSrc(),
          PBHelper.convertXAttrs(req.getXAttrsList())));
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

  @Override
  public RemoveXAttrResponseProto removeXAttr(RpcController controller,
      RemoveXAttrRequestProto req) throws ServiceException {
    try {
      server.removeXAttr(req.getSrc(), PBHelper.convertXAttr(req.getXAttr()));
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return VOID_REMOVEXATTR_RESPONSE;
  }
}

@@ -35,6 +35,8 @@ import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;

@@ -141,6 +143,9 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSaf
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;

@@ -1268,4 +1273,47 @@ public class ClientNamenodeProtocolTranslatorPB implements
      throw ProtobufHelper.getRemoteException(e);
    }
  }

  @Override
  public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
      throws IOException {
    SetXAttrRequestProto req = SetXAttrRequestProto.newBuilder()
        .setSrc(src)
        .setXAttr(PBHelper.convertXAttrProto(xAttr))
        .setFlag(PBHelper.convert(flag))
        .build();
    try {
      rpcProxy.setXAttr(null, req);
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
  }

  @Override
  public List<XAttr> getXAttrs(String src, List<XAttr> xAttrs)
      throws IOException {
    GetXAttrsRequestProto.Builder builder = GetXAttrsRequestProto.newBuilder();
    builder.setSrc(src);
    if (xAttrs != null) {
      builder.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs));
    }
    GetXAttrsRequestProto req = builder.build();
    try {
      return PBHelper.convert(rpcProxy.getXAttrs(null, req));
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
  }

  @Override
  public void removeXAttr(String src, XAttr xAttr) throws IOException {
    RemoveXAttrRequestProto req = RemoveXAttrRequestProto
        .newBuilder().setSrc(src)
        .setXAttr(PBHelper.convertXAttrProto(xAttr)).build();
    try {
      rpcProxy.removeXAttr(null, req);
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
  }
}

@@ -32,6 +32,8 @@ import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;

@@ -150,6 +152,10 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.XAttrNamespaceProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrSetFlagProto;
import org.apache.hadoop.hdfs.security.token.block.BlockKey;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;

@@ -221,6 +227,8 @@ public class PBHelper {
      AclEntryType.values();
  private static final FsAction[] FSACTION_VALUES =
      FsAction.values();
  private static final XAttr.NameSpace[] XATTR_NAMESPACE_VALUES =
      XAttr.NameSpace.values();

  private PBHelper() {
    /** Hidden constructor */

@@ -2007,6 +2015,14 @@ public class PBHelper {
  private static AclEntryType convert(AclEntryTypeProto v) {
    return castEnum(v, ACL_ENTRY_TYPE_VALUES);
  }

  private static XAttrNamespaceProto convert(XAttr.NameSpace v) {
    return XAttrNamespaceProto.valueOf(v.ordinal());
  }

  private static XAttr.NameSpace convert(XAttrNamespaceProto v) {
    return castEnum(v, XATTR_NAMESPACE_VALUES);
  }

  private static FsActionProto convert(FsAction v) {
    return FsActionProto.valueOf(v != null ? v.ordinal() : 0);

@@ -2060,6 +2076,108 @@ public class PBHelper {
        .addAllEntries(convertAclEntryProto(e.getEntries())).build();
    return GetAclStatusResponseProto.newBuilder().setResult(r).build();
  }

  public static XAttrProto convertXAttrProto(XAttr a) {
    XAttrProto.Builder builder = XAttrProto.newBuilder();
    builder.setNamespace(convert(a.getNameSpace()));
    if (a.getName() != null) {
      builder.setName(a.getName());
    }
    if (a.getValue() != null) {
      builder.setValue(getByteString(a.getValue()));
    }
    return builder.build();
  }

  public static List<XAttrProto> convertXAttrProto(
      List<XAttr> xAttrSpec) {
    ArrayList<XAttrProto> xAttrs = Lists.newArrayListWithCapacity(
        xAttrSpec.size());
    for (XAttr a : xAttrSpec) {
      XAttrProto.Builder builder = XAttrProto.newBuilder();
      builder.setNamespace(convert(a.getNameSpace()));
      if (a.getName() != null) {
        builder.setName(a.getName());
      }
      if (a.getValue() != null) {
        builder.setValue(getByteString(a.getValue()));
      }
      xAttrs.add(builder.build());
    }
    return xAttrs;
  }

  /**
   * The flag field in PB is a bitmask whose values are the same as the
   * enum values of XAttrSetFlag.
   */
  public static int convert(EnumSet<XAttrSetFlag> flag) {
    int value = 0;
    if (flag.contains(XAttrSetFlag.CREATE)) {
      value |= XAttrSetFlagProto.XATTR_CREATE.getNumber();
    }
    if (flag.contains(XAttrSetFlag.REPLACE)) {
      value |= XAttrSetFlagProto.XATTR_REPLACE.getNumber();
    }
    return value;
  }

  public static EnumSet<XAttrSetFlag> convert(int flag) {
    EnumSet<XAttrSetFlag> result =
        EnumSet.noneOf(XAttrSetFlag.class);
    if ((flag & XAttrSetFlagProto.XATTR_CREATE_VALUE) ==
        XAttrSetFlagProto.XATTR_CREATE_VALUE) {
      result.add(XAttrSetFlag.CREATE);
    }
    if ((flag & XAttrSetFlagProto.XATTR_REPLACE_VALUE) ==
        XAttrSetFlagProto.XATTR_REPLACE_VALUE) {
      result.add(XAttrSetFlag.REPLACE);
    }
    return result;
  }

  public static XAttr convertXAttr(XAttrProto a) {
    XAttr.Builder builder = new XAttr.Builder();
    builder.setNameSpace(convert(a.getNamespace()));
    if (a.hasName()) {
      builder.setName(a.getName());
    }
    if (a.hasValue()) {
      builder.setValue(a.getValue().toByteArray());
    }
    return builder.build();
  }

  public static List<XAttr> convertXAttrs(List<XAttrProto> xAttrSpec) {
    ArrayList<XAttr> xAttrs = Lists.newArrayListWithCapacity(xAttrSpec.size());
    for (XAttrProto a : xAttrSpec) {
      XAttr.Builder builder = new XAttr.Builder();
      builder.setNameSpace(convert(a.getNamespace()));
      if (a.hasName()) {
        builder.setName(a.getName());
      }
      if (a.hasValue()) {
        builder.setValue(a.getValue().toByteArray());
      }
      xAttrs.add(builder.build());
    }
    return xAttrs;
  }

  public static List<XAttr> convert(GetXAttrsResponseProto a) {
    List<XAttrProto> xAttrs = a.getXAttrsList();
    return convertXAttrs(xAttrs);
  }

  public static GetXAttrsResponseProto convertXAttrsResponse(
      List<XAttr> xAttrs) {
    GetXAttrsResponseProto.Builder builder = GetXAttrsResponseProto
        .newBuilder();
    if (xAttrs != null) {
      builder.addAllXAttrs(convertXAttrProto(xAttrs));
    }
    return builder.build();
  }

  public static ShortCircuitShmSlotProto convert(SlotId slotId) {
    return ShortCircuitShmSlotProto.newBuilder().
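
A hedged sketch of the bitmask round trip implemented above. The printed integer assumes the proto assigns XATTR_CREATE=1 and XATTR_REPLACE=2; only the round-trip property itself is guaranteed by the code.

import java.util.EnumSet;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;

public class XAttrFlagWireSketch {
  public static void main(String[] args) {
    EnumSet<XAttrSetFlag> flags =
        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE);
    int wire = PBHelper.convert(flags);        // bitmask for the RPC message
    EnumSet<XAttrSetFlag> back = PBHelper.convert(wire);
    System.out.println(wire + " -> " + back);  // e.g. "3 -> [CREATE, REPLACE]"
  }
}
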
@@ -1,56 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode;

import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.AclException;

/**
 * Support for ACLs is controlled by a configuration flag. If the configuration
 * flag is false, then the NameNode will reject all ACL-related operations.
 */
final class AclConfigFlag {
  private final boolean enabled;

  /**
   * Creates a new AclConfigFlag from configuration.
   *
   * @param conf Configuration to check
   */
  public AclConfigFlag(Configuration conf) {
    enabled = conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,
        DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_DEFAULT);
    LogFactory.getLog(AclConfigFlag.class).info("ACLs enabled? " + enabled);
  }

  /**
   * Checks the flag on behalf of an ACL API call.
   *
   * @throws AclException if ACLs are disabled
   */
  public void checkForApiCall() throws AclException {
    if (!enabled) {
      throw new AclException(String.format(
          "The ACL operation has been rejected. "
              + "Support for ACLs has been disabled by setting %s to false.",
          DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY));
    }
  }
}

@@ -24,6 +24,7 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;

@@ -39,6 +40,8 @@ import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;

@@ -47,6 +50,7 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

@@ -79,6 +83,7 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;

/*************************************************
 * FSDirectory stores the filesystem directory state.

@@ -125,6 +130,7 @@ public class FSDirectory implements Closeable {
  private final int contentCountLimit; // max content summary counts per run
  private final INodeMap inodeMap; // Synchronized by dirLock
  private long yieldCount = 0; // keep track of lock yield count.
  private final int inodeXAttrsLimit; // inode xattrs max limit

  // lock to protect the directory and BlockMap
  private final ReentrantReadWriteLock dirLock;

@@ -190,6 +196,12 @@ public class FSDirectory implements Closeable {
    this.maxDirItems = conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY,
        DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_DEFAULT);
    this.inodeXAttrsLimit = conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY,
        DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT);
    Preconditions.checkArgument(this.inodeXAttrsLimit >= 0,
        "Cannot set a negative limit on the number of xattrs per inode (%s).",
        DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY);
    // We need a maximum maximum because by default, PB limits message sizes
    // to 64MB. This means we can only store approximately 6.7 million entries
    // per directory, but let's use 6.4 million for some safety.

@@ -2856,6 +2868,116 @@ public class FSDirectory implements Closeable {
      readUnlock();
    }
  }

  void removeXAttr(String src, XAttr xAttr) throws IOException {
    writeLock();
    try {
      XAttr removedXAttr = unprotectedRemoveXAttr(src, xAttr);
      if (removedXAttr != null) {
        fsImage.getEditLog().logRemoveXAttr(src, removedXAttr);
      } else {
        NameNode.stateChangeLog.info("DIR* FSDirectory.removeXAttr: XAttr " +
            XAttrHelper.getPrefixName(xAttr) +
            " does not exist on the path " + src);
      }
    } finally {
      writeUnlock();
    }
  }

  XAttr unprotectedRemoveXAttr(String src,
      XAttr xAttr) throws IOException {
    assert hasWriteLock();
    INodesInPath iip = getINodesInPath4Write(normalizePath(src), true);
    INode inode = resolveLastINode(src, iip);
    int snapshotId = iip.getLatestSnapshotId();
    List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
    List<XAttr> newXAttrs = filterINodeXAttr(existingXAttrs, xAttr);
    if (existingXAttrs.size() != newXAttrs.size()) {
      XAttrStorage.updateINodeXAttrs(inode, newXAttrs, snapshotId);
      return xAttr;
    }
    return null;
  }

  List<XAttr> filterINodeXAttr(List<XAttr> existingXAttrs,
      XAttr xAttr) throws QuotaExceededException {
    if (existingXAttrs == null || existingXAttrs.isEmpty()) {
      return existingXAttrs;
    }

    List<XAttr> xAttrs = Lists.newArrayListWithCapacity(existingXAttrs.size());
    for (XAttr a : existingXAttrs) {
      if (!(a.getNameSpace() == xAttr.getNameSpace()
          && a.getName().equals(xAttr.getName()))) {
        xAttrs.add(a);
      }
    }

    return xAttrs;
  }

  void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
      boolean logRetryCache) throws IOException {
    writeLock();
    try {
      unprotectedSetXAttr(src, xAttr, flag);
      fsImage.getEditLog().logSetXAttr(src, xAttr, logRetryCache);
    } finally {
      writeUnlock();
    }
  }

  void unprotectedSetXAttr(String src, XAttr xAttr,
      EnumSet<XAttrSetFlag> flag) throws IOException {
    assert hasWriteLock();
    INodesInPath iip = getINodesInPath4Write(normalizePath(src), true);
    INode inode = resolveLastINode(src, iip);
    int snapshotId = iip.getLatestSnapshotId();
    List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
    List<XAttr> newXAttrs = setINodeXAttr(existingXAttrs, xAttr, flag);
    XAttrStorage.updateINodeXAttrs(inode, newXAttrs, snapshotId);
  }

  List<XAttr> setINodeXAttr(List<XAttr> existingXAttrs, XAttr xAttr,
      EnumSet<XAttrSetFlag> flag) throws QuotaExceededException, IOException {
    List<XAttr> xAttrs = Lists.newArrayListWithCapacity(
        existingXAttrs != null ? existingXAttrs.size() + 1 : 1);
    boolean exist = false;
    if (existingXAttrs != null) {
      for (XAttr a: existingXAttrs) {
        if ((a.getNameSpace() == xAttr.getNameSpace()
            && a.getName().equals(xAttr.getName()))) {
          exist = true;
        } else {
          xAttrs.add(a);
        }
      }
    }

    XAttrSetFlag.validate(xAttr.getName(), exist, flag);
    xAttrs.add(xAttr);

    if (xAttrs.size() > inodeXAttrsLimit) {
      throw new IOException("Cannot add additional XAttr to inode, "
          + "would exceed limit of " + inodeXAttrsLimit);
    }

    return xAttrs;
  }

  List<XAttr> getXAttrs(String src) throws IOException {
    String srcs = normalizePath(src);
    readLock();
    try {
      INodesInPath iip = getLastINodeInPath(srcs, true);
      INode inode = resolveLastINode(src, iip);
      int snapshotId = iip.getPathSnapshotId();
      return XAttrStorage.readINodeXAttrs(inode, snapshotId);
    } finally {
      readUnlock();
    }
  }

  private static INode resolveLastINode(String src, INodesInPath iip)
      throws FileNotFoundException {
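
Not part of the patch: a small sketch of the CREATE/REPLACE semantics that setINodeXAttr() delegates to XAttrSetFlag.validate(). CREATE alone fails when the xattr already exists; REPLACE alone fails when it does not. The attribute name below is illustrative, and the assumption is that validate() signals violations via IOException.

import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.fs.XAttrSetFlag;

public class XAttrFlagSemanticsSketch {
  public static void main(String[] args) throws IOException {
    // Succeeds: the attribute does not exist yet and CREATE was requested.
    XAttrSetFlag.validate("user.a1", false, EnumSet.of(XAttrSetFlag.CREATE));
    try {
      // Fails: the attribute does not exist but only REPLACE was requested.
      XAttrSetFlag.validate("user.a1", false, EnumSet.of(XAttrSetFlag.REPLACE));
    } catch (IOException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}
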
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

@@ -69,6 +70,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCacheDirectiveInfoOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveXAttrOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;

@@ -80,6 +82,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetReplicationOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetXAttrOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;

@@ -1050,6 +1053,21 @@ public class FSEditLog implements LogsPurgeable {
    op.aclEntries = entries;
    logEdit(op);
  }

  void logSetXAttr(String src, XAttr xAttr, boolean toLogRpcIds) {
    final SetXAttrOp op = SetXAttrOp.getInstance();
    op.src = src;
    op.xAttr = xAttr;
    logRpcIds(op, toLogRpcIds);
    logEdit(op);
  }

  void logRemoveXAttr(String src, XAttr xAttr) {
    final RemoveXAttrOp op = RemoveXAttrOp.getInstance();
    op.src = src;
    op.xAttr = xAttr;
    logEdit(op);
  }

  /**
   * Get all the journals this edit log is currently operating on.

@@ -25,12 +25,14 @@ import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import java.util.EnumMap;
import java.util.EnumSet;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

@@ -76,6 +78,8 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetReplicationOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetXAttrOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveXAttrOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;

@@ -798,6 +802,20 @@ public class FSEditLogLoader {
      fsDir.unprotectedSetAcl(setAclOp.src, setAclOp.aclEntries);
      break;
    }
    case OP_SET_XATTR: {
      SetXAttrOp setXAttrOp = (SetXAttrOp) op;
      fsDir.unprotectedSetXAttr(setXAttrOp.src, setXAttrOp.xAttr,
          EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
      if (toAddRetryCache) {
        fsNamesys.addCacheEntry(setXAttrOp.rpcClientId, setXAttrOp.rpcCallId);
      }
      break;
    }
    case OP_REMOVE_XATTR: {
      RemoveXAttrOp removeXAttrOp = (RemoveXAttrOp) op;
      fsDir.unprotectedRemoveXAttr(removeXAttrOp.src, removeXAttrOp.xAttr);
      break;
    }
    default:
      throw new IOException("Invalid operation read " + op.opCode);
    }

@@ -54,6 +54,8 @@ import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_OWN
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_PERMISSIONS;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_QUOTA;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_REPLICATION;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_XATTR;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REMOVE_XATTR;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_START_LOG_SEGMENT;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SYMLINK;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_TIMES;

@@ -79,12 +81,14 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DeprecatedUTF8;
import org.apache.hadoop.hdfs.protocol.Block;

@@ -95,6 +99,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEditLogProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrEditLogProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.util.XMLUtils;

@@ -186,6 +191,8 @@ public abstract class FSEditLogOp {
          OP_ROLLING_UPGRADE_START, "start"));
      inst.put(OP_ROLLING_UPGRADE_FINALIZE, new RollingUpgradeOp(
          OP_ROLLING_UPGRADE_FINALIZE, "finalize"));
      inst.put(OP_SET_XATTR, new SetXAttrOp());
      inst.put(OP_REMOVE_XATTR, new RemoveXAttrOp());
    }

    public FSEditLogOp get(FSEditLogOpCodes opcode) {

@@ -3490,6 +3497,95 @@ public abstract class FSEditLogOp {
      return builder.toString();
    }
  }

  static class RemoveXAttrOp extends FSEditLogOp {
    XAttr xAttr;
    String src;

    private RemoveXAttrOp() {
      super(OP_REMOVE_XATTR);
    }

    static RemoveXAttrOp getInstance() {
      return new RemoveXAttrOp();
    }

    @Override
    void readFields(DataInputStream in, int logVersion) throws IOException {
      XAttrEditLogProto p = XAttrEditLogProto.parseDelimitedFrom(in);
      src = p.getSrc();
      xAttr = PBHelper.convertXAttr(p.getXAttr());
    }

    @Override
    public void writeFields(DataOutputStream out) throws IOException {
      XAttrEditLogProto.Builder b = XAttrEditLogProto.newBuilder();
      if (src != null) {
        b.setSrc(src);
      }
      b.setXAttr(PBHelper.convertXAttrProto(xAttr));
      b.build().writeDelimitedTo(out);
    }

    @Override
    protected void toXml(ContentHandler contentHandler) throws SAXException {
      XMLUtils.addSaxString(contentHandler, "SRC", src);
      appendXAttrToXml(contentHandler, xAttr);
    }

    @Override
    void fromXml(Stanza st) throws InvalidXmlException {
      src = st.getValue("SRC");
      xAttr = readXAttrFromXml(st);
    }
  }

  static class SetXAttrOp extends FSEditLogOp {
    XAttr xAttr;
    String src;

    private SetXAttrOp() {
      super(OP_SET_XATTR);
    }

    static SetXAttrOp getInstance() {
      return new SetXAttrOp();
    }

    @Override
    void readFields(DataInputStream in, int logVersion) throws IOException {
      XAttrEditLogProto p = XAttrEditLogProto.parseDelimitedFrom(in);
      src = p.getSrc();
      xAttr = PBHelper.convertXAttr(p.getXAttr());
      readRpcIds(in, logVersion);
    }

    @Override
    public void writeFields(DataOutputStream out) throws IOException {
      XAttrEditLogProto.Builder b = XAttrEditLogProto.newBuilder();
      if (src != null) {
        b.setSrc(src);
      }
      b.setXAttr(PBHelper.convertXAttrProto(xAttr));
      b.build().writeDelimitedTo(out);
      // clientId and callId
      writeRpcIds(rpcClientId, rpcCallId, out);
    }

    @Override
    protected void toXml(ContentHandler contentHandler) throws SAXException {
      XMLUtils.addSaxString(contentHandler, "SRC", src);
      appendXAttrToXml(contentHandler, xAttr);
      appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
    }

    @Override
    void fromXml(Stanza st) throws InvalidXmlException {
      src = st.getValue("SRC");
      xAttr = readXAttrFromXml(st);
      readRpcIdsFromXml(st);
    }
  }

  static class SetAclOp extends FSEditLogOp {
    List<AclEntry> aclEntries = Lists.newArrayList();

@ -4106,4 +4202,42 @@ public abstract class FSEditLogOp {
|
|||
}
|
||||
return aclEntries;
|
||||
}
|
||||
|
||||
private static void appendXAttrToXml(ContentHandler contentHandler,
|
||||
XAttr xAttr) throws SAXException {
|
||||
contentHandler.startElement("", "", "XATTR", new AttributesImpl());
|
||||
XMLUtils.addSaxString(contentHandler, "NAMESPACE",
|
||||
xAttr.getNameSpace().toString());
|
||||
XMLUtils.addSaxString(contentHandler, "NAME", xAttr.getName());
|
||||
if (xAttr.getValue() != null) {
|
||||
try {
|
||||
XMLUtils.addSaxString(contentHandler, "VALUE",
|
||||
XAttrCodec.encodeValue(xAttr.getValue(), XAttrCodec.HEX));
|
||||
} catch (IOException e) {
|
||||
throw new SAXException(e);
|
||||
}
|
||||
}
|
||||
contentHandler.endElement("", "", "XATTR");
|
||||
}
|
||||
|
||||
private static XAttr readXAttrFromXml(Stanza st)
|
||||
throws InvalidXmlException {
|
||||
if (!st.hasChildren("XATTR")) {
|
||||
return null;
|
||||
}
|
||||
|
||||
Stanza a = st.getChildren("XATTR").get(0);
|
||||
XAttr.Builder builder = new XAttr.Builder();
|
||||
builder.setNameSpace(XAttr.NameSpace.valueOf(a.getValue("NAMESPACE"))).
|
||||
setName(a.getValue("NAME"));
|
||||
String v = a.getValueOrNull("VALUE");
|
||||
if (v != null) {
|
||||
try {
|
||||
builder.setValue(XAttrCodec.decodeValue(v));
|
||||
} catch (IOException e) {
|
||||
throw new InvalidXmlException(e.toString());
|
||||
}
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
}
|
||||
|
|
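
For illustration only, not part of the patch: given the serialization above, an xattr "user.a1" with value "123" would appear in the offline edits viewer's XML output roughly as the stanza below (assuming XAttrCodec.HEX renders values with a "0x" prefix; the surrounding record elements produced by the common FSEditLogOp XML path are omitted):

  <SRC>/file</SRC>
  <XATTR>
    <NAMESPACE>USER</NAMESPACE>
    <NAME>a1</NAME>
    <VALUE>0x313233</VALUE>
  </XATTR>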

@ -70,6 +70,8 @@ public enum FSEditLogOpCodes {
  OP_SET_ACL                      ((byte) 40),
  OP_ROLLING_UPGRADE_START        ((byte) 41),
  OP_ROLLING_UPGRADE_FINALIZE     ((byte) 42),
  OP_SET_XATTR                    ((byte) 43),
  OP_REMOVE_XATTR                 ((byte) 44),

  // Note that the current range of the valid OP code is 0~127
  OP_INVALID                      ((byte) -1);

@ -877,7 +877,7 @@ public class FSImageFormat {
    final long preferredBlockSize = in.readLong();

    return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime,
        accessTime, replication, preferredBlockSize);
        accessTime, replication, preferredBlockSize, null);
  }

  public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)

@ -897,10 +897,10 @@ public class FSImageFormat {
    final long nsQuota = in.readLong();
    final long dsQuota = in.readLong();

    return nsQuota == -1L && dsQuota == -1L?
        new INodeDirectoryAttributes.SnapshotCopy(name, permissions, null, modificationTime)
    return nsQuota == -1L && dsQuota == -1L ? new INodeDirectoryAttributes.SnapshotCopy(
        name, permissions, null, modificationTime, null)
        : new INodeDirectoryAttributes.CopyWithQuota(name, permissions,
            null, modificationTime, nsQuota, dsQuota);
            null, modificationTime, nsQuota, dsQuota, null);
  }

  private void loadFilesUnderConstruction(DataInput in,

@ -36,6 +36,7 @@ import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;

@ -49,7 +50,10 @@ import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructio
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
import org.apache.hadoop.hdfs.util.ReadOnlyList;

import com.google.common.base.Preconditions;

@ -74,6 +78,14 @@ public final class FSImageFormatPBINode {
      .values();
  private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES = AclEntryType
      .values();

  private static final int XATTR_NAMESPACE_MASK = 3;
  private static final int XATTR_NAMESPACE_OFFSET = 30;
  private static final int XATTR_NAME_MASK = (1 << 24) - 1;
  private static final int XATTR_NAME_OFFSET = 6;
  private static final XAttr.NameSpace[] XATTR_NAMESPACE_VALUES =
      XAttr.NameSpace.values();


  private static final Log LOG = LogFactory.getLog(FSImageFormatPBINode.class);

@ -103,6 +115,25 @@ public final class FSImageFormatPBINode {
      }
      return b.build();
    }

    public static ImmutableList<XAttr> loadXAttrs(
        XAttrFeatureProto proto, final String[] stringTable) {
      ImmutableList.Builder<XAttr> b = ImmutableList.builder();
      for (XAttrCompactProto xAttrCompactProto : proto.getXAttrsList()) {
        int v = xAttrCompactProto.getName();
        int nid = (v >> XATTR_NAME_OFFSET) & XATTR_NAME_MASK;
        int ns = (v >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK;
        String name = stringTable[nid];
        byte[] value = null;
        if (xAttrCompactProto.getValue() != null) {
          value = xAttrCompactProto.getValue().toByteArray();
        }
        b.add(new XAttr.Builder().setNameSpace(XATTR_NAMESPACE_VALUES[ns])
            .setName(name).setValue(value).build());
      }

      return b.build();
    }

    public static INodeDirectory loadINodeDirectory(INodeSection.INode n,
        LoaderContext state) {

@ -123,6 +154,10 @@ public final class FSImageFormatPBINode {
        dir.addAclFeature(new AclFeature(loadAclEntries(d.getAcl(),
            state.getStringTable())));
      }
      if (d.hasXAttrs()) {
        dir.addXAttrFeature(new XAttrFeature(
            loadXAttrs(d.getXAttrs(), state.getStringTable())));
      }
      return dir;
    }

@ -255,6 +290,11 @@ public final class FSImageFormatPBINode {
        file.addAclFeature(new AclFeature(loadAclEntries(f.getAcl(),
            state.getStringTable())));
      }

      if (f.hasXAttrs()) {
        file.addXAttrFeature(new XAttrFeature(
            loadXAttrs(f.getXAttrs(), state.getStringTable())));
      }

      // under-construction information
      if (f.hasFileUC()) {

@ -295,6 +335,11 @@ public final class FSImageFormatPBINode {
      }
      dir.rootDir.cloneModificationTime(root);
      dir.rootDir.clonePermissionStatus(root);
      // root dir supports having extended attributes according to POSIX
      final XAttrFeature f = root.getXAttrFeature();
      if (f != null) {
        dir.rootDir.addXAttrFeature(f);
      }
    }
  }

@ -320,6 +365,26 @@ public final class FSImageFormatPBINode {
      }
      return b;
    }

    private static XAttrFeatureProto.Builder buildXAttrs(XAttrFeature f,
        final SaverContext.DeduplicationMap<String> stringMap) {
      XAttrFeatureProto.Builder b = XAttrFeatureProto.newBuilder();
      for (XAttr a : f.getXAttrs()) {
        XAttrCompactProto.Builder xAttrCompactBuilder = XAttrCompactProto.
            newBuilder();
        int v = ((a.getNameSpace().ordinal() & XATTR_NAMESPACE_MASK) <<
            XATTR_NAMESPACE_OFFSET)
            | ((stringMap.getId(a.getName()) & XATTR_NAME_MASK) <<
            XATTR_NAME_OFFSET);
        xAttrCompactBuilder.setName(v);
        if (a.getValue() != null) {
          xAttrCompactBuilder.setValue(PBHelper.getByteString(a.getValue()));
        }
        b.addXAttrs(xAttrCompactBuilder.build());
      }

      return b;
    }
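
    // Worked example, illustrative only (not part of the patch): with the
    // masks and offsets defined above, an xattr in the TRUSTED namespace
    // (ordinal 1) whose name deduplicates to string-table id 5 packs as
    //   v = (1 << 30) | (5 << 6) = 0x40000000 | 0x140 = 0x40000140
    // and loadXAttrs recovers nid = (v >> 6) & 0xffffff = 5 and
    // ns = (v >> 30) & 3 = 1 by the inverse shifts and masks.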

    public static INodeSection.INodeFile.Builder buildINodeFile(
        INodeFileAttributes file, final SaverContext state) {

@ -334,6 +399,10 @@ public final class FSImageFormatPBINode {
      if (f != null) {
        b.setAcl(buildAclEntries(f, state.getStringMap()));
      }
      XAttrFeature xAttrFeature = file.getXAttrFeature();
      if (xAttrFeature != null) {
        b.setXAttrs(buildXAttrs(xAttrFeature, state.getStringMap()));
      }
      return b;
    }

@ -350,6 +419,10 @@ public final class FSImageFormatPBINode {
      if (f != null) {
        b.setAcl(buildAclEntries(f, state.getStringMap()));
      }
      XAttrFeature xAttrFeature = dir.getXAttrFeature();
      if (xAttrFeature != null) {
        b.setXAttrs(buildXAttrs(xAttrFeature, state.getStringMap()));
      }
      return b;
    }

@ -131,6 +131,8 @@ import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;

@ -508,7 +510,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,

  private final RetryCache retryCache;

  private final AclConfigFlag aclConfigFlag;
  private final NNConf nnConf;

  /**
   * Set the last allocated inode id when fsimage or editlog is loaded.

@ -775,7 +777,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
      this.isDefaultAuditLogger = auditLoggers.size() == 1 &&
          auditLoggers.get(0) instanceof DefaultAuditLogger;
      this.retryCache = ignoreRetryCache ? null : initRetryCache(conf);
      this.aclConfigFlag = new AclConfigFlag(conf);
      this.nnConf = new NNConf(conf);
    } catch(IOException e) {
      LOG.error(getClass().getSimpleName() + " initialization failed.", e);
      close();

@ -1112,8 +1114,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
        // so that the tailer starts from the right spot.
        dir.fsImage.updateLastAppliedTxIdFromWritten();
      }
      cacheManager.stopMonitorThread();
      cacheManager.clearDirectiveStats();
      if (cacheManager != null) {
        cacheManager.stopMonitorThread();
        cacheManager.clearDirectiveStats();
      }
      blockManager.getDatanodeManager().clearPendingCachingCommands();
      blockManager.getDatanodeManager().setShouldSendCachingCommands(false);
      // Don't want to keep replication queues when not in Active.

@ -7694,7 +7698,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
  }

  void modifyAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
    aclConfigFlag.checkForApiCall();
    nnConf.checkAclsConfigFlag();
    HdfsFileStatus resultingStat = null;
    FSPermissionChecker pc = getPermissionChecker();
    checkOperation(OperationCategory.WRITE);

@ -7715,7 +7719,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
  }

  void removeAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
    aclConfigFlag.checkForApiCall();
    nnConf.checkAclsConfigFlag();
    HdfsFileStatus resultingStat = null;
    FSPermissionChecker pc = getPermissionChecker();
    checkOperation(OperationCategory.WRITE);

@ -7736,7 +7740,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
  }

  void removeDefaultAcl(String src) throws IOException {
    aclConfigFlag.checkForApiCall();
    nnConf.checkAclsConfigFlag();
    HdfsFileStatus resultingStat = null;
    FSPermissionChecker pc = getPermissionChecker();
    checkOperation(OperationCategory.WRITE);

@ -7757,7 +7761,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
  }

  void removeAcl(String src) throws IOException {
    aclConfigFlag.checkForApiCall();
    nnConf.checkAclsConfigFlag();
    HdfsFileStatus resultingStat = null;
    FSPermissionChecker pc = getPermissionChecker();
    checkOperation(OperationCategory.WRITE);

@ -7778,7 +7782,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
  }

  void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
    aclConfigFlag.checkForApiCall();
    nnConf.checkAclsConfigFlag();
    HdfsFileStatus resultingStat = null;
    FSPermissionChecker pc = getPermissionChecker();
    checkOperation(OperationCategory.WRITE);

@ -7799,7 +7803,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
  }

  AclStatus getAclStatus(String src) throws IOException {
    aclConfigFlag.checkForApiCall();
    nnConf.checkAclsConfigFlag();
    FSPermissionChecker pc = getPermissionChecker();
    checkOperation(OperationCategory.READ);
    readLock();

@ -7813,6 +7817,167 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
      readUnlock();
    }
  }

  /**
   * Set xattr for a file or directory.
   *
   * @param src
   *          - path on which it sets the xattr
   * @param xAttr
   *          - xAttr details to set
   * @param flag
   *          - xAttrs flags
   * @throws AccessControlException
   * @throws SafeModeException
   * @throws UnresolvedLinkException
   * @throws IOException
   */
  void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
      throws AccessControlException, SafeModeException,
      UnresolvedLinkException, IOException {
    CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
    if (cacheEntry != null && cacheEntry.isSuccess()) {
      return; // Return previous response
    }
    boolean success = false;
    try {
      setXAttrInt(src, xAttr, flag, cacheEntry != null);
      success = true;
    } catch (AccessControlException e) {
      logAuditEvent(false, "setXAttr", src);
      throw e;
    } finally {
      RetryCache.setState(cacheEntry, success);
    }
  }

  private void setXAttrInt(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
      boolean logRetryCache) throws IOException {
    nnConf.checkXAttrsConfigFlag();
    checkXAttrSize(xAttr);
    HdfsFileStatus resultingStat = null;
    FSPermissionChecker pc = getPermissionChecker();
    XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
    checkOperation(OperationCategory.WRITE);
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot set XAttr on " + src);
      src = FSDirectory.resolvePath(src, pathComponents, dir);
      if (isPermissionEnabled) {
        checkOwner(pc, src);
        checkPathAccess(pc, src, FsAction.WRITE);
      }
      dir.setXAttr(src, xAttr, flag, logRetryCache);
      resultingStat = getAuditFileInfo(src, false);
    } finally {
      writeUnlock();
    }
    getEditLog().logSync();
    logAuditEvent(true, "setXAttr", src, null, resultingStat);
  }

  /**
   * Verifies that the combined size of the name and value of an xattr is within
   * the configured limit. Setting a limit of zero disables this check.
   */
  private void checkXAttrSize(XAttr xAttr) {
    if (nnConf.xattrMaxSize == 0) {
      return;
    }
    int size = xAttr.getName().getBytes(Charsets.UTF_8).length;
    if (xAttr.getValue() != null) {
      size += xAttr.getValue().length;
    }
    if (size > nnConf.xattrMaxSize) {
      throw new HadoopIllegalArgumentException(
          "The XAttr is too big. The maximum combined size of the"
          + " name and value is " + nnConf.xattrMaxSize
          + ", but the total size is " + size);
    }
  }
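
  // Worked example, illustrative only (not part of the patch): for an
  // xattr built from "user.a1" with a 10-byte value, the namespace prefix
  // is stored separately in the XAttr, so the size checked above is
  // "a1".getBytes(UTF_8).length + 10 = 12 bytes, compared against
  // nnConf.xattrMaxSize.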

  List<XAttr> getXAttrs(String src, List<XAttr> xAttrs) throws IOException {
    nnConf.checkXAttrsConfigFlag();
    FSPermissionChecker pc = getPermissionChecker();
    boolean getAll = xAttrs == null || xAttrs.isEmpty();
    List<XAttr> filteredXAttrs = null;
    if (!getAll) {
      filteredXAttrs = XAttrPermissionFilter.filterXAttrsForApi(pc, xAttrs);
      if (filteredXAttrs.isEmpty()) {
        return filteredXAttrs;
      }
    }
    checkOperation(OperationCategory.READ);
    readLock();
    try {
      checkOperation(OperationCategory.READ);
      if (isPermissionEnabled) {
        checkPathAccess(pc, src, FsAction.READ);
      }
      List<XAttr> all = dir.getXAttrs(src);
      List<XAttr> filteredAll = XAttrPermissionFilter.
          filterXAttrsForApi(pc, all);
      if (getAll) {
        return filteredAll;
      } else {
        if (filteredAll == null || filteredAll.isEmpty()) {
          return null;
        }
        List<XAttr> toGet = Lists.newArrayListWithCapacity(filteredXAttrs.size());
        for (XAttr xAttr : filteredXAttrs) {
          for (XAttr a : filteredAll) {
            if (xAttr.getNameSpace() == a.getNameSpace()
                && xAttr.getName().equals(a.getName())) {
              toGet.add(a);
              break;
            }
          }
        }
        return toGet;
      }
    } catch (AccessControlException e) {
      logAuditEvent(false, "getXAttrs", src);
      throw e;
    } finally {
      readUnlock();
    }
  }

  void removeXAttr(String src, XAttr xAttr) throws IOException {
    nnConf.checkXAttrsConfigFlag();
    HdfsFileStatus resultingStat = null;
    FSPermissionChecker pc = getPermissionChecker();
    try {
      XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
    } catch (AccessControlException e) {
      logAuditEvent(false, "removeXAttr", src);
      throw e;
    }
    checkOperation(OperationCategory.WRITE);
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot remove XAttr entry on " + src);
      src = FSDirectory.resolvePath(src, pathComponents, dir);
      if (isPermissionEnabled) {
        checkOwner(pc, src);
        checkPathAccess(pc, src, FsAction.WRITE);
      }

      dir.removeXAttr(src, xAttr);
      resultingStat = getAuditFileInfo(src, false);
    } catch (AccessControlException e) {
      logAuditEvent(false, "removeXAttr", src);
      throw e;
    } finally {
      writeUnlock();
    }
    getEditLog().logSync();
    logAuditEvent(true, "removeXAttr", src, null, resultingStat);
  }

  /**
   * Default AuditLogger implementation; used when no access logger is

@ -7898,6 +8063,5 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
      logger.addAppender(asyncAppender);
    }
  }

}

@ -177,6 +177,44 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
    nodeToUpdate.removeAclFeature();
    return nodeToUpdate;
  }

  /**
   * @param snapshotId
   *          if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
   *          from the given snapshot; otherwise, get the result from the
   *          current inode.
   * @return XAttrFeature
   */
  abstract XAttrFeature getXAttrFeature(int snapshotId);

  @Override
  public final XAttrFeature getXAttrFeature() {
    return getXAttrFeature(Snapshot.CURRENT_STATE_ID);
  }

  /**
   * Set <code>XAttrFeature</code>
   */
  abstract void addXAttrFeature(XAttrFeature xAttrFeature);

  final INode addXAttrFeature(XAttrFeature xAttrFeature, int latestSnapshotId)
      throws QuotaExceededException {
    final INode nodeToUpdate = recordModification(latestSnapshotId);
    nodeToUpdate.addXAttrFeature(xAttrFeature);
    return nodeToUpdate;
  }

  /**
   * Remove <code>XAttrFeature</code>
   */
  abstract void removeXAttrFeature();

  final INode removeXAttrFeature(int latestSnapshotId)
      throws QuotaExceededException {
    final INode nodeToUpdate = recordModification(latestSnapshotId);
    nodeToUpdate.removeXAttrFeature();
    return nodeToUpdate;
  }

  /**
   * @return if the given snapshot id is {@link Snapshot#CURRENT_STATE_ID},

@ -21,6 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.server.namenode.INodeWithAdditionalFields.PermissionStatusFormat;
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;

/**
 * The attributes of an inode.

@ -50,6 +51,9 @@ public interface INodeAttributes {

  /** @return the ACL feature. */
  public AclFeature getAclFeature();

  /** @return the XAttrs feature. */
  public XAttrFeature getXAttrFeature();

  /** @return the modification time. */
  public long getModificationTime();

@ -64,14 +68,17 @@ public interface INodeAttributes {
    private final AclFeature aclFeature;
    private final long modificationTime;
    private final long accessTime;
    private XAttrFeature xAttrFeature;

    SnapshotCopy(byte[] name, PermissionStatus permissions,
        AclFeature aclFeature, long modificationTime, long accessTime) {
        AclFeature aclFeature, long modificationTime, long accessTime,
        XAttrFeature xAttrFeature) {
      this.name = name;
      this.permission = PermissionStatusFormat.toLong(permissions);
      this.aclFeature = aclFeature;
      this.modificationTime = modificationTime;
      this.accessTime = accessTime;
      this.xAttrFeature = xAttrFeature;
    }

    SnapshotCopy(INode inode) {

@ -80,6 +87,7 @@ public interface INodeAttributes {
      this.aclFeature = inode.getAclFeature();
      this.modificationTime = inode.getModificationTime();
      this.accessTime = inode.getAccessTime();
      this.xAttrFeature = inode.getXAttrFeature();
    }

    @Override

@ -128,5 +136,10 @@ public interface INodeAttributes {
    public final long getAccessTime() {
      return accessTime;
    }

    @Override
    public final XAttrFeature getXAttrFeature() {
      return xAttrFeature;
    }
  }
}

@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;

import com.google.common.base.Preconditions;

@ -35,8 +36,9 @@ public interface INodeDirectoryAttributes extends INodeAttributes {
  public static class SnapshotCopy extends INodeAttributes.SnapshotCopy
      implements INodeDirectoryAttributes {
    public SnapshotCopy(byte[] name, PermissionStatus permissions,
        AclFeature aclFeature, long modificationTime) {
      super(name, permissions, aclFeature, modificationTime, 0L);
        AclFeature aclFeature, long modificationTime,
        XAttrFeature xAttrsFeature) {
      super(name, permissions, aclFeature, modificationTime, 0L, xAttrsFeature);
    }

    public SnapshotCopy(INodeDirectory dir) {

@ -63,8 +65,8 @@ public interface INodeDirectoryAttributes extends INodeAttributes {

    public CopyWithQuota(byte[] name, PermissionStatus permissions,
        AclFeature aclFeature, long modificationTime, long nsQuota,
        long dsQuota) {
      super(name, permissions, aclFeature, modificationTime);
        long dsQuota, XAttrFeature xAttrsFeature) {
      super(name, permissions, aclFeature, modificationTime, xAttrsFeature);
      this.nsQuota = nsQuota;
      this.dsQuota = dsQuota;
    }

@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.server.namenode.INodeFile.HeaderFormat;
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;

/**
 * The attributes of a file.

@ -42,8 +43,9 @@ public interface INodeFileAttributes extends INodeAttributes {

    public SnapshotCopy(byte[] name, PermissionStatus permissions,
        AclFeature aclFeature, long modificationTime, long accessTime,
        short replication, long preferredBlockSize) {
      super(name, permissions, aclFeature, modificationTime, accessTime);
        short replication, long preferredBlockSize, XAttrFeature xAttrsFeature) {
      super(name, permissions, aclFeature, modificationTime, accessTime,
          xAttrsFeature);

      final long h = HeaderFormat.combineReplication(0L, replication);
      header = HeaderFormat.combinePreferredBlockSize(h, preferredBlockSize);

@ -28,6 +28,7 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;

import com.google.common.base.Preconditions;

@ -228,6 +229,21 @@ public abstract class INodeReference extends INode {
  final void removeAclFeature() {
    referred.removeAclFeature();
  }

  @Override
  final XAttrFeature getXAttrFeature(int snapshotId) {
    return referred.getXAttrFeature(snapshotId);
  }

  @Override
  final void addXAttrFeature(XAttrFeature xAttrFeature) {
    referred.addXAttrFeature(xAttrFeature);
  }

  @Override
  final void removeXAttrFeature() {
    referred.removeXAttrFeature();
  }

  @Override
  public final short getFsPermissionShort() {

@ -25,6 +25,7 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;

/**
 * An {@link INode} representing a symbolic link.

@ -110,4 +111,19 @@ public class INodeSymlink extends INodeWithAdditionalFields {
    super.dumpTreeRecursively(out, prefix, snapshot);
    out.println();
  }

  @Override
  final XAttrFeature getXAttrFeature(int snapshotId) {
    throw new UnsupportedOperationException("XAttrs are not supported on symlinks");
  }

  @Override
  public void removeXAttrFeature() {
    throw new UnsupportedOperationException("XAttrs are not supported on symlinks");
  }

  @Override
  public void addXAttrFeature(XAttrFeature f) {
    throw new UnsupportedOperationException("XAttrs are not supported on symlinks");
  }
}

@ -23,6 +23,7 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.INode.Feature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
import org.apache.hadoop.util.LightWeightGSet.LinkedElement;

import com.google.common.base.Preconditions;

@ -340,6 +341,30 @@ public abstract class INodeWithAdditionalFields extends INode

    addFeature(f);
  }

  @Override
  XAttrFeature getXAttrFeature(int snapshotId) {
    if (snapshotId != Snapshot.CURRENT_STATE_ID) {
      return getSnapshotINode(snapshotId).getXAttrFeature();
    }

    return getFeature(XAttrFeature.class);
  }

  @Override
  public void removeXAttrFeature() {
    XAttrFeature f = getXAttrFeature();
    Preconditions.checkNotNull(f);
    removeFeature(f);
  }

  @Override
  public void addXAttrFeature(XAttrFeature f) {
    XAttrFeature f1 = getXAttrFeature();
    Preconditions.checkState(f1 == null, "Duplicated XAttrFeature");

    addFeature(f);
  }

  public final Feature[] getFeatures() {
    return features;

@ -0,0 +1,104 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;

import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.AclException;

import com.google.common.base.Preconditions;

/**
 * This class is a common place for NN configuration.
 */
@InterfaceAudience.Private
final class NNConf {
  /**
   * Support for ACLs is controlled by a configuration flag. If the
   * configuration flag is false, then the NameNode will reject all
   * ACL-related operations.
   */
  private final boolean aclsEnabled;

  /**
   * Support for XAttrs is controlled by a configuration flag. If the
   * configuration flag is false, then the NameNode will reject all
   * XAttr-related operations.
   */
  private final boolean xattrsEnabled;

  /**
   * Maximum size of a single name-value extended attribute.
   */
  final int xattrMaxSize;

  /**
   * Creates a new NNConf from configuration.
   *
   * @param conf Configuration to check
   */
  public NNConf(Configuration conf) {
    aclsEnabled = conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,
        DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_DEFAULT);
    LogFactory.getLog(NNConf.class).info("ACLs enabled? " + aclsEnabled);
    xattrsEnabled = conf.getBoolean(
        DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY,
        DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_DEFAULT);
    LogFactory.getLog(NNConf.class).info("XAttrs enabled? " + xattrsEnabled);
    xattrMaxSize = conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,
        DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT);
    Preconditions.checkArgument(xattrMaxSize >= 0,
        "Cannot set a negative value for the maximum size of an xattr (%s).",
        DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY);
    final String unlimited = xattrMaxSize == 0 ? " (unlimited)" : "";
    LogFactory.getLog(NNConf.class).info(
        "Maximum size of an xattr: " + xattrMaxSize + unlimited);
  }

  /**
   * Checks the flag on behalf of an ACL API call.
   *
   * @throws AclException if ACLs are disabled
   */
  public void checkAclsConfigFlag() throws AclException {
    if (!aclsEnabled) {
      throw new AclException(String.format(
          "The ACL operation has been rejected. "
          + "Support for ACLs has been disabled by setting %s to false.",
          DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY));
    }
  }

  /**
   * Checks the flag on behalf of an XAttr API call.
   * @throws IOException if XAttrs are disabled
   */
  public void checkXAttrsConfigFlag() throws IOException {
    if (!xattrsEnabled) {
      throw new IOException(String.format(
          "The XAttr operation has been rejected. "
          + "Support for XAttrs has been disabled by setting %s to false.",
          DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY));
    }
  }
}
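
For illustration only, not part of the patch: assuming the usual key strings behind these DFSConfigKeys constants (dfs.namenode.xattrs.enabled and dfs.namenode.fs-limits.max-xattr-size), the two xattr knobs read above could be set in hdfs-site.xml along these lines:

  <property>
    <name>dfs.namenode.xattrs.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.namenode.fs-limits.max-xattr-size</name>
    <value>16384</value>
  </property>

Per checkXAttrSize in FSNamesystem above, a size limit of 0 disables the combined name-plus-value size check.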

@ -64,7 +64,8 @@ public class NameNodeLayoutVersion {
   */
  public static enum Feature implements LayoutFeature {
    ROLLING_UPGRADE(-55, -53, "Support rolling upgrade", false),
    EDITLOG_LENGTH(-56, "Add length field to every edit log op");
    EDITLOG_LENGTH(-56, "Add length field to every edit log op"),
    XATTRS(-57, "Extended attributes");

    private final FeatureInfo info;

@ -49,6 +49,8 @@ import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;

@ -1381,5 +1383,22 @@ class NameNodeRpcServer implements NamenodeProtocols {
  public AclStatus getAclStatus(String src) throws IOException {
    return namesystem.getAclStatus(src);
  }

  @Override
  public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
      throws IOException {
    namesystem.setXAttr(src, xAttr, flag);
  }

  @Override
  public List<XAttr> getXAttrs(String src, List<XAttr> xAttrs)
      throws IOException {
    return namesystem.getXAttrs(src, xAttrs);
  }

  @Override
  public void removeXAttr(String src, XAttr xAttr) throws IOException {
    namesystem.removeXAttr(src, xAttr);
  }
}

@ -0,0 +1,43 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.server.namenode.INode;

import com.google.common.collect.ImmutableList;

/**
 * Feature for extended attributes.
 */
@InterfaceAudience.Private
public class XAttrFeature implements INode.Feature {
  public static final ImmutableList<XAttr> EMPTY_ENTRY_LIST =
      ImmutableList.of();

  private final ImmutableList<XAttr> xAttrs;

  public XAttrFeature(ImmutableList<XAttr> xAttrs) {
    this.xAttrs = xAttrs;
  }

  public ImmutableList<XAttr> getXAttrs() {
    return xAttrs;
  }
}

@ -0,0 +1,82 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs.server.namenode;

import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.security.AccessControlException;

import com.google.common.collect.Lists;

/**
 * There are four types of extended attributes <XAttr> defined by the
 * following namespaces:
 * <br>
 * USER - extended user attributes: these can be assigned to files and
 * directories to store arbitrary additional information. The access
 * permissions for user attributes are defined by the file permission
 * bits.
 * <br>
 * TRUSTED - trusted extended attributes: these are visible/accessible
 * only to/by the super user.
 * <br>
 * SECURITY - extended security attributes: these are used by the HDFS
 * core for security purposes and are not available through admin/user
 * API.
 * <br>
 * SYSTEM - extended system attributes: these are used by the HDFS
 * core and are not available through admin/user API.
 */
@InterfaceAudience.Private
public class XAttrPermissionFilter {

  static void checkPermissionForApi(FSPermissionChecker pc, XAttr xAttr)
      throws AccessControlException {
    if (xAttr.getNameSpace() == XAttr.NameSpace.USER ||
        (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED &&
        pc.isSuperUser())) {
      return;
    }
    throw new AccessControlException("User doesn't have permission for xattr: "
        + XAttrHelper.getPrefixName(xAttr));
  }

  static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc,
      List<XAttr> xAttrs) {
    assert xAttrs != null : "xAttrs cannot be null";
    if (xAttrs == null || xAttrs.isEmpty()) {
      return xAttrs;
    }

    List<XAttr> filteredXAttrs = Lists.newArrayListWithCapacity(xAttrs.size());
    for (XAttr xAttr : xAttrs) {
      if (xAttr.getNameSpace() == XAttr.NameSpace.USER) {
        filteredXAttrs.add(xAttr);
      } else if (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED &&
          pc.isSuperUser()) {
        filteredXAttrs.add(xAttr);
      }
    }

    return filteredXAttrs;
  }
}
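
For illustration only, not part of the patch: the filter above gives an ordinary (non-super) user the following outcomes, using hypothetical attribute names:

  checkPermissionForApi(pc, XAttrHelper.buildXAttr("user.a1"));     // allowed
  checkPermissionForApi(pc, XAttrHelper.buildXAttr("trusted.a1"));  // AccessControlException
  checkPermissionForApi(pc, XAttrHelper.buildXAttr("security.a1")); // AccessControlException

For the super user, "trusted" attributes are also allowed; "security" and "system" attributes are rejected for everyone, and filterXAttrsForApi silently drops them from listings.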

@ -0,0 +1,80 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs.server.namenode;

import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.INode;

import com.google.common.collect.ImmutableList;

/**
 * XAttrStorage is used to read and set xattrs for an inode.
 */
@InterfaceAudience.Private
public class XAttrStorage {

  /**
   * Reads the existing extended attributes of an inode. If the
   * inode does not have an <code>XAttr</code>, then this method
   * returns an empty list.
   * @param inode INode to read
   * @param snapshotId id of the snapshot to read the xattrs from
   * @return List<XAttr> <code>XAttr</code> list.
   */
  public static List<XAttr> readINodeXAttrs(INode inode, int snapshotId) {
    XAttrFeature f = inode.getXAttrFeature(snapshotId);
    return f == null ? ImmutableList.<XAttr> of() : f.getXAttrs();
  }

  /**
   * Reads the existing extended attributes of an inode.
   * @param inode INode to read.
   * @return List<XAttr> <code>XAttr</code> list.
   */
  public static List<XAttr> readINodeXAttrs(INode inode) {
    XAttrFeature f = inode.getXAttrFeature();
    return f == null ? ImmutableList.<XAttr> of() : f.getXAttrs();
  }

  /**
   * Updates the xattrs of an inode by replacing any existing
   * <code>XAttrFeature</code> with one holding the given list.
   * @param inode INode to update
   * @param xAttrs the new list of xattrs to store
   * @param snapshotId id of the latest snapshot of the inode
   */
  public static void updateINodeXAttrs(INode inode,
      List<XAttr> xAttrs, int snapshotId) throws QuotaExceededException {
    if (xAttrs == null || xAttrs.isEmpty()) {
      if (inode.getXAttrFeature() != null) {
        inode.removeXAttrFeature(snapshotId);
      }
      return;
    }

    ImmutableList<XAttr> newXAttrs = ImmutableList.copyOf(xAttrs);
    if (inode.getXAttrFeature() != null) {
      inode.removeXAttrFeature(snapshotId);
    }
    inode.addXAttrFeature(new XAttrFeature(newXAttrs), snapshotId);
  }
}
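
A hedged sketch, not part of the patch, of how a caller such as the FSDirectory-level xattr code (whose body is not shown in this diff) might use the helper above; locking, permission checks, and XAttrSetFlag handling are omitted, and the variable names are hypothetical:

  // Read the current xattrs, append one entry, and write the whole list
  // back; updateINodeXAttrs replaces the entire XAttrFeature each time.
  List<XAttr> existing =
      Lists.newArrayList(XAttrStorage.readINodeXAttrs(inode));
  existing.add(newXAttr);
  XAttrStorage.updateINodeXAttrs(inode, existing, latestSnapshotId);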

@ -65,6 +65,7 @@ import org.apache.hadoop.hdfs.server.namenode.SaveNamespaceContext;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root;
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
import org.apache.hadoop.hdfs.util.Diff.ListType;

import com.google.common.base.Preconditions;

@ -215,11 +216,16 @@ public class FSImageFormatPBSnapshot {
        acl = new AclFeature(FSImageFormatPBINode.Loader.loadAclEntries(
            fileInPb.getAcl(), state.getStringTable()));
      }
      XAttrFeature xAttrs = null;
      if (fileInPb.hasXAttrs()) {
        xAttrs = new XAttrFeature(FSImageFormatPBINode.Loader.loadXAttrs(
            fileInPb.getXAttrs(), state.getStringTable()));
      }

      copy = new INodeFileAttributes.SnapshotCopy(pbf.getName()
          .toByteArray(), permission, acl, fileInPb.getModificationTime(),
          fileInPb.getAccessTime(), (short) fileInPb.getReplication(),
          fileInPb.getPreferredBlockSize());
          fileInPb.getPreferredBlockSize(), xAttrs);
    }

    FileDiff diff = new FileDiff(pbf.getSnapshotId(), copy, null,

@ -310,16 +316,21 @@ public class FSImageFormatPBSnapshot {
        acl = new AclFeature(FSImageFormatPBINode.Loader.loadAclEntries(
            dirCopyInPb.getAcl(), state.getStringTable()));
      }
      XAttrFeature xAttrs = null;
      if (dirCopyInPb.hasXAttrs()) {
        xAttrs = new XAttrFeature(FSImageFormatPBINode.Loader.loadXAttrs(
            dirCopyInPb.getXAttrs(), state.getStringTable()));
      }

      long modTime = dirCopyInPb.getModificationTime();
      boolean noQuota = dirCopyInPb.getNsQuota() == -1
          && dirCopyInPb.getDsQuota() == -1;

      copy = noQuota ? new INodeDirectoryAttributes.SnapshotCopy(name,
          permission, acl, modTime)
          permission, acl, modTime, xAttrs)
          : new INodeDirectoryAttributes.CopyWithQuota(name, permission,
              acl, modTime, dirCopyInPb.getNsQuota(),
              dirCopyInPb.getDsQuota());
              dirCopyInPb.getDsQuota(), xAttrs);
    }
    // load created list
    List<INode> clist = loadCreatedList(in, dir,

@ -34,8 +34,10 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
import org.apache.hadoop.hdfs.util.ReadOnlyList;

import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;

@ -144,9 +146,20 @@ public class Snapshot implements Comparable<byte[]> {
  /** The root directory of the snapshot. */
  static public class Root extends INodeDirectory {
    Root(INodeDirectory other) {
      // Always preserve ACL.
      // Always preserve ACL, XAttr.
      super(other, false, Lists.newArrayList(
          Iterables.filter(Arrays.asList(other.getFeatures()), AclFeature.class))
          Iterables.filter(Arrays.asList(other.getFeatures()), new Predicate<Feature>() {

            @Override
            public boolean apply(Feature input) {
              if (AclFeature.class.isInstance(input)
                  || XAttrFeature.class.isInstance(input)) {
                return true;
              }
              return false;
            }

          }))
          .toArray(new Feature[0]));
    }

@ -28,6 +28,7 @@ import java.net.URISyntaxException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;

import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;

@ -53,8 +54,10 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

@ -103,6 +106,10 @@ import org.apache.hadoop.hdfs.web.resources.SnapshotNameParam;
import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
import org.apache.hadoop.hdfs.web.resources.XAttrEncodingParam;
import org.apache.hadoop.hdfs.web.resources.XAttrNameParam;
import org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam;
import org.apache.hadoop.hdfs.web.resources.XAttrValueParam;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;

@ -344,6 +351,12 @@ public class NamenodeWebHdfsMethods {
      final TokenArgumentParam delegationTokenArgument,
      @QueryParam(AclPermissionParam.NAME) @DefaultValue(AclPermissionParam.DEFAULT)
          final AclPermissionParam aclPermission,
      @QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT)
          final XAttrNameParam xattrName,
      @QueryParam(XAttrValueParam.NAME) @DefaultValue(XAttrValueParam.DEFAULT)
          final XAttrValueParam xattrValue,
      @QueryParam(XAttrSetFlagParam.NAME) @DefaultValue(XAttrSetFlagParam.DEFAULT)
          final XAttrSetFlagParam xattrSetFlag,
      @QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT)
          final SnapshotNameParam snapshotName,
      @QueryParam(OldSnapshotNameParam.NAME) @DefaultValue(OldSnapshotNameParam.DEFAULT)

@ -352,7 +365,8 @@ public class NamenodeWebHdfsMethods {
    return put(ugi, delegation, username, doAsUser, ROOT, op, destination,
        owner, group, permission, overwrite, bufferSize, replication,
        blockSize, modificationTime, accessTime, renameOptions, createParent,
        delegationTokenArgument, aclPermission, snapshotName, oldSnapshotName);
        delegationTokenArgument, aclPermission, xattrName, xattrValue,
        xattrSetFlag, snapshotName, oldSnapshotName);
  }

  /** Handle HTTP PUT request. */

@ -399,6 +413,12 @@ public class NamenodeWebHdfsMethods {
      final TokenArgumentParam delegationTokenArgument,
      @QueryParam(AclPermissionParam.NAME) @DefaultValue(AclPermissionParam.DEFAULT)
          final AclPermissionParam aclPermission,
      @QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT)
          final XAttrNameParam xattrName,
      @QueryParam(XAttrValueParam.NAME) @DefaultValue(XAttrValueParam.DEFAULT)
          final XAttrValueParam xattrValue,
      @QueryParam(XAttrSetFlagParam.NAME) @DefaultValue(XAttrSetFlagParam.DEFAULT)
          final XAttrSetFlagParam xattrSetFlag,
      @QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT)
          final SnapshotNameParam snapshotName,
      @QueryParam(OldSnapshotNameParam.NAME) @DefaultValue(OldSnapshotNameParam.DEFAULT)

@ -408,7 +428,8 @@ public class NamenodeWebHdfsMethods {
    init(ugi, delegation, username, doAsUser, path, op, destination, owner,
        group, permission, overwrite, bufferSize, replication, blockSize,
        modificationTime, accessTime, renameOptions, delegationTokenArgument,
        aclPermission, snapshotName, oldSnapshotName);
        aclPermission, xattrName, xattrValue, xattrSetFlag, snapshotName,
        oldSnapshotName);

    return ugi.doAs(new PrivilegedExceptionAction<Response>() {
      @Override

@ -418,8 +439,8 @@ public class NamenodeWebHdfsMethods {
            path.getAbsolutePath(), op, destination, owner, group,
            permission, overwrite, bufferSize, replication, blockSize,
            modificationTime, accessTime, renameOptions, createParent,
            delegationTokenArgument, aclPermission, snapshotName,
            oldSnapshotName);
            delegationTokenArgument, aclPermission, xattrName, xattrValue,
            xattrSetFlag, snapshotName, oldSnapshotName);
      } finally {
        reset();
      }

@ -448,6 +469,9 @@ public class NamenodeWebHdfsMethods {
      final CreateParentParam createParent,
      final TokenArgumentParam delegationTokenArgument,
      final AclPermissionParam aclPermission,
      final XAttrNameParam xattrName,
      final XAttrValueParam xattrValue,
      final XAttrSetFlagParam xattrSetFlag,
      final SnapshotNameParam snapshotName,
      final OldSnapshotNameParam oldSnapshotName
      ) throws IOException, URISyntaxException {

@ -549,6 +573,17 @@ public class NamenodeWebHdfsMethods {
      np.setAcl(fullpath, aclPermission.getAclPermission(true));
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case SETXATTR: {
      np.setXAttr(
          fullpath,
          XAttrHelper.buildXAttr(xattrName.getXAttrName(),
              xattrValue.getXAttrValue()), xattrSetFlag.getFlag());
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case REMOVEXATTR: {
      np.removeXAttr(fullpath, XAttrHelper.buildXAttr(xattrName.getXAttrName()));
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case CREATESNAPSHOT: {
      String snapshotPath = np.createSnapshot(fullpath, snapshotName.getValue());
      final String js = JsonUtil.toJsonString(

@ -679,10 +714,14 @@ public class NamenodeWebHdfsMethods {
      @QueryParam(RenewerParam.NAME) @DefaultValue(RenewerParam.DEFAULT)
          final RenewerParam renewer,
      @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
          final BufferSizeParam bufferSize
          final BufferSizeParam bufferSize,
      @QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT)
          final XAttrNameParam xattrName,
      @QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT)
          final XAttrEncodingParam xattrEncoding
      ) throws IOException, InterruptedException {
    return get(ugi, delegation, username, doAsUser, ROOT, op,
        offset, length, renewer, bufferSize);
    return get(ugi, delegation, username, doAsUser, ROOT, op, offset, length,
        renewer, bufferSize, xattrName, xattrEncoding);
  }

  /** Handle HTTP GET request. */

@ -707,18 +746,23 @@ public class NamenodeWebHdfsMethods {
      @QueryParam(RenewerParam.NAME) @DefaultValue(RenewerParam.DEFAULT)
          final RenewerParam renewer,
      @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
          final BufferSizeParam bufferSize
          final BufferSizeParam bufferSize,
      @QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT)
          final XAttrNameParam xattrName,
      @QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT)
          final XAttrEncodingParam xattrEncoding
      ) throws IOException, InterruptedException {

    init(ugi, delegation, username, doAsUser, path, op,
        offset, length, renewer, bufferSize);
    init(ugi, delegation, username, doAsUser, path, op, offset, length,
        renewer, bufferSize, xattrName, xattrEncoding);

    return ugi.doAs(new PrivilegedExceptionAction<Response>() {
      @Override
      public Response run() throws IOException, URISyntaxException {
        try {
          return get(ugi, delegation, username, doAsUser,
              path.getAbsolutePath(), op, offset, length, renewer, bufferSize);
              path.getAbsolutePath(), op, offset, length, renewer, bufferSize,
              xattrName, xattrEncoding);
        } finally {
          reset();
        }

@ -736,7 +780,9 @@ public class NamenodeWebHdfsMethods {
      final OffsetParam offset,
      final LengthParam length,
      final RenewerParam renewer,
      final BufferSizeParam bufferSize
      final BufferSizeParam bufferSize,
      final XAttrNameParam xattrName,
      final XAttrEncodingParam xattrEncoding
      ) throws IOException, URISyntaxException {
    final NameNode namenode = (NameNode)context.getAttribute("name.node");
    final NamenodeProtocols np = getRPCServer(namenode);

@ -811,6 +857,19 @@ public class NamenodeWebHdfsMethods {
      final String js = JsonUtil.toJsonString(status);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case GETXATTR: {
      XAttr xAttr = XAttrHelper.getFirstXAttr(np.getXAttrs(fullpath,
          XAttrHelper.buildXAttrAsList(xattrName.getXAttrName())));
      final String js = JsonUtil.toJsonString(xAttr,
          xattrEncoding.getEncoding());
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case GETXATTRS: {
      List<XAttr> xAttrs = np.getXAttrs(fullpath, null);
      final String js = JsonUtil.toJsonString(xAttrs,
          xattrEncoding.getEncoding());
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    default:
      throw new UnsupportedOperationException(op + " is not supported");
    }
@@ -22,6 +22,7 @@ import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.*;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;

@@ -34,6 +35,8 @@ import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.StringUtils;
import org.mortbay.util.ajax.JSON;

import com.google.common.collect.Maps;

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
@@ -661,4 +664,125 @@ public class JsonUtil {
    aclStatusBuilder.addEntries(aclEntryList);
    return aclStatusBuilder.build();
  }

  public static String toJsonString(final XAttr xAttr,
      final XAttrCodec encoding) throws IOException {
    if (xAttr == null) {
      return "{}";
    }
    final Map<String, Object> m = new TreeMap<String, Object>();
    m.put("name", XAttrHelper.getPrefixName(xAttr));
    m.put("value", xAttr.getValue() != null ?
        XAttrCodec.encodeValue(xAttr.getValue(), encoding) : null);
    final Map<String, Map<String, Object>> finalMap =
        new TreeMap<String, Map<String, Object>>();
    finalMap.put(XAttr.class.getSimpleName(), m);
    return JSON.toString(finalMap);
  }

  private static Map<String, Object> toJsonMap(final XAttr xAttr,
      final XAttrCodec encoding) throws IOException {
    if (xAttr == null) {
      return null;
    }

    final Map<String, Object> m = new TreeMap<String, Object>();
    m.put("name", XAttrHelper.getPrefixName(xAttr));
    m.put("value", xAttr.getValue() != null ?
        XAttrCodec.encodeValue(xAttr.getValue(), encoding) : null);
    return m;
  }

  private static Object[] toJsonArray(final List<XAttr> array,
      final XAttrCodec encoding) throws IOException {
    if (array == null) {
      return null;
    } else if (array.size() == 0) {
      return EMPTY_OBJECT_ARRAY;
    } else {
      final Object[] a = new Object[array.size()];
      for(int i = 0; i < array.size(); i++) {
        a[i] = toJsonMap(array.get(i), encoding);
      }
      return a;
    }
  }

  public static String toJsonString(final List<XAttr> xAttrs,
      final XAttrCodec encoding) throws IOException {
    final Map<String, Object> finalMap = new TreeMap<String, Object>();
    finalMap.put("XAttrs", toJsonArray(xAttrs, encoding));
    return JSON.toString(finalMap);
  }

  public static XAttr toXAttr(final Map<?, ?> json) throws IOException {
    if (json == null) {
      return null;
    }

    Map<?, ?> m = (Map<?, ?>) json.get(XAttr.class.getSimpleName());
    if (m == null) {
      return null;
    }
    String name = (String) m.get("name");
    String value = (String) m.get("value");
    return XAttrHelper.buildXAttr(name, decodeXAttrValue(value));
  }

  public static Map<String, byte[]> toXAttrs(final Map<?, ?> json)
      throws IOException {
    if (json == null) {
      return null;
    }

    return toXAttrMap((Object[])json.get("XAttrs"));
  }

  public static Map<String, byte[]> toXAttrs(final Map<?, ?> json,
      List<String> names) throws IOException {
    if (json == null || names == null) {
      return null;
    }
    if (names.isEmpty()) {
      return Maps.newHashMap();
    }
    Map<String, byte[]> xAttrs = toXAttrs(json);
    if (xAttrs == null || xAttrs.isEmpty()) {
      return xAttrs;
    }

    Map<String, byte[]> result = Maps.newHashMap();
    for (String name : names) {
      if (xAttrs.containsKey(name)) {
        result.put(name, xAttrs.get(name));
      }
    }
    return result;
  }

  private static Map<String, byte[]> toXAttrMap(final Object[] objects)
      throws IOException {
    if (objects == null) {
      return null;
    } else if (objects.length == 0) {
      return Maps.newHashMap();
    } else {
      final Map<String, byte[]> xAttrs = Maps.newHashMap();
      for(int i = 0; i < objects.length; i++) {
        Map<?, ?> m = (Map<?, ?>) objects[i];
        String name = (String) m.get("name");
        String value = (String) m.get("value");
        xAttrs.put(name, decodeXAttrValue(value));
      }
      return xAttrs;
    }
  }

  private static byte[] decodeXAttrValue(String value) throws IOException {
    if (value != null) {
      return XAttrCodec.decodeValue(value);
    } else {
      return new byte[0];
    }
  }
}
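A minimal sketch of how these JsonUtil helpers round-trip a single xattr. This is illustrative driver code, not part of the patch; it assumes JsonUtil here is org.apache.hadoop.hdfs.web.JsonUtil and that XAttrCodec.HEX renders values with a 0x prefix, as the documentation later in this patch describes:

    import java.util.Map;
    import org.apache.hadoop.fs.XAttr;
    import org.apache.hadoop.fs.XAttrCodec;
    import org.apache.hadoop.hdfs.XAttrHelper;
    import org.apache.hadoop.hdfs.web.JsonUtil;
    import org.mortbay.util.ajax.JSON;

    public class XAttrJsonRoundTrip {
      public static void main(String[] args) throws Exception {
        // Build "user.a1" with the byte value {0x31, 0x32, 0x33}.
        XAttr xAttr = XAttrHelper.buildXAttr("user.a1",
            new byte[]{0x31, 0x32, 0x33});
        // Serialize; with HEX encoding the value should render as "0x313233",
        // so js should look like {"XAttr":{"name":"user.a1","value":"0x313233"}}.
        String js = JsonUtil.toJsonString(xAttr, XAttrCodec.HEX);
        // Parse the JSON text back into a map, then into an XAttr again.
        Map<?, ?> parsed = (Map<?, ?>) JSON.parse(js);
        XAttr back = JsonUtil.toXAttr(parsed);  // decodes the hex value
        System.out.println(back.getName());
      }
    }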
@@ -30,6 +30,7 @@ import java.net.URI;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;

@@ -49,6 +50,9 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -813,6 +817,66 @@ public class WebHdfsFileSystem extends FileSystem
        new RenameOptionSetParam(options)
    ).run();
  }

  @Override
  public void setXAttr(Path p, String name, byte[] value,
      EnumSet<XAttrSetFlag> flag) throws IOException {
    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.SETXATTR;
    if (value != null) {
      new FsPathRunner(op, p, new XAttrNameParam(name), new XAttrValueParam(
          XAttrCodec.encodeValue(value, XAttrCodec.HEX)),
          new XAttrSetFlagParam(flag)).run();
    } else {
      new FsPathRunner(op, p, new XAttrNameParam(name),
          new XAttrSetFlagParam(flag)).run();
    }
  }

  @Override
  public byte[] getXAttr(Path p, String name) throws IOException {
    final HttpOpParam.Op op = GetOpParam.Op.GETXATTR;
    return new FsPathResponseRunner<byte[]>(op, p, new XAttrNameParam(name),
        new XAttrEncodingParam(XAttrCodec.HEX)) {
      @Override
      byte[] decodeResponse(Map<?, ?> json) throws IOException {
        XAttr xAttr = JsonUtil.toXAttr(json);
        return xAttr != null ? xAttr.getValue() : null;
      }
    }.run();
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path p) throws IOException {
    final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS;
    return new FsPathResponseRunner<Map<String, byte[]>>(op, p,
        new XAttrEncodingParam(XAttrCodec.HEX)) {
      @Override
      Map<String, byte[]> decodeResponse(Map<?, ?> json) throws IOException {
        return JsonUtil.toXAttrs(json);
      }
    }.run();
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path p, final List<String> names)
      throws IOException {
    final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS;
    return new FsPathResponseRunner<Map<String, byte[]>>(op, p,
        new XAttrEncodingParam(XAttrCodec.HEX)) {
      @Override
      Map<String, byte[]> decodeResponse(Map<?, ?> json) throws IOException {
        return JsonUtil.toXAttrs(json, names);
      }
    }.run();
  }

  @Override
  public void removeXAttr(Path p, String name) throws IOException {
    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.REMOVEXATTR;
    new FsPathRunner(op, p, new XAttrNameParam(name)).run();
  }

  @Override
  public void setOwner(final Path p, final String owner, final String group
@@ -36,6 +36,8 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
    /** GET_BLOCK_LOCATIONS is a private unstable op. */
    GET_BLOCK_LOCATIONS(false, HttpURLConnection.HTTP_OK),
    GETACLSTATUS(false, HttpURLConnection.HTTP_OK),
    GETXATTR(false, HttpURLConnection.HTTP_OK),
    GETXATTRS(false, HttpURLConnection.HTTP_OK),

    NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);

@@ -42,6 +42,9 @@ public class PutOpParam extends HttpOpParam<PutOpParam.Op> {
    REMOVEDEFAULTACL(false, HttpURLConnection.HTTP_OK),
    REMOVEACL(false, HttpURLConnection.HTTP_OK),
    SETACL(false, HttpURLConnection.HTTP_OK),

    SETXATTR(false, HttpURLConnection.HTTP_OK),
    REMOVEXATTR(false, HttpURLConnection.HTTP_OK),

    CREATESNAPSHOT(false, HttpURLConnection.HTTP_OK),
    DELETESNAPSHOT(false, HttpURLConnection.HTTP_OK),
@@ -0,0 +1,56 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

import org.apache.hadoop.fs.XAttrCodec;

public class XAttrEncodingParam extends EnumParam<XAttrCodec> {
  /** Parameter name. */
  public static final String NAME = "encoding";
  /** Default parameter value. */
  public static final String DEFAULT = "";

  private static final Domain<XAttrCodec> DOMAIN =
      new Domain<XAttrCodec>(NAME, XAttrCodec.class);

  public XAttrEncodingParam(final XAttrCodec encoding) {
    super(DOMAIN, encoding);
  }

  /**
   * Constructor.
   * @param str a string representation of the parameter value.
   */
  public XAttrEncodingParam(final String str) {
    super(DOMAIN, str != null && !str.isEmpty() ? DOMAIN.parse(str) : null);
  }

  @Override
  public String getName() {
    return NAME;
  }

  @Override
  public String getValueString() {
    return value.toString();
  }

  public XAttrCodec getEncoding() {
    return getValue();
  }
}
@@ -0,0 +1,44 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

import java.util.regex.Pattern;

public class XAttrNameParam extends StringParam {
  /** Parameter name. **/
  public static final String NAME = "xattr.name";
  /** Default parameter value. **/
  public static final String DEFAULT = "";

  private static Domain DOMAIN = new Domain(NAME,
      Pattern.compile("^(user\\.|trusted\\.|system\\.|security\\.).+"));

  public XAttrNameParam(final String str) {
    super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
  }

  @Override
  public String getName() {
    return NAME;
  }

  public String getXAttrName() {
    final String v = getValue();
    return v;
  }
}
@@ -0,0 +1,53 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

import java.util.EnumSet;

import org.apache.hadoop.fs.XAttrSetFlag;

public class XAttrSetFlagParam extends EnumSetParam<XAttrSetFlag> {
  /** Parameter name. */
  public static final String NAME = "flag";
  /** Default parameter value. */
  public static final String DEFAULT = "";

  private static final Domain<XAttrSetFlag> DOMAIN = new Domain<XAttrSetFlag>(
      NAME, XAttrSetFlag.class);

  public XAttrSetFlagParam(final EnumSet<XAttrSetFlag> flag) {
    super(DOMAIN, flag);
  }

  /**
   * Constructor.
   * @param str a string representation of the parameter value.
   */
  public XAttrSetFlagParam(final String str) {
    super(DOMAIN, DOMAIN.parse(str));
  }

  @Override
  public String getName() {
    return NAME;
  }

  public EnumSet<XAttrSetFlag> getFlag() {
    return getValue();
  }
}
@@ -0,0 +1,45 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

import java.io.IOException;

import org.apache.hadoop.fs.XAttrCodec;

public class XAttrValueParam extends StringParam {
  /** Parameter name. **/
  public static final String NAME = "xattr.value";
  /** Default parameter value. **/
  public static final String DEFAULT = "";

  private static Domain DOMAIN = new Domain(NAME, null);

  public XAttrValueParam(final String str) {
    super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
  }

  @Override
  public String getName() {
    return NAME;
  }

  public byte[] getXAttrValue() throws IOException {
    final String v = getValue();
    return XAttrCodec.decodeValue(v);
  }
}
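Taken together with the NamenodeWebHdfsMethods changes earlier in this patch, these op and parameter classes define the WebHDFS wire format for xattrs. A hypothetical exchange, with URL shapes inferred from the parameter names above rather than quoted from the patch:

    PUT /webhdfs/v1/foo?op=SETXATTR&xattr.name=user.a1&xattr.value=0x313233&flag=CREATE

    GET /webhdfs/v1/foo?op=GETXATTR&xattr.name=user.a1&encoding=HEX
    => {"XAttr":{"name":"user.a1","value":"0x313233"}}

The accepted spellings for flag and encoding values are whatever XAttrSetFlagParam and XAttrEncodingParam parse for their enum domains, so treat the exact query strings here as illustrative.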
@@ -31,6 +31,7 @@ package hadoop.hdfs;
import "Security.proto";
import "hdfs.proto";
import "acl.proto";
import "xattr.proto";

/**
 * The ClientNamenodeProtocol Service defines the interface between a client

@@ -759,4 +760,10 @@ service ClientNamenodeProtocol {
      returns(SetAclResponseProto);
  rpc getAclStatus(GetAclStatusRequestProto)
      returns(GetAclStatusResponseProto);
  rpc setXAttr(SetXAttrRequestProto)
      returns(SetXAttrResponseProto);
  rpc getXAttrs(GetXAttrsRequestProto)
      returns(GetXAttrsResponseProto);
  rpc removeXAttr(RemoveXAttrRequestProto)
      returns(RemoveXAttrResponseProto);
}
@@ -23,6 +23,7 @@ package hadoop.hdfs.fsimage;

import "hdfs.proto";
import "acl.proto";
import "xattr.proto";

/**
 * This file defines the on-disk layout of the file system image. The

@@ -106,7 +107,23 @@ message INodeSection {
     */
    repeated fixed32 entries = 2 [packed = true];
  }

  message XAttrCompactProto {
    /**
     *
     * [0:2) -- the namespace of XAttr (XAttrNamespaceProto)
     * [2:26) -- the name of the entry, which is an ID that points to a
     *           string in the StringTableSection.
     * [26:32) -- reserved for future uses.
     */
    required fixed32 name = 1;
    optional bytes value = 2;
  }

  message XAttrFeatureProto {
    repeated XAttrCompactProto xAttrs = 1;
  }

  message INodeFile {
    optional uint32 replication = 1;
    optional uint64 modificationTime = 2;

@@ -116,6 +133,7 @@ message INodeSection {
    repeated BlockProto blocks = 6;
    optional FileUnderConstructionFeature fileUC = 7;
    optional AclFeatureProto acl = 8;
    optional XAttrFeatureProto xAttrs = 9;
  }

  message INodeDirectory {

@@ -126,6 +144,7 @@ message INodeSection {
    optional uint64 dsQuota = 3;
    optional fixed64 permission = 4;
    optional AclFeatureProto acl = 5;
    optional XAttrFeatureProto xAttrs = 6;
  }

  message INodeSymlink {
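The XAttrCompactProto comment above packs a 2-bit namespace and a 24-bit string-table ID into one fixed32. A hedged sketch of that arithmetic, assuming bit 0 is the least-significant bit; the authoritative bit order is whatever the fsimage writer uses, so this is illustrative only:

    // Hypothetical helper; masks and shifts follow the [0:2)/[2:26) comment.
    final class XAttrCompactCodec {
      private static final int NS_MASK = 0x3;       // 2 bits of namespace
      private static final int ID_MASK = 0xFFFFFF;  // 24 bits of name ID

      static int pack(int nsOrdinal, int stringTableId) {
        return (nsOrdinal & NS_MASK) | ((stringTableId & ID_MASK) << 2);
      }

      static int namespaceOf(int packed) {
        return packed & NS_MASK;
      }

      static int stringIdOf(int packed) {
        return (packed >>> 2) & ID_MASK;
      }
    }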
@@ -0,0 +1,71 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

option java_package = "org.apache.hadoop.hdfs.protocol.proto";
option java_outer_classname = "XAttrProtos";
option java_generate_equals_and_hash = true;
package hadoop.hdfs;

message XAttrProto {
  enum XAttrNamespaceProto {
    USER = 0;
    TRUSTED = 1;
    SECURITY = 2;
    SYSTEM = 3;
  }

  required XAttrNamespaceProto namespace = 1;
  required string name = 2;
  optional bytes value = 3;
}

message XAttrEditLogProto {
  required string src = 1;
  optional XAttrProto xAttr = 2;
}

enum XAttrSetFlagProto {
  XATTR_CREATE = 0x01;
  XATTR_REPLACE = 0x02;
}

message SetXAttrRequestProto {
  required string src = 1;
  optional XAttrProto xAttr = 2;
  optional uint32 flag = 3; //bits set using XAttrSetFlagProto
}

message SetXAttrResponseProto {
}

message GetXAttrsRequestProto {
  required string src = 1;
  repeated XAttrProto xAttrs = 2;
}

message GetXAttrsResponseProto {
  repeated XAttrProto xAttrs = 1;
}

message RemoveXAttrRequestProto {
  required string src = 1;
  optional XAttrProto xAttr = 2;
}

message RemoveXAttrResponseProto {
}
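For illustration, building a setXAttr RPC request against the classes generated from this file. The patch itself only defines the .proto; the class and method names below assume standard protobuf-java codegen conventions:

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto;
    import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto;
    import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrSetFlagProto;

    public class SetXAttrRequestExample {
      public static SetXAttrRequestProto build() {
        return SetXAttrRequestProto.newBuilder()
            .setSrc("/foo")  // illustrative path
            .setXAttr(XAttrProto.newBuilder()
                .setNamespace(XAttrProto.XAttrNamespaceProto.USER)
                // presumably the bare name; the namespace travels in the enum
                .setName("a1")
                .setValue(ByteString.copyFrom(new byte[]{0x31, 0x32, 0x33})))
            // flag is a uint32 bit set, per the XAttrSetFlagProto comment
            .setFlag(XAttrSetFlagProto.XATTR_CREATE_VALUE)
            .build();
      }
    }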
@@ -1906,4 +1906,28 @@
</description>
</property>

<property>
  <name>dfs.namenode.xattrs.enabled</name>
  <value>true</value>
  <description>
    Whether support for extended attributes is enabled on the NameNode.
  </description>
</property>

<property>
  <name>dfs.namenode.fs-limits.max-xattrs-per-inode</name>
  <value>32</value>
  <description>
    Maximum number of extended attributes per inode.
  </description>
</property>

<property>
  <name>dfs.namenode.fs-limits.max-xattr-size</name>
  <value>16384</value>
  <description>
    The maximum combined size of the name and value of an extended attribute in bytes.
  </description>
</property>

</configuration>
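The same knobs can also be set programmatically. A sketch using the DFSConfigKeys constants that this patch itself uses in FSXAttrBaseTest further below:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class XAttrConfExample {
      public static Configuration create() {
        Configuration conf = new HdfsConfiguration();
        // dfs.namenode.xattrs.enabled
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
        // dfs.namenode.fs-limits.max-xattrs-per-inode
        conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 32);
        // dfs.namenode.fs-limits.max-xattr-size
        conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, 16384);
        return conf;
      }
    }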
@@ -0,0 +1,98 @@
~~ Licensed under the Apache License, Version 2.0 (the "License");
~~ you may not use this file except in compliance with the License.
~~ You may obtain a copy of the License at
~~
~~   http://www.apache.org/licenses/LICENSE-2.0
~~
~~ Unless required by applicable law or agreed to in writing, software
~~ distributed under the License is distributed on an "AS IS" BASIS,
~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~~ See the License for the specific language governing permissions and
~~ limitations under the License. See accompanying LICENSE file.

  ---
  Hadoop Distributed File System-${project.version} - Extended Attributes
  ---
  ---
  ${maven.build.timestamp}

Extended Attributes in HDFS

  \[ {{{../../index.html}Go Back}} \]

%{toc|section=1|fromDepth=2|toDepth=4}

* {Overview}

  <Extended attributes> (abbreviated as <xattrs>) are a filesystem feature that allows user applications to associate additional metadata with a file or directory. Unlike system-level inode metadata such as file permissions or modification time, extended attributes are not interpreted by the system and are instead used by applications to store additional information about an inode. Extended attributes could be used, for instance, to specify the character encoding of a plain-text document.

** {HDFS extended attributes}

  Extended attributes in HDFS are modeled after extended attributes in Linux (see the Linux manpage for {{{http://www.bestbits.at/acl/man/man5/attr.txt}attr(5)}} and {{{http://www.bestbits.at/acl/}related documentation}}). An extended attribute is a <name-value pair>, with a string name and binary value. Xattr names must also be prefixed with a <namespace>. For example, an xattr named <myXattr> in the <user> namespace would be specified as <<user.myXattr>>. Multiple xattrs can be associated with a single inode.

** {Namespaces and Permissions}

  In HDFS, as in Linux, there are four valid namespaces: <<<user>>>, <<<trusted>>>, <<<system>>>, and <<<security>>>. Each of these namespaces has different access restrictions.

  The <<<user>>> namespace is the namespace that will commonly be used by client applications. Access to extended attributes in the user namespace is controlled by the corresponding file permissions.

  The <<<trusted>>> namespace is available only to HDFS superusers.

  The <<<system>>> namespace is reserved for internal HDFS use. This namespace is not accessible through userspace methods, and is reserved for implementing internal HDFS features.

  The <<<security>>> namespace is reserved for internal HDFS use. This namespace is not accessible through userspace methods. It is currently unused.

* {Interacting with extended attributes}

  The Hadoop shell has support for interacting with extended attributes via <<<hadoop fs -getfattr>>> and <<<hadoop fs -setfattr>>>. These commands are styled after the Linux {{{http://www.bestbits.at/acl/man/man1/getfattr.txt}getfattr(1)}} and {{{http://www.bestbits.at/acl/man/man1/setfattr.txt}setfattr(1)}} commands.

** {getfattr}

  <<<hadoop fs -getfattr [-R] {-n name | -d} [-e en] <path>>>>

  Displays the extended attribute names and values (if any) for a file or directory.

*--+--+
-R | Recursively list the attributes for all files and directories.
*--+--+
-n name | Dump the named extended attribute value.
*--+--+
-d | Dump all extended attribute values associated with pathname.
*--+--+
-e \<encoding\> | Encode values after retrieving them. Valid encodings are "text", "hex", and "base64". Values encoded as text strings are enclosed in double quotes ("), and values encoded as hexadecimal and base64 are prefixed with 0x and 0s, respectively.
*--+--+
\<path\> | The file or directory.
*--+--+

** {setfattr}

  <<<hadoop fs -setfattr {-n name [-v value] | -x name} <path>>>>

  Sets an extended attribute name and value for a file or directory.

*--+--+
-n name | The extended attribute name.
*--+--+
-v value | The extended attribute value. There are three different encoding methods for the value. If the argument is enclosed in double quotes, then the value is the string inside the quotes. If the argument is prefixed with 0x or 0X, then it is taken as a hexadecimal number. If the argument begins with 0s or 0S, then it is taken as a base64 encoding.
*--+--+
-x name | Remove the extended attribute.
*--+--+
\<path\> | The file or directory.
*--+--+

* {Configuration options}

  HDFS supports extended attributes out of the box, without additional configuration. Administrators could potentially be interested in the options limiting the number of xattrs per inode and the size of xattrs, since xattrs increase the on-disk and in-memory space consumption of an inode.

  * <<<dfs.namenode.xattrs.enabled>>>

  Whether support for extended attributes is enabled on the NameNode. By default, extended attributes are enabled.

  * <<<dfs.namenode.fs-limits.max-xattrs-per-inode>>>

  The maximum number of extended attributes per inode. By default, this limit is 32.

  * <<<dfs.namenode.fs-limits.max-xattr-size>>>

  The maximum combined size of the name and value of an extended attribute in bytes. By default, this limit is 16384 bytes.
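The shell commands documented above map onto the FileSystem xattr API added by this change. A minimal programmatic sketch; the path and attribute names are illustrative:

    import java.util.EnumSet;
    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.XAttrSetFlag;

    public class XAttrApiExample {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path file = new Path("/foo");
        // hadoop fs -setfattr -n user.myXattr -v myValue /foo
        fs.setXAttr(file, "user.myXattr", "myValue".getBytes("UTF-8"),
            EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
        // hadoop fs -getfattr -d /foo
        Map<String, byte[]> xattrs = fs.getXAttrs(file);
        // hadoop fs -setfattr -x user.myXattr /foo
        fs.removeXAttr(file, "user.myXattr");
      }
    }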
@@ -0,0 +1,99 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.cli;

import static org.junit.Assert.assertTrue;

import org.apache.hadoop.cli.util.CLICommand;
import org.apache.hadoop.cli.util.CommandExecutor.Result;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class TestXAttrCLI extends CLITestHelperDFS {
  protected MiniDFSCluster dfsCluster = null;
  protected FileSystem fs = null;
  protected String namenode = null;

  @Before
  @Override
  public void setUp() throws Exception {
    super.setUp();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
    conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
        HDFSPolicyProvider.class, PolicyProvider.class);
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    dfsCluster.waitClusterUp();
    namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

    username = System.getProperty("user.name");

    fs = dfsCluster.getFileSystem();
    assertTrue("Not a HDFS: "+fs.getUri(),
        fs instanceof DistributedFileSystem);
  }

  @Override
  protected String getTestFile() {
    return "testXAttrConf.xml";
  }

  @After
  @Override
  public void tearDown() throws Exception {
    if (fs != null) {
      fs.close();
    }
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
    Thread.sleep(2000);
    super.tearDown();
  }

  @Override
  protected String expandCommand(final String cmd) {
    String expCmd = cmd;
    expCmd = expCmd.replaceAll("NAMENODE", namenode);
    expCmd = expCmd.replaceAll("#LF#",
        System.getProperty("line.separator"));
    expCmd = super.expandCommand(expCmd);
    return expCmd;
  }

  @Override
  protected Result execute(CLICommand cmd) throws Exception {
    return cmd.getExecutor(namenode).executeCommand(cmd.getCmd());
  }

  @Test
  @Override
  public void testAll() {
    super.testAll();
  }
}
@@ -0,0 +1,85 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotSame;

import org.junit.BeforeClass;
import org.junit.Test;

/**
 * Tests for <code>XAttr</code> objects.
 */
public class TestXAttr {
  private static XAttr XATTR, XATTR1, XATTR2, XATTR3, XATTR4;

  @BeforeClass
  public static void setUp() throws Exception {
    byte[] value = {0x31, 0x32, 0x33};
    XATTR = new XAttr.Builder()
        .setName("name")
        .setValue(value)
        .build();
    XATTR1 = new XAttr.Builder()
        .setNameSpace(XAttr.NameSpace.USER)
        .setName("name")
        .setValue(value)
        .build();
    XATTR2 = new XAttr.Builder()
        .setNameSpace(XAttr.NameSpace.TRUSTED)
        .setName("name")
        .setValue(value)
        .build();
    XATTR3 = new XAttr.Builder()
        .setNameSpace(XAttr.NameSpace.SYSTEM)
        .setName("name")
        .setValue(value)
        .build();
    XATTR4 = new XAttr.Builder()
        .setNameSpace(XAttr.NameSpace.SECURITY)
        .setName("name")
        .setValue(value)
        .build();
  }

  @Test
  public void testXAttrEquals() {
    assertNotSame(XATTR1, XATTR2);
    assertNotSame(XATTR2, XATTR3);
    assertNotSame(XATTR3, XATTR4);
    assertEquals(XATTR, XATTR1);
    assertEquals(XATTR1, XATTR1);
    assertEquals(XATTR2, XATTR2);
    assertEquals(XATTR3, XATTR3);
    assertEquals(XATTR4, XATTR4);
    assertFalse(XATTR1.equals(XATTR2));
    assertFalse(XATTR2.equals(XATTR3));
    assertFalse(XATTR3.equals(XATTR4));
  }

  @Test
  public void testXAttrHashCode() {
    assertEquals(XATTR.hashCode(), XATTR1.hashCode());
    assertFalse(XATTR1.hashCode() == XATTR2.hashCode());
    assertFalse(XATTR2.hashCode() == XATTR3.hashCode());
    assertFalse(XATTR3.hashCode() == XATTR4.hashCode());
  }
}
@@ -1181,6 +1181,13 @@ public class DFSTestUtil {
        .setType(AclEntryType.OTHER)
        .build());
    filesystem.setAcl(pathConcatTarget, aclEntryList);
    // OP_SET_XATTR
    filesystem.setXAttr(pathConcatTarget, "user.a1",
        new byte[]{0x31, 0x32, 0x33});
    filesystem.setXAttr(pathConcatTarget, "user.a2",
        new byte[]{0x37, 0x38, 0x39});
    // OP_REMOVE_XATTR
    filesystem.removeXAttr(pathConcatTarget, "user.a2");
  }

  public static void abortStream(DFSOutputStream out) throws IOException {
@@ -155,7 +155,7 @@ public class TestDFSShell {
  }

  @Test (timeout = 30000)
  public void testRecrusiveRm() throws IOException {
  public void testRecursiveRm() throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    FileSystem fs = cluster.getFileSystem();

@@ -1583,6 +1583,7 @@ public class TestDFSShell {
      cluster.shutdown();
    }
  }

  private static String runLsr(final FsShell shell, String root, int returnvalue
      ) throws Exception {
    System.out.println("root=" + root + ", returnvalue=" + returnvalue);

@@ -1874,6 +1875,333 @@ public class TestDFSShell {
      cluster.shutdown();
    }
  }

  @Test (timeout = 30000)
  public void testSetXAttrPermission() throws Exception {
    UserGroupInformation user = UserGroupInformation.
        createUserForTesting("user", new String[] {"mygroup"});
    MiniDFSCluster cluster = null;
    PrintStream bak = null;
    try {
      final Configuration conf = new HdfsConfiguration();
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();

      FileSystem fs = cluster.getFileSystem();
      Path p = new Path("/foo");
      fs.mkdirs(p);
      bak = System.err;

      final FsShell fshell = new FsShell(conf);
      final ByteArrayOutputStream out = new ByteArrayOutputStream();
      System.setErr(new PrintStream(out));

      // No permission to write xattr
      fs.setPermission(p, new FsPermission((short) 0700));
      user.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          int ret = ToolRunner.run(fshell, new String[]{
              "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
          assertEquals("Returned should be 1", 1, ret);
          String str = out.toString();
          assertTrue("Permission denied printed",
              str.indexOf("Permission denied") != -1);
          out.reset();
          return null;
        }
      });

      int ret = ToolRunner.run(fshell, new String[]{
          "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
      assertEquals("Returned should be 0", 0, ret);
      out.reset();

      // No permission to read and remove
      fs.setPermission(p, new FsPermission((short) 0750));
      user.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          // Read
          int ret = ToolRunner.run(fshell, new String[]{
              "-getfattr", "-n", "user.a1", "/foo"});
          assertEquals("Returned should be 1", 1, ret);
          String str = out.toString();
          assertTrue("Permission denied printed",
              str.indexOf("Permission denied") != -1);
          out.reset();
          // Remove
          ret = ToolRunner.run(fshell, new String[]{
              "-setfattr", "-x", "user.a1", "/foo"});
          assertEquals("Returned should be 1", 1, ret);
          str = out.toString();
          assertTrue("Permission denied printed",
              str.indexOf("Permission denied") != -1);
          out.reset();
          return null;
        }
      });
    } finally {
      if (bak != null) {
        System.setErr(bak);
      }
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }

  /* HDFS-6413 xattr names erroneously handled as case-insensitive */
  @Test (timeout = 30000)
  public void testSetXAttrCaseSensitivity() throws Exception {
    UserGroupInformation user = UserGroupInformation.
        createUserForTesting("user", new String[] {"mygroup"});
    MiniDFSCluster cluster = null;
    PrintStream bak = null;
    try {
      final Configuration conf = new HdfsConfiguration();
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();

      FileSystem fs = cluster.getFileSystem();
      Path p = new Path("/mydir");
      fs.mkdirs(p);
      bak = System.err;

      final FsShell fshell = new FsShell(conf);
      final ByteArrayOutputStream out = new ByteArrayOutputStream();
      System.setOut(new PrintStream(out));

      doSetXattr(out, fshell,
          new String[] {"-setfattr", "-n", "User.Foo", "/mydir"},
          new String[] {"-getfattr", "-d", "/mydir"},
          new String[] {"user.Foo"},
          new String[] {});

      doSetXattr(out, fshell,
          new String[] {"-setfattr", "-n", "user.FOO", "/mydir"},
          new String[] {"-getfattr", "-d", "/mydir"},
          new String[] {"user.Foo", "user.FOO"},
          new String[] {});

      doSetXattr(out, fshell,
          new String[] {"-setfattr", "-n", "USER.foo", "/mydir"},
          new String[] {"-getfattr", "-d", "/mydir"},
          new String[] {"user.Foo", "user.FOO", "user.foo"},
          new String[] {});

      doSetXattr(out, fshell,
          new String[] {"-setfattr", "-n", "USER.fOo", "-v", "myval", "/mydir"},
          new String[] {"-getfattr", "-d", "/mydir"},
          new String[] {"user.Foo", "user.FOO", "user.foo", "user.fOo=\"myval\""},
          new String[] {"user.Foo=", "user.FOO=", "user.foo="});

      doSetXattr(out, fshell,
          new String[] {"-setfattr", "-x", "useR.foo", "/mydir"},
          new String[] {"-getfattr", "-d", "/mydir"},
          new String[] {"user.Foo", "user.FOO"},
          new String[] {"foo"});

      doSetXattr(out, fshell,
          new String[] {"-setfattr", "-x", "USER.FOO", "/mydir"},
          new String[] {"-getfattr", "-d", "/mydir"},
          new String[] {"user.Foo"},
          new String[] {"FOO"});

      doSetXattr(out, fshell,
          new String[] {"-setfattr", "-x", "useR.Foo", "/mydir"},
          new String[] {"-getfattr", "-n", "User.Foo", "/mydir"},
          new String[] {},
          new String[] {"Foo"});

    } finally {
      if (bak != null) {
        System.setOut(bak);
      }
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }

  private void doSetXattr(ByteArrayOutputStream out, FsShell fshell,
      String[] setOp, String[] getOp, String[] expectArr,
      String[] dontExpectArr) throws Exception {
    int ret = ToolRunner.run(fshell, setOp);
    out.reset();
    ret = ToolRunner.run(fshell, getOp);
    final String str = out.toString();
    for (int i = 0; i < expectArr.length; i++) {
      final String expect = expectArr[i];
      final StringBuilder sb = new StringBuilder
          ("Incorrect results from getfattr. Expected: ");
      sb.append(expect).append(" Full Result: ");
      sb.append(str);
      assertTrue(sb.toString(),
          str.indexOf(expect) != -1);
    }

    for (int i = 0; i < dontExpectArr.length; i++) {
      String dontExpect = dontExpectArr[i];
      final StringBuilder sb = new StringBuilder
          ("Incorrect results from getfattr. Didn't Expect: ");
      sb.append(dontExpect).append(" Full Result: ");
      sb.append(str);
      assertTrue(sb.toString(),
          str.indexOf(dontExpect) == -1);
    }
    out.reset();
  }

  /**
   * HDFS-6374 setXAttr should require the user to be the owner of the file
   * or directory.
   *
   * Test to make sure that only the owner of a file or directory can set
   * or remove the xattrs.
   *
   * As user1:
   * Create a directory (/foo) as user1, chown it to user1 (and user1's group),
   * grant rwx to "other".
   *
   * As user2:
   * Set an xattr (should fail).
   *
   * As user1:
   * Set an xattr (should pass).
   *
   * As user2:
   * Read the xattr (should pass).
   * Remove the xattr (should fail).
   *
   * As user1:
   * Read the xattr (should pass).
   * Remove the xattr (should pass).
   */
  @Test (timeout = 30000)
  public void testSetXAttrPermissionAsDifferentOwner() throws Exception {
    final String USER1 = "user1";
    final String GROUP1 = "mygroup1";
    final UserGroupInformation user1 = UserGroupInformation.
        createUserForTesting(USER1, new String[] {GROUP1});
    final UserGroupInformation user2 = UserGroupInformation.
        createUserForTesting("user2", new String[] {"mygroup2"});
    MiniDFSCluster cluster = null;
    PrintStream bak = null;
    try {
      final Configuration conf = new HdfsConfiguration();
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();

      final FileSystem fs = cluster.getFileSystem();
      fs.setOwner(new Path("/"), USER1, GROUP1);
      bak = System.err;

      final FsShell fshell = new FsShell(conf);
      final ByteArrayOutputStream out = new ByteArrayOutputStream();
      System.setErr(new PrintStream(out));

      // mkdir foo as user1
      user1.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          final int ret = ToolRunner.run(fshell, new String[]{
              "-mkdir", "/foo"});
          assertEquals("Return should be 0", 0, ret);
          out.reset();
          return null;
        }
      });

      user1.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          // Give access to "other"
          final int ret = ToolRunner.run(fshell, new String[]{
              "-chmod", "707", "/foo"});
          assertEquals("Return should be 0", 0, ret);
          out.reset();
          return null;
        }
      });

      // No permission to write xattr for non-owning user (user2).
      user2.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          final int ret = ToolRunner.run(fshell, new String[]{
              "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
          assertEquals("Returned should be 1", 1, ret);
          final String str = out.toString();
          assertTrue("Permission denied printed",
              str.indexOf("Permission denied") != -1);
          out.reset();
          return null;
        }
      });

      // But there should be permission to write xattr for
      // the owning user.
      user1.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          final int ret = ToolRunner.run(fshell, new String[]{
              "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
          assertEquals("Returned should be 0", 0, ret);
          out.reset();
          return null;
        }
      });

      // There should be permission to read, but not to remove, for
      // the non-owning user (user2).
      user2.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          // Read
          int ret = ToolRunner.run(fshell, new String[]{
              "-getfattr", "-n", "user.a1", "/foo"});
          assertEquals("Returned should be 0", 0, ret);
          out.reset();
          // Remove
          ret = ToolRunner.run(fshell, new String[]{
              "-setfattr", "-x", "user.a1", "/foo"});
          assertEquals("Returned should be 1", 1, ret);
          final String str = out.toString();
          assertTrue("Permission denied printed",
              str.indexOf("Permission denied") != -1);
          out.reset();
          return null;
        }
      });

      // But there should be permission to read/remove for
      // the owning user.
      user1.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          // Read
          int ret = ToolRunner.run(fshell, new String[]{
              "-getfattr", "-n", "user.a1", "/foo"});
          assertEquals("Returned should be 0", 0, ret);
          out.reset();
          // Remove
          ret = ToolRunner.run(fshell, new String[]{
              "-setfattr", "-x", "user.a1", "/foo"});
          assertEquals("Returned should be 0", 0, ret);
          out.reset();
          return null;
        }
      });
    } finally {
      if (bak != null) {
        System.setErr(bak);
      }
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }

  /**
   * Test that the server trash configuration is respected when
@@ -73,6 +73,7 @@ public class TestSafeMode {
    conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();

@@ -381,7 +382,19 @@ public class TestSafeMode {
    public void run(FileSystem fs) throws IOException {
      fs.setAcl(file1, Lists.<AclEntry>newArrayList());
    }});

    runFsFun("setXAttr while in SM", new FSRun() {
      @Override
      public void run(FileSystem fs) throws IOException {
        fs.setXAttr(file1, "user.a1", null);
      }});

    runFsFun("removeXAttr while in SM", new FSRun() {
      @Override
      public void run(FileSystem fs) throws IOException {
        fs.removeXAttr(file1, "user.a1");
      }});

    try {
      DFSTestUtil.readFile(fs, file1);
    } catch (IOException ioe) {
@@ -0,0 +1,475 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

import com.google.common.collect.Lists;

/**
 * Tests NameNode interaction for all XAttr APIs.
 * This test suite covers restarting the NN and saving a new checkpoint.
 */
public class FSXAttrBaseTest {

  private static final int MAX_SIZE = 16;

  protected static MiniDFSCluster dfsCluster;
  protected static Configuration conf;
  private static int pathCount = 0;
  private static Path path;

  // XAttrs
  protected static final String name1 = "user.a1";
  protected static final byte[] value1 = {0x31, 0x32, 0x33};
  protected static final byte[] newValue1 = {0x31, 0x31, 0x31};
  protected static final String name2 = "user.a2";
  protected static final byte[] value2 = {0x37, 0x38, 0x39};
  protected static final String name3 = "user.a3";
  protected static final String name4 = "user.a4";

  protected FileSystem fs;

  @BeforeClass
  public static void init() throws Exception {
    conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 3);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, MAX_SIZE);
    initCluster(true);
  }

  @AfterClass
  public static void shutdown() {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
  }

  @Before
  public void setUp() throws Exception {
    pathCount += 1;
    path = new Path("/p" + pathCount);
    initFileSystem();
  }

  @After
  public void destroyFileSystems() {
    IOUtils.cleanup(null, fs);
    fs = null;
  }

  /**
   * Tests for creating xattr
   * 1. Create an xattr using XAttrSetFlag.CREATE.
   * 2. Create an xattr which already exists and expect an exception.
   * 3. Create multiple xattrs.
   * 4. Restart NN and save checkpoint scenarios.
   */
  @Test(timeout = 120000)
  public void testCreateXAttr() throws Exception {
    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));

    Map<String, byte[]> xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 1);
    Assert.assertArrayEquals(value1, xattrs.get(name1));

    fs.removeXAttr(path, name1);

    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 0);

    // Create xattr which already exists.
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    try {
      fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
      Assert.fail("Creating xattr which already exists should fail.");
    } catch (IOException e) {
    }
    fs.removeXAttr(path, name1);

    // Create two xattrs
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(path, name2, null, EnumSet.of(XAttrSetFlag.CREATE));
    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(new byte[0], xattrs.get(name2));

    restart(false);
    initFileSystem();
    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(new byte[0], xattrs.get(name2));

    restart(true);
    initFileSystem();
    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(new byte[0], xattrs.get(name2));

    fs.removeXAttr(path, name1);
    fs.removeXAttr(path, name2);
  }

  /**
   * Tests for replacing xattr
   * 1. Replace an xattr using XAttrSetFlag.REPLACE.
   * 2. Replace an xattr which doesn't exist and expect an exception.
   * 3. Create multiple xattrs and replace some.
   * 4. Restart NN and save checkpoint scenarios.
   */
  @Test(timeout = 120000)
  public void testReplaceXAttr() throws Exception {
    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.REPLACE));

    Map<String, byte[]> xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 1);
    Assert.assertArrayEquals(newValue1, xattrs.get(name1));

    fs.removeXAttr(path, name1);

    // Replace xattr which does not exist.
    try {
      fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.REPLACE));
      Assert.fail("Replacing xattr which does not exist should fail.");
    } catch (IOException e) {
    }

    // Create two xattrs, then replace one
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(path, name2, null, EnumSet.of(XAttrSetFlag.REPLACE));
    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(new byte[0], xattrs.get(name2));

    restart(false);
    initFileSystem();
    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(new byte[0], xattrs.get(name2));

    restart(true);
    initFileSystem();
    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(new byte[0], xattrs.get(name2));

    fs.removeXAttr(path, name1);
    fs.removeXAttr(path, name2);
  }

  /**
   * Tests for setting xattr
   * 1. Set xattr with XAttrSetFlag.CREATE|XAttrSetFlag.REPLACE flag.
   * 2. Set xattr with illegal name.
   * 3. Set xattr without XAttrSetFlag.
   * 4. Set xattr and total number exceeds max limit.
   * 5. Set xattr and name is too long.
   * 6. Set xattr and value is too long.
   */
  @Test(timeout = 120000)
  public void testSetXAttr() throws Exception {
    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE,
        XAttrSetFlag.REPLACE));

    Map<String, byte[]> xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 1);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    fs.removeXAttr(path, name1);

    // Set xattr with null name
    try {
      fs.setXAttr(path, null, value1, EnumSet.of(XAttrSetFlag.CREATE,
          XAttrSetFlag.REPLACE));
      Assert.fail("Setting xattr with null name should fail.");
    } catch (NullPointerException e) {
      GenericTestUtils.assertExceptionContains("XAttr name cannot be null", e);
    } catch (RemoteException e) {
      GenericTestUtils.assertExceptionContains("XAttr name cannot be null", e);
    }

    // Set xattr with empty name: "user."
    try {
      fs.setXAttr(path, "user.", value1, EnumSet.of(XAttrSetFlag.CREATE,
          XAttrSetFlag.REPLACE));
      Assert.fail("Setting xattr with empty name should fail.");
    } catch (HadoopIllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e);
    } catch (IllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains("Invalid value: \"user.\" does " +
          "not belong to the domain ^(user\\.|trusted\\.|system\\.|security\\.).+", e);
    }

    // Set xattr with invalid name: "a1"
    try {
      fs.setXAttr(path, "a1", value1, EnumSet.of(XAttrSetFlag.CREATE,
          XAttrSetFlag.REPLACE));
      Assert.fail("Setting xattr with invalid name prefix or without " +
          "name prefix should fail.");
    } catch (HadoopIllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e);
    } catch (IllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains("Invalid value: \"a1\" does " +
          "not belong to the domain ^(user\\.|trusted\\.|system\\.|security\\.).+", e);
    }

    // Set xattr without XAttrSetFlag
    fs.setXAttr(path, name1, value1);
    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 1);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    fs.removeXAttr(path, name1);

    // XAttr exists, and replace it using CREATE|REPLACE flag.
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
|
||||
fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.CREATE,
|
||||
XAttrSetFlag.REPLACE));
|
||||
|
||||
xattrs = fs.getXAttrs(path);
|
||||
Assert.assertEquals(xattrs.size(), 1);
|
||||
Assert.assertArrayEquals(newValue1, xattrs.get(name1));
|
||||
|
||||
fs.removeXAttr(path, name1);
|
||||
|
||||
// Total number exceeds max limit
|
||||
fs.setXAttr(path, name1, value1);
|
||||
fs.setXAttr(path, name2, value2);
|
||||
fs.setXAttr(path, name3, null);
|
||||
try {
|
||||
fs.setXAttr(path, name4, null);
|
||||
Assert.fail("Setting xattr should fail if total number of xattrs " +
|
||||
"for inode exceeds max limit.");
|
||||
} catch (IOException e) {
|
||||
GenericTestUtils.assertExceptionContains("Cannot add additional XAttr", e);
|
||||
}
|
||||
fs.removeXAttr(path, name1);
|
||||
fs.removeXAttr(path, name2);
|
||||
fs.removeXAttr(path, name3);
|
||||
|
||||
// Name length exceeds max limit
|
||||
String longName = "user.0123456789abcdefX";
|
||||
try {
|
||||
fs.setXAttr(path, longName, null);
|
||||
Assert.fail("Setting xattr should fail if name is too long.");
|
||||
} catch (IOException e) {
|
||||
GenericTestUtils.assertExceptionContains("XAttr is too big", e);
|
||||
GenericTestUtils.assertExceptionContains("total size is 17", e);
|
||||
}
|
||||
|
||||
// Value length exceeds max limit
|
||||
byte[] longValue = new byte[MAX_SIZE];
|
||||
try {
|
||||
fs.setXAttr(path, "user.a", longValue);
|
||||
Assert.fail("Setting xattr should fail if value is too long.");
|
||||
} catch (IOException e) {
|
||||
GenericTestUtils.assertExceptionContains("XAttr is too big", e);
|
||||
GenericTestUtils.assertExceptionContains("total size is 17", e);
|
||||
}
|
||||
|
||||
// Name + value exactly equal the limit
|
||||
String name = "user.111";
|
||||
byte[] value = new byte[MAX_SIZE-3];
|
||||
fs.setXAttr(path, name, value);
|
||||
}
|
||||
|
||||
/**
|
||||
* Tests for getting xattr
|
||||
* 1. To get xattr which does not exist.
|
||||
* 2. To get multiple xattrs.
|
||||
*/
|
||||
@Test(timeout = 120000)
|
||||
public void testGetXAttrs() throws Exception {
|
||||
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
|
||||
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
|
||||
fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
|
||||
|
||||
// XAttr does not exist.
|
||||
byte[] value = fs.getXAttr(path, name3);
|
||||
Assert.assertEquals(value, null);
|
||||
|
||||
List<String> names = Lists.newArrayList();
|
||||
names.add(name1);
|
||||
names.add(name2);
|
||||
names.add(name3);
|
||||
Map<String, byte[]> xattrs = fs.getXAttrs(path, names);
|
||||
Assert.assertEquals(xattrs.size(), 2);
|
||||
Assert.assertArrayEquals(value1, xattrs.get(name1));
|
||||
Assert.assertArrayEquals(value2, xattrs.get(name2));
|
||||
|
||||
fs.removeXAttr(path, name1);
|
||||
fs.removeXAttr(path, name2);
|
||||
}
|
||||
|
||||
/**
|
||||
* Tests for removing xattr
|
||||
* 1. Remove xattr.
|
||||
* 2. Restart NN and save checkpoint scenarios.
|
||||
*/
|
||||
@Test(timeout = 120000)
|
||||
public void testRemoveXAttr() throws Exception {
|
||||
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
|
||||
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
|
||||
fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
|
||||
fs.setXAttr(path, name3, null, EnumSet.of(XAttrSetFlag.CREATE));
|
||||
|
||||
fs.removeXAttr(path, name1);
|
||||
fs.removeXAttr(path, name2);
|
||||
|
||||
Map<String, byte[]> xattrs = fs.getXAttrs(path);
|
||||
Assert.assertEquals(xattrs.size(), 1);
|
||||
Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
|
||||
|
||||
restart(false);
|
||||
initFileSystem();
|
||||
xattrs = fs.getXAttrs(path);
|
||||
Assert.assertEquals(xattrs.size(), 1);
|
||||
Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
|
||||
|
||||
restart(true);
|
||||
initFileSystem();
|
||||
xattrs = fs.getXAttrs(path);
|
||||
Assert.assertEquals(xattrs.size(), 1);
|
||||
Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
|
||||
|
||||
fs.removeXAttr(path, name3);
|
||||
}
|
||||
|
||||
/**
|
||||
* Steps:
|
||||
* 1) Set xattrs on a file.
|
||||
* 2) Remove xattrs from that file.
|
||||
* 3) Save a checkpoint and restart NN.
|
||||
* 4) Set xattrs again on the same file.
|
||||
* 5) Remove xattrs from that file.
|
||||
* 6) Restart NN without saving a checkpoint.
|
||||
* 7) Set xattrs again on the same file.
|
||||
*/
|
||||
@Test(timeout = 120000)
|
||||
public void testCleanupXAttrs() throws Exception {
|
||||
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
|
||||
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
|
||||
fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
|
||||
fs.removeXAttr(path, name1);
|
||||
fs.removeXAttr(path, name2);
|
||||
|
||||
restart(true);
|
||||
initFileSystem();
|
||||
|
||||
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
|
||||
fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
|
||||
fs.removeXAttr(path, name1);
|
||||
fs.removeXAttr(path, name2);
|
||||
|
||||
restart(false);
|
||||
initFileSystem();
|
||||
|
||||
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
|
||||
fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
|
||||
fs.removeXAttr(path, name1);
|
||||
fs.removeXAttr(path, name2);
|
||||
|
||||
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
|
||||
fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
|
||||
|
||||
Map<String, byte[]> xattrs = fs.getXAttrs(path);
|
||||
Assert.assertEquals(xattrs.size(), 2);
|
||||
Assert.assertArrayEquals(value1, xattrs.get(name1));
|
||||
Assert.assertArrayEquals(value2, xattrs.get(name2));
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a FileSystem for the super-user.
|
||||
*
|
||||
* @return FileSystem for super-user
|
||||
* @throws Exception if creation fails
|
||||
*/
|
||||
protected FileSystem createFileSystem() throws Exception {
|
||||
return dfsCluster.getFileSystem();
|
||||
}
|
||||
|
||||
/**
|
||||
* Initializes all FileSystem instances used in the tests.
|
||||
*
|
||||
* @throws Exception if initialization fails
|
||||
*/
|
||||
private void initFileSystem() throws Exception {
|
||||
fs = createFileSystem();
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the cluster, wait for it to become active, and get FileSystem
|
||||
* instances for our test users.
|
||||
*
|
||||
* @param format if true, format the NameNode and DataNodes before starting up
|
||||
* @throws Exception if any step fails
|
||||
*/
|
||||
protected static void initCluster(boolean format) throws Exception {
|
||||
dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format)
|
||||
.build();
|
||||
dfsCluster.waitActive();
|
||||
}
|
||||
|
||||
/**
|
||||
* Restart the cluster, optionally saving a new checkpoint.
|
||||
*
|
||||
* @param checkpoint boolean true to save a new checkpoint
|
||||
* @throws Exception if restart fails
|
||||
*/
|
||||
protected static void restart(boolean checkpoint) throws Exception {
|
||||
NameNode nameNode = dfsCluster.getNameNode();
|
||||
if (checkpoint) {
|
||||
NameNodeAdapter.enterSafeMode(nameNode, false);
|
||||
NameNodeAdapter.saveNamespace(nameNode);
|
||||
}
|
||||
shutdown();
|
||||
initCluster(false);
|
||||
}
|
||||
}
|
|
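The flag semantics exercised by the tests above reduce to a small create/replace matrix: CREATE fails if the name exists, REPLACE fails if it does not, the combined set is an upsert, and the flag-less overload defaults to the combined set. A minimal client-side sketch of that matrix, assuming a cluster with xattrs enabled and a hypothetical existing path /demo:

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;

public class XAttrFlagDemo {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at a running HDFS cluster.
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/demo"); // hypothetical path, assumed to exist

    // CREATE: succeeds only if "user.a1" is not already set on p.
    fs.setXAttr(p, "user.a1", new byte[]{0x31}, EnumSet.of(XAttrSetFlag.CREATE));

    // REPLACE: succeeds only if "user.a1" already exists on p.
    fs.setXAttr(p, "user.a1", new byte[]{0x32}, EnumSet.of(XAttrSetFlag.REPLACE));

    // No flags: the convenience overload behaves as CREATE|REPLACE (upsert).
    fs.setXAttr(p, "user.a1", new byte[]{0x33});

    fs.removeXAttr(p, "user.a1");
  }
}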
@ -0,0 +1,127 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;
import java.util.EnumSet;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

/**
 * 1) save xattrs, restart NN, assert xattrs reloaded from edit log,
 * 2) save xattrs, create new checkpoint, restart NN, assert xattrs
 * reloaded from fsimage
 */
public class TestFSImageWithXAttr {
  private static Configuration conf;
  private static MiniDFSCluster cluster;

  //xattrs
  private static final String name1 = "user.a1";
  private static final byte[] value1 = {0x31, 0x32, 0x33};
  private static final byte[] newValue1 = {0x31, 0x31, 0x31};
  private static final String name2 = "user.a2";
  private static final byte[] value2 = {0x37, 0x38, 0x39};

  @BeforeClass
  public static void setUp() throws IOException {
    conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
  }

  @AfterClass
  public static void tearDown() {
    cluster.shutdown();
  }

  private void testXAttr(boolean persistNamespace) throws IOException {
    Path path = new Path("/p");
    DistributedFileSystem fs = cluster.getFileSystem();
    fs.create(path).close();

    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));

    restart(fs, persistNamespace);

    Map<String, byte[]> xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(value2, xattrs.get(name2));

    fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.REPLACE));

    restart(fs, persistNamespace);

    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(newValue1, xattrs.get(name1));
    Assert.assertArrayEquals(value2, xattrs.get(name2));

    fs.removeXAttr(path, name1);
    fs.removeXAttr(path, name2);

    restart(fs, persistNamespace);
    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 0);
  }

  @Test
  public void testPersistXAttr() throws IOException {
    testXAttr(true);
  }

  @Test
  public void testXAttrEditLog() throws IOException {
    testXAttr(false);
  }

  /**
   * Restart the NameNode, optionally saving a new checkpoint.
   *
   * @param fs DistributedFileSystem used for saving namespace
   * @param persistNamespace boolean true to save a new checkpoint
   * @throws IOException if restart fails
   */
  private void restart(DistributedFileSystem fs, boolean persistNamespace)
      throws IOException {
    if (persistNamespace) {
      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      fs.saveNamespace();
      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    }

    cluster.restartNameNode();
    cluster.waitActive();
  }

}
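The persistNamespace switch above is what separates the two recovery paths: without a checkpoint the restarted NameNode replays the setXAttr/removeXAttr ops from the edit log, while with one it reads them back out of the fsimage. A minimal sketch of forcing that checkpoint from a client, using the same safe-mode sequence as the restart helper (assumes the caller is the HDFS superuser):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

public class CheckpointDemo {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at an HDFS cluster and the caller is the superuser.
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); // quiesce the namespace
    dfs.saveNamespace();                            // write a new fsimage
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); // resume normal operation
  }
}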
@ -0,0 +1,96 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;
import java.net.URI;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.junit.BeforeClass;

/**
 * Tests of XAttr operations using FileContext APIs.
 */
public class TestFileContextXAttr extends FSXAttrBaseTest {

  @Override
  protected FileSystem createFileSystem() throws Exception {
    FileContextFS fcFs = new FileContextFS();
    fcFs.initialize(FileSystem.getDefaultUri(conf), conf);
    return fcFs;
  }

  /**
   * This reuses FSXAttrBaseTest's testcases by creating a filesystem
   * implementation which uses FileContext by only overriding the xattr related
   * methods. Other operations will use the normal filesystem.
   */
  public static class FileContextFS extends DistributedFileSystem {

    private FileContext fc;

    @Override
    public void initialize(URI uri, Configuration conf) throws IOException {
      super.initialize(uri, conf);
      fc = FileContext.getFileContext(conf);
    }

    @Override
    public void setXAttr(Path path, final String name, final byte[] value)
        throws IOException {
      fc.setXAttr(path, name, value);
    }

    @Override
    public void setXAttr(Path path, final String name, final byte[] value,
        final EnumSet<XAttrSetFlag> flag) throws IOException {
      fc.setXAttr(path, name, value, flag);
    }

    @Override
    public byte[] getXAttr(Path path, final String name) throws IOException {
      return fc.getXAttr(path, name);
    }

    @Override
    public Map<String, byte[]> getXAttrs(Path path) throws IOException {
      return fc.getXAttrs(path);
    }

    @Override
    public Map<String, byte[]> getXAttrs(Path path, final List<String> names)
        throws IOException {
      return fc.getXAttrs(path, names);
    }

    @Override
    public void removeXAttr(Path path, final String name) throws IOException {
      fc.removeXAttr(path, name);
    }
  }
}
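Since the wrapper above only redirects the xattr calls, the same operations can be driven through FileContext directly. A minimal sketch, assuming a configured HDFS and a hypothetical existing path /demo:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class FileContextXAttrDemo {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext(new Configuration());
    Path p = new Path("/demo"); // hypothetical path, assumed to exist

    // The default flags create or replace the attribute.
    fc.setXAttr(p, "user.owner-note", "hello".getBytes("UTF-8"));
    byte[] v = fc.getXAttr(p, "user.owner-note");
    System.out.println(new String(v, "UTF-8"));
    fc.removeXAttr(p, "user.owner-note");
  }
}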
@ -46,6 +46,7 @@ import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSClient;

@ -67,6 +68,8 @@ import org.apache.hadoop.util.Time;
import org.junit.Test;
import org.mockito.Mockito;

import com.google.common.collect.ImmutableList;

public class TestINodeFile {
  public static final Log LOG = LogFactory.getLog(TestINodeFile.class);

@ -1077,4 +1080,22 @@ public class TestINodeFile {
    file.toCompleteFile(Time.now());
    assertFalse(file.isUnderConstruction());
  }

  @Test
  public void testXAttrFeature() {
    replication = 3;
    preferredBlockSize = 128*1024*1024;
    INodeFile inf = createINodeFile(replication, preferredBlockSize);
    ImmutableList.Builder<XAttr> builder = new ImmutableList.Builder<XAttr>();
    XAttr xAttr = new XAttr.Builder().setNameSpace(XAttr.NameSpace.USER).
        setName("a1").setValue(new byte[]{0x31, 0x32, 0x33}).build();
    builder.add(xAttr);
    XAttrFeature f = new XAttrFeature(builder.build());
    inf.addXAttrFeature(f);
    XAttrFeature f1 = inf.getXAttrFeature();
    assertEquals(xAttr, f1.getXAttrs().get(0));
    inf.removeXAttrFeature();
    f1 = inf.getXAttrFeature();
    assertEquals(f1, null);
  }
}
@ -0,0 +1,75 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode;

import java.util.Map;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.junit.Assert;
import org.junit.Test;

/**
 * Tests NameNode interaction for all XAttr APIs.
 * This test suite covers restarting NN, saving new checkpoint,
 * and also includes test of xattrs for symlinks.
 */
public class TestNameNodeXAttr extends FSXAttrBaseTest {

  private static final Path linkParent = new Path("/symdir1");
  private static final Path targetParent = new Path("/symdir2");
  private static final Path link = new Path(linkParent, "link");
  private static final Path target = new Path(targetParent, "target");

  @Test(timeout = 120000)
  public void testXAttrSymlinks() throws Exception {
    fs.mkdirs(linkParent);
    fs.mkdirs(targetParent);
    DFSTestUtil.createFile(fs, target, 1024, (short)3, 0xBEEFL);
    fs.createSymlink(target, link, false);

    fs.setXAttr(target, name1, value1);
    fs.setXAttr(target, name2, value2);

    Map<String, byte[]> xattrs = fs.getXAttrs(link);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(value2, xattrs.get(name2));

    fs.setXAttr(link, name3, null);
    xattrs = fs.getXAttrs(target);
    Assert.assertEquals(xattrs.size(), 3);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(value2, xattrs.get(name2));
    Assert.assertArrayEquals(new byte[0], xattrs.get(name3));

    fs.removeXAttr(link, name1);
    xattrs = fs.getXAttrs(target);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value2, xattrs.get(name2));
    Assert.assertArrayEquals(new byte[0], xattrs.get(name3));

    fs.removeXAttr(target, name3);
    xattrs = fs.getXAttrs(link);
    Assert.assertEquals(xattrs.size(), 1);
    Assert.assertArrayEquals(value2, xattrs.get(name2));

    fs.delete(linkParent, true);
    fs.delete(targetParent, true);
  }
}
@ -415,7 +415,7 @@ public class TestNamenodeRetryCache {

    LightWeightCache<CacheEntry, CacheEntry> cacheSet =
        (LightWeightCache<CacheEntry, CacheEntry>) namesystem.getRetryCache().getCacheSet();
-   assertEquals(20, cacheSet.size());
+   assertEquals(22, cacheSet.size());

    Map<CacheEntry, CacheEntry> oldEntries =
        new HashMap<CacheEntry, CacheEntry>();

@ -434,7 +434,7 @@ public class TestNamenodeRetryCache {
    assertTrue(namesystem.hasRetryCache());
    cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) namesystem
        .getRetryCache().getCacheSet();
-   assertEquals(20, cacheSet.size());
+   assertEquals(22, cacheSet.size());
    iter = cacheSet.iterator();
    while (iter.hasNext()) {
      CacheEntry entry = iter.next();
@ -620,4 +620,68 @@ public class TestStartup {
    fileSys.delete(name, true);
    assertTrue(!fileSys.exists(name));
  }

  @Test(timeout = 120000)
  public void testXattrConfiguration() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;

    try {
      conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, -1);
      cluster =
          new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
      fail("Expected exception with negative xattr size");
    } catch (IllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains(
          "Cannot set a negative value for the maximum size of an xattr", e);
    } finally {
      conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,
          DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT);
      if (cluster != null) {
        cluster.shutdown();
      }
    }

    try {
      conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, -1);
      cluster =
          new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
      fail("Expected exception with negative # xattrs per inode");
    } catch (IllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains(
          "Cannot set a negative limit on the number of xattrs per inode", e);
    } finally {
      conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY,
          DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT);
      if (cluster != null) {
        cluster.shutdown();
      }
    }

    try {
      // Set up a logger to check log message
      final LogVerificationAppender appender = new LogVerificationAppender();
      final Logger logger = Logger.getRootLogger();
      logger.addAppender(appender);
      int count = appender.countLinesWithMessage(
          "Maximum size of an xattr: 0 (unlimited)");
      assertEquals("Expected no messages about unlimited xattr size", 0, count);

      conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, 0);
      cluster =
          new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();

      count = appender.countLinesWithMessage(
          "Maximum size of an xattr: 0 (unlimited)");
      // happens twice because we format then run
      assertEquals("Expected unlimited xattr size", 2, count);
    } finally {
      conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,
          DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT);
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
}
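The two limits validated above are plain integer settings, so a deployment can tighten or relax them in configuration. A hedged sketch (the key names come from the test; the numeric values here are arbitrary examples, and the note that 0 means unlimited follows from the log message checked above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class XAttrLimitsConfigDemo {
  public static Configuration withXAttrLimits() {
    Configuration conf = new Configuration();
    // Maximum size of a single xattr (name plus value), in bytes;
    // 0 means unlimited.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, 16384);
    // Maximum number of xattrs per inode.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 32);
    return conf;
  }
}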
@ -0,0 +1,150 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.io.IOUtils;
import org.junit.After;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;

/**
 * Tests that the configuration flag that controls support for XAttrs is off
 * and causes all attempted operations related to XAttrs to fail.  The
 * NameNode can still load XAttrs from fsimage or edits.
 */
public class TestXAttrConfigFlag {
  private static final Path PATH = new Path("/path");

  private MiniDFSCluster cluster;
  private DistributedFileSystem fs;

  @Rule
  public ExpectedException exception = ExpectedException.none();

  @After
  public void shutdown() throws Exception {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }

  @Test
  public void testSetXAttr() throws Exception {
    initCluster(true, false);
    fs.mkdirs(PATH);
    expectException();
    fs.setXAttr(PATH, "user.foo", null);
  }

  @Test
  public void testGetXAttrs() throws Exception {
    initCluster(true, false);
    fs.mkdirs(PATH);
    expectException();
    fs.getXAttrs(PATH);
  }

  @Test
  public void testRemoveXAttr() throws Exception {
    initCluster(true, false);
    fs.mkdirs(PATH);
    expectException();
    fs.removeXAttr(PATH, "user.foo");
  }

  @Test
  public void testEditLog() throws Exception {
    // With XAttrs enabled, set an XAttr.
    initCluster(true, true);
    fs.mkdirs(PATH);
    fs.setXAttr(PATH, "user.foo", null);

    // Restart with XAttrs disabled.  Expect successful restart.
    restart(false, false);
  }

  @Test
  public void testFsImage() throws Exception {
    // With XAttrs enabled, set an XAttr.
    initCluster(true, true);
    fs.mkdirs(PATH);
    fs.setXAttr(PATH, "user.foo", null);

    // Save a new checkpoint and restart with XAttrs still enabled.
    restart(true, true);

    // Restart with XAttrs disabled.  Expect successful restart.
    restart(false, false);
  }

  /**
   * We expect an IOException, and we want the exception text to state the
   * configuration key that controls XAttr support.
   */
  private void expectException() {
    exception.expect(IOException.class);
    exception.expectMessage(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY);
  }

  /**
   * Initialize the cluster, wait for it to become active, and get FileSystem.
   *
   * @param format if true, format the NameNode and DataNodes before starting up
   * @param xattrsEnabled if true, XAttr support is enabled
   * @throws Exception if any step fails
   */
  private void initCluster(boolean format, boolean xattrsEnabled)
      throws Exception {
    Configuration conf = new Configuration();
    // not explicitly setting to false, should be false by default
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, xattrsEnabled);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format)
        .build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
  }

  /**
   * Restart the cluster, optionally saving a new checkpoint.
   *
   * @param checkpoint boolean true to save a new checkpoint
   * @param xattrsEnabled if true, XAttr support is enabled
   * @throws Exception if restart fails
   */
  private void restart(boolean checkpoint, boolean xattrsEnabled)
      throws Exception {
    NameNode nameNode = cluster.getNameNode();
    if (checkpoint) {
      NameNodeAdapter.enterSafeMode(nameNode, false);
      NameNodeAdapter.saveNamespace(nameNode);
    }
    shutdown();
    initCluster(false, xattrsEnabled);
  }
}
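Per the tests above, the switch is all-or-nothing at the NameNode: with it off, every client xattr call fails with an IOException naming the key, while previously persisted xattrs still load from fsimage or edits. A minimal sketch of flipping it programmatically (the constant is the one used throughout this change; its string form is an assumption based on the standard key naming convention):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class XAttrFlagConfigDemo {
  public static Configuration xattrsEnabled(boolean enabled) {
    Configuration conf = new Configuration();
    // Assumed to resolve to "dfs.namenode.xattrs.enabled" in hdfs-site.xml terms.
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, enabled);
    return conf;
  }
}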
@ -33,6 +33,7 @@ import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.commons.logging.Log;

@ -45,6 +46,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;

@ -126,6 +128,7 @@ public class TestRetryCacheWithHA {
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, ResponseSize);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, ResponseSize);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(DataNodes).build();

@ -157,7 +160,7 @@ public class TestRetryCacheWithHA {
    FSNamesystem fsn0 = cluster.getNamesystem(0);
    LightWeightCache<CacheEntry, CacheEntry> cacheSet =
        (LightWeightCache<CacheEntry, CacheEntry>) fsn0.getRetryCache().getCacheSet();
-   assertEquals(20, cacheSet.size());
+   assertEquals(22, cacheSet.size());

    Map<CacheEntry, CacheEntry> oldEntries =
        new HashMap<CacheEntry, CacheEntry>();

@ -178,7 +181,7 @@ public class TestRetryCacheWithHA {
    FSNamesystem fsn1 = cluster.getNamesystem(1);
    cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) fsn1
        .getRetryCache().getCacheSet();
-   assertEquals(20, cacheSet.size());
+   assertEquals(22, cacheSet.size());
    iter = cacheSet.iterator();
    while (iter.hasNext()) {
      CacheEntry entry = iter.next();

@ -1001,6 +1004,48 @@ public class TestRetryCacheWithHA {
      return null;
    }
  }

  /** setXAttr */
  class SetXAttrOp extends AtMostOnceOp {
    private final String src;

    SetXAttrOp(DFSClient client, String src) {
      super("setXAttr", client);
      this.src = src;
    }

    @Override
    void prepare() throws Exception {
      Path p = new Path(src);
      if (!dfs.exists(p)) {
        DFSTestUtil.createFile(dfs, p, BlockSize, DataNodes, 0);
      }
    }

    @Override
    void invoke() throws Exception {
      client.setXAttr(src, "user.key", "value".getBytes(),
          EnumSet.of(XAttrSetFlag.CREATE));
    }

    @Override
    boolean checkNamenodeBeforeReturn() throws Exception {
      for (int i = 0; i < CHECKTIMES; i++) {
        Map<String, byte[]> iter = dfs.getXAttrs(new Path(src));
        Set<String> keySet = iter.keySet();
        if (keySet.contains("user.key")) {
          return true;
        }
        Thread.sleep(1000);
      }
      return false;
    }

    @Override
    Object getResult() {
      return null;
    }
  }

  @Test (timeout=60000)
  public void testCreateSnapshot() throws Exception {

@ -1130,6 +1175,13 @@ public class TestRetryCacheWithHA {
    AtMostOnceOp op = new RemoveCachePoolOp(client, "pool");
    testClientRetryWithFailover(op);
  }

  @Test (timeout=60000)
  public void testSetXAttr() throws Exception {
    DFSClient client = genClientWithDummyHandler();
    AtMostOnceOp op = new SetXAttrOp(client, "/setxattr");
    testClientRetryWithFailover(op);
  }

  /**
   * When NN failover happens, if the client did not receive the response and
@ -0,0 +1,114 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode.ha;

import static org.junit.Assert.assertEquals;

import java.io.IOException;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

/**
 * Tests interaction of XAttrs with HA failover.
 */
public class TestXAttrsWithHA {
  private static final Path path = new Path("/file");

  // XAttrs
  protected static final String name1 = "user.a1";
  protected static final byte[] value1 = {0x31, 0x32, 0x33};
  protected static final byte[] newValue1 = {0x31, 0x31, 0x31};
  protected static final String name2 = "user.a2";
  protected static final byte[] value2 = {0x37, 0x38, 0x39};
  protected static final String name3 = "user.a3";

  private MiniDFSCluster cluster;
  private NameNode nn0;
  private NameNode nn1;
  private FileSystem fs;

  @Before
  public void setupCluster() throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    HAUtil.setAllowStandbyReads(conf, true);

    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(1)
        .waitSafeMode(false)
        .build();
    cluster.waitActive();

    nn0 = cluster.getNameNode(0);
    nn1 = cluster.getNameNode(1);
    fs = HATestUtil.configureFailoverFs(cluster, conf);

    cluster.transitionToActive(0);
  }

  @After
  public void shutdownCluster() throws IOException {
    if (cluster != null) {
      cluster.shutdown();
    }
  }

  /**
   * Test that xattrs are properly tracked by the standby
   */
  @Test(timeout = 60000)
  public void testXAttrsTrackedOnStandby() throws Exception {
    fs.create(path).close();
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));

    HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
    List<XAttr> xAttrs = nn1.getRpcServer().getXAttrs("/file", null);
    assertEquals(2, xAttrs.size());

    // Failover the current standby to active.
    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);

    Map<String, byte[]> xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(value2, xattrs.get(name2));

    fs.delete(path, true);
  }

}
@ -0,0 +1,371 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs.server.namenode.snapshot;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;

import java.util.EnumSet;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.io.IOUtils;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;

/**
 * Tests interaction of XAttrs with snapshots.
 */
public class TestXAttrWithSnapshot {

  private static MiniDFSCluster cluster;
  private static Configuration conf;
  private static DistributedFileSystem hdfs;
  private static int pathCount = 0;
  private static Path path, snapshotPath;
  private static String snapshotName;
  // XAttrs
  private static final String name1 = "user.a1";
  private static final byte[] value1 = { 0x31, 0x32, 0x33 };
  private static final byte[] newValue1 = { 0x31, 0x31, 0x31 };
  private static final String name2 = "user.a2";
  private static final byte[] value2 = { 0x37, 0x38, 0x39 };

  @Rule
  public ExpectedException exception = ExpectedException.none();

  @BeforeClass
  public static void init() throws Exception {
    conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
    initCluster(true);
  }

  @AfterClass
  public static void shutdown() throws Exception {
    IOUtils.cleanup(null, hdfs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }

  @Before
  public void setUp() {
    ++pathCount;
    path = new Path("/p" + pathCount);
    snapshotName = "snapshot" + pathCount;
    snapshotPath = new Path(path, new Path(".snapshot", snapshotName));
  }

  /**
   * Tests modifying xattrs on a directory that has been snapshotted
   */
  @Test (timeout = 120000)
  public void testModifyReadsCurrentState() throws Exception {
    // Init
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
    hdfs.setXAttr(path, name1, value1);
    hdfs.setXAttr(path, name2, value2);

    // Verify that current path reflects xattrs, snapshot doesn't
    Map<String, byte[]> xattrs = hdfs.getXAttrs(path);
    assertEquals(xattrs.size(), 2);
    assertArrayEquals(value1, xattrs.get(name1));
    assertArrayEquals(value2, xattrs.get(name2));

    xattrs = hdfs.getXAttrs(snapshotPath);
    assertEquals(xattrs.size(), 0);

    // Modify each xattr and make sure it's reflected
    hdfs.setXAttr(path, name1, value2, EnumSet.of(XAttrSetFlag.REPLACE));
    xattrs = hdfs.getXAttrs(path);
    assertEquals(xattrs.size(), 2);
    assertArrayEquals(value2, xattrs.get(name1));
    assertArrayEquals(value2, xattrs.get(name2));

    hdfs.setXAttr(path, name2, value1, EnumSet.of(XAttrSetFlag.REPLACE));
    xattrs = hdfs.getXAttrs(path);
    assertEquals(xattrs.size(), 2);
    assertArrayEquals(value2, xattrs.get(name1));
    assertArrayEquals(value1, xattrs.get(name2));

    // Paranoia checks
    xattrs = hdfs.getXAttrs(snapshotPath);
    assertEquals(xattrs.size(), 0);

    hdfs.removeXAttr(path, name1);
    hdfs.removeXAttr(path, name2);
    xattrs = hdfs.getXAttrs(path);
    assertEquals(xattrs.size(), 0);
  }

  /**
   * Tests removing xattrs on a directory that has been snapshotted
   */
  @Test (timeout = 120000)
  public void testRemoveReadsCurrentState() throws Exception {
    // Init
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
    hdfs.setXAttr(path, name1, value1);
    hdfs.setXAttr(path, name2, value2);

    // Verify that current path reflects xattrs, snapshot doesn't
    Map<String, byte[]> xattrs = hdfs.getXAttrs(path);
    assertEquals(xattrs.size(), 2);
    assertArrayEquals(value1, xattrs.get(name1));
    assertArrayEquals(value2, xattrs.get(name2));

    xattrs = hdfs.getXAttrs(snapshotPath);
    assertEquals(xattrs.size(), 0);

    // Remove xattrs and verify one-by-one
    hdfs.removeXAttr(path, name2);
    xattrs = hdfs.getXAttrs(path);
    assertEquals(xattrs.size(), 1);
    assertArrayEquals(value1, xattrs.get(name1));

    hdfs.removeXAttr(path, name1);
    xattrs = hdfs.getXAttrs(path);
    assertEquals(xattrs.size(), 0);
  }

  /**
   * 1) Save xattrs, then create snapshot. Assert that inode of original and
   * snapshot have same xattrs. 2) Change the original xattrs, assert snapshot
   * still has old xattrs.
   */
  @Test
  public void testXAttrForSnapshotRootAfterChange() throws Exception {
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
    hdfs.setXAttr(path, name1, value1);
    hdfs.setXAttr(path, name2, value2);

    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);

    // Both original and snapshot have same XAttrs.
    Map<String, byte[]> xattrs = hdfs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(value2, xattrs.get(name2));

    xattrs = hdfs.getXAttrs(snapshotPath);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(value2, xattrs.get(name2));

    // Original XAttrs have changed, but snapshot still has old XAttrs.
    hdfs.setXAttr(path, name1, newValue1);

    doSnapshotRootChangeAssertions(path, snapshotPath);
    restart(false);
    doSnapshotRootChangeAssertions(path, snapshotPath);
    restart(true);
    doSnapshotRootChangeAssertions(path, snapshotPath);
  }

  private static void doSnapshotRootChangeAssertions(Path path,
      Path snapshotPath) throws Exception {
    Map<String, byte[]> xattrs = hdfs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(newValue1, xattrs.get(name1));
    Assert.assertArrayEquals(value2, xattrs.get(name2));

    xattrs = hdfs.getXAttrs(snapshotPath);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(value2, xattrs.get(name2));
  }

  /**
   * 1) Save xattrs, then create snapshot. Assert that inode of original and
   * snapshot have same xattrs. 2) Remove some original xattrs, assert snapshot
   * still has old xattrs.
   */
  @Test
  public void testXAttrForSnapshotRootAfterRemove() throws Exception {
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
    hdfs.setXAttr(path, name1, value1);
    hdfs.setXAttr(path, name2, value2);

    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);

    // Both original and snapshot have same XAttrs.
    Map<String, byte[]> xattrs = hdfs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(value2, xattrs.get(name2));

    xattrs = hdfs.getXAttrs(snapshotPath);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(value2, xattrs.get(name2));

    // Original XAttrs have been removed, but snapshot still has old XAttrs.
    hdfs.removeXAttr(path, name1);
    hdfs.removeXAttr(path, name2);

    doSnapshotRootRemovalAssertions(path, snapshotPath);
    restart(false);
    doSnapshotRootRemovalAssertions(path, snapshotPath);
    restart(true);
    doSnapshotRootRemovalAssertions(path, snapshotPath);
  }

  private static void doSnapshotRootRemovalAssertions(Path path,
      Path snapshotPath) throws Exception {
    Map<String, byte[]> xattrs = hdfs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 0);

    xattrs = hdfs.getXAttrs(snapshotPath);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(value2, xattrs.get(name2));
  }

  /**
   * Assert exception of setting xattr on read-only snapshot.
   */
  @Test
  public void testSetXAttrSnapshotPath() throws Exception {
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
    exception.expect(SnapshotAccessControlException.class);
    hdfs.setXAttr(snapshotPath, name1, value1);
  }

  /**
   * Assert exception of setting xattr when exceeding quota.
   */
  @Test
  public void testSetXAttrExceedsQuota() throws Exception {
    Path filePath = new Path(path, "file1");
    Path fileSnapshotPath = new Path(snapshotPath, "file1");
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0755));
    hdfs.allowSnapshot(path);
    hdfs.setQuota(path, 3, HdfsConstants.QUOTA_DONT_SET);
    FileSystem.create(hdfs, filePath,
        FsPermission.createImmutable((short) 0600)).close();
    hdfs.setXAttr(filePath, name1, value1);

    hdfs.createSnapshot(path, snapshotName);

    byte[] value = hdfs.getXAttr(filePath, name1);
    Assert.assertArrayEquals(value, value1);

    value = hdfs.getXAttr(fileSnapshotPath, name1);
    Assert.assertArrayEquals(value, value1);

    exception.expect(NSQuotaExceededException.class);
    hdfs.setXAttr(filePath, name2, value2);
  }


  /**
   * Test that an exception is thrown when adding an XAttr Feature to
   * a snapshotted path
   */
  @Test
  public void testSetXAttrAfterSnapshotExceedsQuota() throws Exception {
    Path filePath = new Path(path, "file1");
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0755));
    hdfs.allowSnapshot(path);
    hdfs.setQuota(path, 3, HdfsConstants.QUOTA_DONT_SET);
    FileSystem.create(hdfs, filePath,
        FsPermission.createImmutable((short) 0600)).close();
    hdfs.createSnapshot(path, snapshotName);
    // This adds an XAttr feature, which can throw an exception
    exception.expect(NSQuotaExceededException.class);
    hdfs.setXAttr(filePath, name1, value1);
  }

  /**
   * Assert exception of removing xattr when exceeding quota.
   */
  @Test
  public void testRemoveXAttrExceedsQuota() throws Exception {
    Path filePath = new Path(path, "file1");
    Path fileSnapshotPath = new Path(snapshotPath, "file1");
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0755));
    hdfs.allowSnapshot(path);
    hdfs.setQuota(path, 3, HdfsConstants.QUOTA_DONT_SET);
    FileSystem.create(hdfs, filePath,
        FsPermission.createImmutable((short) 0600)).close();
    hdfs.setXAttr(filePath, name1, value1);

    hdfs.createSnapshot(path, snapshotName);

    byte[] value = hdfs.getXAttr(filePath, name1);
    Assert.assertArrayEquals(value, value1);

    value = hdfs.getXAttr(fileSnapshotPath, name1);
    Assert.assertArrayEquals(value, value1);

    exception.expect(NSQuotaExceededException.class);
    hdfs.removeXAttr(filePath, name1);
  }

  /**
   * Initialize the cluster, wait for it to become active, and get FileSystem
   * instances for our test users.
   *
   * @param format if true, format the NameNode and DataNodes before starting up
   * @throws Exception if any step fails
   */
  private static void initCluster(boolean format) throws Exception {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format)
        .build();
    cluster.waitActive();
    hdfs = cluster.getFileSystem();
  }

  /**
   * Restart the cluster, optionally saving a new checkpoint.
   *
   * @param checkpoint boolean true to save a new checkpoint
   * @throws Exception if restart fails
   */
  private static void restart(boolean checkpoint) throws Exception {
    NameNode nameNode = cluster.getNameNode();
    if (checkpoint) {
      NameNodeAdapter.enterSafeMode(nameNode, false);
      NameNodeAdapter.saveNamespace(nameNode);
    }
    shutdown();
    initCluster(false);
  }
}
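As the assertions above rely on, a snapshot is addressed through the read-only .snapshot subtree, so pre-snapshot xattr values remain readable there after the live copy changes. A minimal client sketch of that behavior, assuming a hypothetical snapshottable directory /data, a snapshot named s1, and sufficient privileges to allow snapshots:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SnapshotXAttrDemo {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    Path dir = new Path("/data"); // hypothetical directory, assumed to exist
    dfs.allowSnapshot(dir);       // typically requires admin privileges
    dfs.setXAttr(dir, "user.tag", "v1".getBytes("UTF-8"));
    dfs.createSnapshot(dir, "s1");

    dfs.setXAttr(dir, "user.tag", "v2".getBytes("UTF-8"));

    // The live directory sees the new value; the snapshot still serves the old one.
    byte[] live = dfs.getXAttr(dir, "user.tag");
    byte[] snap = dfs.getXAttr(new Path("/data/.snapshot/s1"), "user.tag");
    System.out.println(new String(live, "UTF-8")); // expected: v2
    System.out.println(new String(snap, "UTF-8")); // expected: v1
  }
}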
@ -22,16 +22,22 @@ import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;

import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.namenode.INodeId;

@ -186,6 +192,48 @@ public class TestJsonUtil {
        JsonUtil.toJsonString(aclStatusBuilder.build()));
  }

  @Test
  public void testToJsonFromXAttrs() throws IOException {
    String jsonString =
        "{\"XAttrs\":[{\"name\":\"user.a1\",\"value\":\"0x313233\"}," +
        "{\"name\":\"user.a2\",\"value\":\"0x313131\"}]}";
    XAttr xAttr1 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
        setName("a1").setValue(XAttrCodec.decodeValue("0x313233")).build();
    XAttr xAttr2 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
        setName("a2").setValue(XAttrCodec.decodeValue("0x313131")).build();
    List<XAttr> xAttrs = Lists.newArrayList();
    xAttrs.add(xAttr1);
    xAttrs.add(xAttr2);

    Assert.assertEquals(jsonString, JsonUtil.toJsonString(xAttrs,
        XAttrCodec.HEX));
  }

  @Test
  public void testToXAttrMap() throws IOException {
    String jsonString =
        "{\"XAttrs\":[{\"name\":\"user.a1\",\"value\":\"0x313233\"}," +
        "{\"name\":\"user.a2\",\"value\":\"0x313131\"}]}";
    Map<?, ?> json = (Map<?, ?>) JSON.parse(jsonString);
    XAttr xAttr1 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
        setName("a1").setValue(XAttrCodec.decodeValue("0x313233")).build();
    XAttr xAttr2 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
        setName("a2").setValue(XAttrCodec.decodeValue("0x313131")).build();
    List<XAttr> xAttrs = Lists.newArrayList();
    xAttrs.add(xAttr1);
    xAttrs.add(xAttr2);
    Map<String, byte[]> xAttrMap = XAttrHelper.buildXAttrMap(xAttrs);
    Map<String, byte[]> parsedXAttrMap = JsonUtil.toXAttrs(json);

    Assert.assertEquals(xAttrMap.size(), parsedXAttrMap.size());
    Iterator<Entry<String, byte[]>> iter = xAttrMap.entrySet().iterator();
    while (iter.hasNext()) {
      Entry<String, byte[]> entry = iter.next();
      Assert.assertArrayEquals(entry.getValue(),
          parsedXAttrMap.get(entry.getKey()));
    }
  }

  private void checkDecodeFailure(Map<String, Object> map) {
    try {
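A note on the encoded values in the tests above: "0x313233" is simply the hex encoding of the ASCII bytes of "123". A minimal sketch of the round trip through XAttrCodec, assuming the static decodeValue/encodeValue methods behave as this patch's codec defines them:

import org.apache.hadoop.fs.XAttrCodec;

// Sketch: round-trip an xattr value through the hex codec.
public class XAttrCodecSketch {
  public static void main(String[] args) throws Exception {
    byte[] decoded = XAttrCodec.decodeValue("0x313233");
    System.out.println(new String(decoded, "UTF-8"));                    // 123
    System.out.println(XAttrCodec.encodeValue(decoded, XAttrCodec.HEX)); // 0x313233
  }
}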
@ -0,0 +1,36 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web;

import org.apache.hadoop.hdfs.server.namenode.FSXAttrBaseTest;

/**
 * Tests XAttr APIs via WebHDFS.
 */
public class TestWebHDFSXAttr extends FSXAttrBaseTest {
  /**
   * Overridden to provide a WebHdfsFileSystem wrapper for the super-user.
   *
   * @return WebHdfsFileSystem for super-user
   * @throws Exception if creation fails
   */
  @Override
  protected WebHdfsFileSystem createFileSystem() throws Exception {
    return WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
  }
}
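Because WebHdfsFileSystem implements the same FileSystem xattr calls, the whole FSXAttrBaseTest suite runs unchanged over HTTP. Outside the test harness, a client can do the same thing directly; a minimal sketch, where the webhdfs URI is a placeholder for a real NameNode host and HTTP port:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch: the same xattr API, served over WebHDFS.
public class WebHdfsXAttrSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://namenode-host:50070"), new Configuration());
    Path file = new Path("/tmp/webhdfs-xattr-demo");
    fs.create(file).close();
    fs.setXAttr(file, "user.a1", "123456".getBytes("UTF-8"));
    System.out.println(new String(fs.getXAttr(file, "user.a1"), "UTF-8"));
  }
}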
@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.web.resources;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;

import java.io.IOException;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;

@ -30,6 +31,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;

@ -348,6 +351,43 @@ public class TestParam {
    }
  }

  @Test
  public void testXAttrNameParam() {
    final XAttrNameParam p = new XAttrNameParam("user.a1");
    Assert.assertEquals(p.getXAttrName(), "user.a1");
    try {
      new XAttrNameParam("a1");
      Assert.fail();
    } catch (IllegalArgumentException e) {
      LOG.info("EXPECTED: " + e);
    }
  }

  @Test
  public void testXAttrValueParam() throws IOException {
    final XAttrValueParam p = new XAttrValueParam("0x313233");
    Assert.assertArrayEquals(p.getXAttrValue(),
        XAttrCodec.decodeValue("0x313233"));
  }

  @Test
  public void testXAttrEncodingParam() {
    final XAttrEncodingParam p = new XAttrEncodingParam(XAttrCodec.BASE64);
    Assert.assertEquals(p.getEncoding(), XAttrCodec.BASE64);
    final XAttrEncodingParam p1 = new XAttrEncodingParam(p.getValueString());
    Assert.assertEquals(p1.getEncoding(), XAttrCodec.BASE64);
  }

  @Test
  public void testXAttrSetFlagParam() {
    EnumSet<XAttrSetFlag> flag = EnumSet.of(
        XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE);
    final XAttrSetFlagParam p = new XAttrSetFlagParam(flag);
    Assert.assertEquals(p.getFlag(), flag);
    final XAttrSetFlagParam p1 = new XAttrSetFlagParam(p.getValueString());
    Assert.assertEquals(p1.getFlag(), flag);
  }

  @Test
  public void testRenameOptionSetParam() {
    final RenameOptionSetParam p = new RenameOptionSetParam(
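The CREATE/REPLACE flags round-tripped above are the same ones a client passes to setXAttr. A minimal sketch of their semantics, consistent with the API javadoc in this patch (CREATE only sets a new xattr, REPLACE only overwrites an existing one; the path is illustrative):

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;

// Sketch of XAttrSetFlag semantics.
public class XAttrSetFlagSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/tmp/flag-demo");
    fs.create(file).close();

    // Succeeds: the xattr does not exist yet.
    fs.setXAttr(file, "user.a1", "v1".getBytes("UTF-8"),
        EnumSet.of(XAttrSetFlag.CREATE));
    // Succeeds: the xattr now exists, so REPLACE applies.
    fs.setXAttr(file, "user.a1", "v2".getBytes("UTF-8"),
        EnumSet.of(XAttrSetFlag.REPLACE));
    // Would throw: CREATE alone is rejected for an existing xattr.
    // fs.setXAttr(file, "user.a1", "v3".getBytes("UTF-8"),
    //     EnumSet.of(XAttrSetFlag.CREATE));
  }
}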
Binary file not shown.
@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<EDITS>
  <EDITS_VERSION>-56</EDITS_VERSION>
  <EDITS_VERSION>-57</EDITS_VERSION>
  <RECORD>
    <OPCODE>OP_START_LOG_SEGMENT</OPCODE>
    <DATA>

@ -938,9 +938,34 @@
    </DATA>
  </RECORD>
  <RECORD>
    <OPCODE>OP_END_LOG_SEGMENT</OPCODE>
    <OPCODE>OP_SET_XATTR</OPCODE>
    <DATA>
      <TXID>75</TXID>
      <SRC>/file_concat_target</SRC>
      <XATTR>
        <NAMESPACE>USER</NAMESPACE>
        <NAME>a1</NAME>
        <VALUE>0x313233</VALUE>
      </XATTR>
      <RPC_CLIENTID>9b85a845-bbfa-42f6-8a16-c433614b8eb9</RPC_CLIENTID>
      <RPC_CALLID>80</RPC_CALLID>
    </DATA>
  </RECORD>
  <RECORD>
    <OPCODE>OP_REMOVE_XATTR</OPCODE>
    <DATA>
      <TXID>76</TXID>
      <SRC>/file_concat_target</SRC>
      <XATTR>
        <NAMESPACE>USER</NAMESPACE>
        <NAME>a1</NAME>
      </XATTR>
    </DATA>
  </RECORD>
  <RECORD>
    <OPCODE>OP_END_LOG_SEGMENT</OPCODE>
    <DATA>
      <TXID>77</TXID>
    </DATA>
  </RECORD>
</EDITS>
@ -0,0 +1,409 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="testConf.xsl"?>

<!--
   Licensed to the Apache Software Foundation (ASF) under one or more
   contributor license agreements.  See the NOTICE file distributed with
   this work for additional information regarding copyright ownership.
   The ASF licenses this file to You under the Apache License, Version 2.0
   (the "License"); you may not use this file except in compliance with
   the License.  You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
-->

<configuration>
  <!-- Normal mode is test. To run just the commands and dump the output
       to the log, set it to nocompare -->
  <mode>test</mode>

  <!-- Comparator types:
          ExactComparator
          SubstringComparator
          RegexpComparator
          TokenComparator
  -->
  <tests>
    <test>
      <description>setfattr : Add an xattr</description>
      <test-commands>
        <command>-fs NAMENODE -touchz /file1</command>
        <command>-fs NAMENODE -setfattr -n user.a1 -v 123456 /file1</command>
        <command>-fs NAMENODE -getfattr -d /file1</command>
      </test-commands>
      <cleanup-commands>
        <command>-fs NAMENODE -rm /file1</command>
      </cleanup-commands>
      <comparators>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output># file: /file1</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output>user.a1="123456"</expected-output>
        </comparator>
      </comparators>
    </test>

    <test>
      <description>setfattr : Add an xattr with an invalid namespace prefix</description>
      <test-commands>
        <command>-fs NAMENODE -touchz /file1</command>
        <command>-fs NAMENODE -setfattr -n uuu.a1 -v 123456 /file1</command>
      </test-commands>
      <cleanup-commands>
        <command>-fs NAMENODE -rm /file1</command>
      </cleanup-commands>
      <comparators>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output>name must be prefixed with user/trusted/security/system, followed by a '.'</expected-output>
        </comparator>
      </comparators>
    </test>

    <test>
      <description>setfattr : Add an xattr of trusted namespace</description>
      <test-commands>
        <command>-fs NAMENODE -touchz /file1</command>
        <command>-fs NAMENODE -setfattr -n trusted.a1 -v 123456 /file1</command>
        <command>-fs NAMENODE -getfattr -d /file1</command>
      </test-commands>
      <cleanup-commands>
        <command>-fs NAMENODE -rm /file1</command>
      </cleanup-commands>
      <comparators>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output># file: /file1</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output>trusted.a1="123456"</expected-output>
        </comparator>
      </comparators>
    </test>

    <test>
      <description>setfattr : Add an xattr of system namespace</description>
      <test-commands>
        <command>-fs NAMENODE -touchz /file1</command>
        <command>-fs NAMENODE -setfattr -n system.a1 -v 123456 /file1</command>
      </test-commands>
      <cleanup-commands>
        <command>-fs NAMENODE -rm /file1</command>
      </cleanup-commands>
      <comparators>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output>setfattr: User doesn't have permission for xattr: system.a1</expected-output>
        </comparator>
      </comparators>
    </test>

    <test>
      <description>setfattr : Add an xattr of security namespace</description>
      <test-commands>
        <command>-fs NAMENODE -touchz /file1</command>
        <command>-fs NAMENODE -setfattr -n security.a1 -v 123456 /file1</command>
      </test-commands>
      <cleanup-commands>
        <command>-fs NAMENODE -rm /file1</command>
      </cleanup-commands>
      <comparators>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output>setfattr: User doesn't have permission for xattr: security.a1</expected-output>
        </comparator>
      </comparators>
    </test>

    <test>
      <description>setfattr : Add an xattr with text encoding</description>
      <test-commands>
        <command>-fs NAMENODE -touchz /file1</command>
        <command>-fs NAMENODE -setfattr -n user.a1 -v "123456" /file1</command>
        <command>-fs NAMENODE -getfattr -d /file1</command>
      </test-commands>
      <cleanup-commands>
        <command>-fs NAMENODE -rm /file1</command>
      </cleanup-commands>
      <comparators>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output># file: /file1</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output>user.a1="123456"</expected-output>
        </comparator>
      </comparators>
    </test>

    <test>
      <description>setfattr : Add an xattr with hex encoding</description>
      <test-commands>
        <command>-fs NAMENODE -touchz /file1</command>
        <command>-fs NAMENODE -setfattr -n user.a1 -v 0x313233343536 /file1</command>
        <command>-fs NAMENODE -getfattr -d /file1</command>
      </test-commands>
      <cleanup-commands>
        <command>-fs NAMENODE -rm /file1</command>
      </cleanup-commands>
      <comparators>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output># file: /file1</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output>user.a1="123456"</expected-output>
        </comparator>
      </comparators>
    </test>

    <test>
      <description>setfattr : Add an xattr with base64 encoding</description>
      <test-commands>
        <command>-fs NAMENODE -touchz /file1</command>
        <command>-fs NAMENODE -setfattr -n user.a1 -v 0sMTIzNDU2 /file1</command>
        <command>-fs NAMENODE -getfattr -d /file1</command>
      </test-commands>
      <cleanup-commands>
        <command>-fs NAMENODE -rm /file1</command>
      </cleanup-commands>
      <comparators>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output># file: /file1</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output>user.a1="123456"</expected-output>
        </comparator>
      </comparators>
    </test>

    <test>
      <description>setfattr : Add multiple xattrs</description>
      <test-commands>
        <command>-fs NAMENODE -touchz /file1</command>
        <command>-fs NAMENODE -setfattr -n user.a1 -v 123456 /file1</command>
        <command>-fs NAMENODE -setfattr -n user.a2 -v abc /file1</command>
        <command>-fs NAMENODE -getfattr -d /file1</command>
      </test-commands>
      <cleanup-commands>
        <command>-fs NAMENODE -rm /file1</command>
      </cleanup-commands>
      <comparators>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output># file: /file1</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output>user.a1="123456"</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output>user.a2="abc"</expected-output>
        </comparator>
      </comparators>
    </test>

    <test>
      <description>setfattr : Remove an xattr</description>
      <test-commands>
        <command>-fs NAMENODE -touchz /file1</command>
        <command>-fs NAMENODE -setfattr -n user.a1 -v 123456 /file1</command>
        <command>-fs NAMENODE -setfattr -n user.a2 -v abc /file1</command>
        <command>-fs NAMENODE -setfattr -x user.a1 /file1</command>
        <command>-fs NAMENODE -getfattr -d /file1</command>
      </test-commands>
      <cleanup-commands>
        <command>-fs NAMENODE -rm /file1</command>
      </cleanup-commands>
      <comparators>
        <comparator>
          <type>ExactComparator</type>
          <expected-output># file: /file1#LF#user.a2="abc"#LF#</expected-output>
        </comparator>
      </comparators>
    </test>

    <test>
      <description>setfattr : Remove an xattr which doesn't exist</description>
      <test-commands>
        <command>-fs NAMENODE -touchz /file1</command>
        <command>-fs NAMENODE -setfattr -n user.a1 -v 123456 /file1</command>
        <command>-fs NAMENODE -setfattr -x user.a2 /file1</command>
        <command>-fs NAMENODE -getfattr -d /file1</command>
      </test-commands>
      <cleanup-commands>
        <command>-fs NAMENODE -rm /file1</command>
      </cleanup-commands>
      <comparators>
        <comparator>
          <type>ExactComparator</type>
          <expected-output># file: /file1#LF#user.a1="123456"#LF#</expected-output>
        </comparator>
      </comparators>
    </test>

    <test>
      <description>getfattr : Get an xattr</description>
      <test-commands>
        <command>-fs NAMENODE -touchz /file1</command>
        <command>-fs NAMENODE -setfattr -n user.a1 -v 123456 /file1</command>
        <command>-fs NAMENODE -setfattr -n user.a2 -v abc /file1</command>
        <command>-fs NAMENODE -getfattr -n user.a1 /file1</command>
      </test-commands>
      <cleanup-commands>
        <command>-fs NAMENODE -rm /file1</command>
      </cleanup-commands>
      <comparators>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output># file: /file1</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output>user.a1="123456"</expected-output>
        </comparator>
      </comparators>
    </test>

    <test>
      <description>getfattr : Get an xattr which doesn't exist</description>
      <test-commands>
        <command>-fs NAMENODE -touchz /file1</command>
        <command>-fs NAMENODE -getfattr -n user.a1 /file1</command>
      </test-commands>
      <cleanup-commands>
        <command>-fs NAMENODE -rm /file1</command>
      </cleanup-commands>
      <comparators>
        <comparator>
          <type>ExactComparator</type>
          <expected-output># file: /file1#LF#</expected-output>
        </comparator>
      </comparators>
    </test>

    <test>
      <description>getfattr : Get an xattr with text encoding</description>
      <test-commands>
        <command>-fs NAMENODE -touchz /file1</command>
        <command>-fs NAMENODE -setfattr -n user.a1 -v 123456 /file1</command>
        <command>-fs NAMENODE -setfattr -n user.a2 -v abc /file1</command>
        <command>-fs NAMENODE -getfattr -n user.a1 -e text /file1</command>
      </test-commands>
      <cleanup-commands>
        <command>-fs NAMENODE -rm /file1</command>
      </cleanup-commands>
      <comparators>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output># file: /file1</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output>user.a1="123456"</expected-output>
        </comparator>
      </comparators>
    </test>

    <test>
      <description>getfattr : Get an xattr with hex encoding</description>
      <test-commands>
        <command>-fs NAMENODE -touchz /file1</command>
        <command>-fs NAMENODE -setfattr -n user.a1 -v 123456 /file1</command>
        <command>-fs NAMENODE -setfattr -n user.a2 -v abc /file1</command>
        <command>-fs NAMENODE -getfattr -n user.a1 -e hex /file1</command>
      </test-commands>
      <cleanup-commands>
        <command>-fs NAMENODE -rm /file1</command>
      </cleanup-commands>
      <comparators>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output># file: /file1</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output>user.a1=0x313233343536</expected-output>
        </comparator>
      </comparators>
    </test>

    <test>
      <description>getfattr : Get an xattr with base64 encoding</description>
      <test-commands>
        <command>-fs NAMENODE -touchz /file1</command>
        <command>-fs NAMENODE -setfattr -n user.a1 -v 123456 /file1</command>
        <command>-fs NAMENODE -setfattr -n user.a2 -v abc /file1</command>
        <command>-fs NAMENODE -getfattr -n user.a1 -e base64 /file1</command>
      </test-commands>
      <cleanup-commands>
        <command>-fs NAMENODE -rm /file1</command>
      </cleanup-commands>
      <comparators>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output># file: /file1</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output>user.a1=0sMTIzNDU2</expected-output>
        </comparator>
      </comparators>
    </test>

    <test>
      <description>getfattr : Get an xattr with an invalid encoding</description>
      <test-commands>
        <command>-fs NAMENODE -touchz /file1</command>
        <command>-fs NAMENODE -setfattr -n user.a1 -v 123456 /file1</command>
        <command>-fs NAMENODE -setfattr -n user.a2 -v abc /file1</command>
        <command>-fs NAMENODE -getfattr -n user.a1 -e invalid /file1</command>
      </test-commands>
      <cleanup-commands>
        <command>-fs NAMENODE -rm /file1</command>
      </cleanup-commands>
      <comparators>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output>Invalid/unsupported encoding option specified: invalid</expected-output>
        </comparator>
      </comparators>
    </test>

    <test>
      <description>getfattr -R : recursive</description>
      <test-commands>
        <command>-fs NAMENODE -mkdir /dir1</command>
        <command>-fs NAMENODE -setfattr -n user.a1 -v 123456 /dir1</command>
        <command>-fs NAMENODE -mkdir /dir1/dir2</command>
        <command>-fs NAMENODE -setfattr -n user.a2 -v abc /dir1/dir2</command>
        <command>-fs NAMENODE -getfattr -R -d /dir1</command>
      </test-commands>
      <cleanup-commands>
        <command>-fs NAMENODE -rm -R /dir1</command>
      </cleanup-commands>
      <comparators>
        <comparator>
          <type>ExactComparator</type>
          <expected-output># file: /dir1#LF#user.a1="123456"#LF## file: /dir1/dir2#LF#user.a2="abc"#LF#</expected-output>
        </comparator>
      </comparators>
    </test>

  </tests>
</configuration>
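For anyone wanting to drive the same CLI checks outside this test harness, a minimal sketch that invokes FsShell programmatically via ToolRunner; it assumes a reachable default filesystem, and the paths and values simply mirror the tests above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

// Sketch: run setfattr/getfattr as the CLI tests do.
// A zero exit code indicates the command succeeded.
public class FsShellXAttrSketch {
  public static void main(String[] args) throws Exception {
    FsShell shell = new FsShell(new Configuration());
    ToolRunner.run(shell, new String[] {"-touchz", "/file1"});
    ToolRunner.run(shell, new String[] {
        "-setfattr", "-n", "user.a1", "-v", "123456", "/file1"});
    int rc = ToolRunner.run(shell, new String[] {"-getfattr", "-d", "/file1"});
    System.out.println("getfattr exit code: " + rc);
  }
}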
@ -86,6 +86,7 @@
      <item name="Centralized Cache Management" href="hadoop-project-dist/hadoop-hdfs/CentralizedCacheManagement.html"/>
      <item name="HDFS NFS Gateway" href="hadoop-project-dist/hadoop-hdfs/HdfsNfsGateway.html"/>
      <item name="HDFS Rolling Upgrade" href="hadoop-project-dist/hadoop-hdfs/HdfsRollingUpgrade.html"/>
      <item name="Extended Attributes" href="hadoop-project-dist/hadoop-hdfs/ExtendedAttributes.html"/>
    </menu>

    <menu name="MapReduce" inherit="top">