Merge from trunk to fs-encryption branch

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1596873 13f79535-47bb-0310-9956-ffa450edef68

commit 57d3daa7c9
@@ -1,6 +1,8 @@
*.iml
*.ipr
*.iws
*.orig
*.rej
.idea
.svn
.classpath
@@ -7,6 +7,8 @@ Trunk (Unreleased)
    HADOOP-8124. Remove the deprecated FSDataOutputStream constructor,
    FSDataOutputStream.sync() and Syncable.sync(). (szetszwo)

    HADOOP-10474 Move o.a.h.record to hadoop-streaming. (wheat9)

  NEW FEATURES

    HADOOP-10433. Key Management Server based on KeyProvider API. (tucu)

@@ -148,6 +150,8 @@ Trunk (Unreleased)

    HADOOP-10563. Remove the dependency of jsp in trunk. (wheat9)

    HADOOP-10485. Remove dead classes in hadoop-streaming. (wheat9)

  BUG FIXES

    HADOOP-9451. Fault single-layer config if node group topology is enabled.

@@ -336,12 +340,28 @@ Trunk (Unreleased)

    HADOOP-8589. ViewFs tests fail when tests and home dirs are nested (sanjay Radia)

  BREAKDOWN OF HADOOP-10514 SUBTASKS AND RELATED JIRAS

    HADOOP-10520. Extended attributes definition and FileSystem APIs for
    extended attributes. (Yi Liu via wang)

    HADOOP-10546. Javadoc and other small fixes for extended attributes in
    hadoop-common. (Charles Lamb via wang)

    HADOOP-10521. FsShell commands for extended attributes. (Yi Liu via wang)

    HADOOP-10548. Improve FsShell xattr error handling and other fixes. (Charles Lamb via umamahesh)

    HADOOP-10567. Shift XAttr value encoding code out for reuse. (Yi Liu via umamahesh)

    HADOOP-10621. Remove CRLF for xattr value base64 encoding for better display. (Yi Liu via umamahesh)

    HADOOP-10575. Small fixes for XAttrCommands and test. (Yi Liu via umamahesh)

Release 2.5.0 - UNRELEASED

  INCOMPATIBLE CHANGES

    HADOOP-10474 Move o.a.h.record to hadoop-streaming. (wheat9)

  NEW FEATURES

    HADOOP-10498. Add support for proxy server. (daryn)

@@ -359,8 +379,6 @@ Release 2.5.0 - UNRELEASED

    HADOOP-10104. Update jackson to 1.9.13 (Akira Ajisaka via stevel)

    HADOOP-10485. Remove dead classes in hadoop-streaming. (wheat9)

    HADOOP-10503. Move junit up to v 4.11. (cnauroth)

    HADOOP-10535. Make the retry numbers in ActiveStandbyElector configurable.

@@ -390,6 +408,11 @@ Release 2.5.0 - UNRELEASED
    HADOOP-10572. Example NFS mount command must pass noacl as it isn't
    supported by the server yet. (Harsh J via brandonli)

    HADOOP-10609. .gitignore should ignore .orig and .rej files. (kasha)

    HADOOP-10614. CBZip2InputStream is not threadsafe (Xiangrui Meng via
    Sandy Ryza)

  OPTIMIZATIONS

  BUG FIXES

@@ -460,9 +483,6 @@ Release 2.5.0 - UNRELEASED
    HADOOP-10543. RemoteException's unwrapRemoteException method failed for
    PathIOException. (Yongjun Zhang via atm)

    HADOOP-10562. Namenode exits on exception without printing stack trace
    in AbstractDelegationTokenSecretManager. (Arpit Agarwal)

    HADOOP-10568. Add s3 server-side encryption. (David S. Wang via atm)

    HADOOP-10541. InputStream in MiniKdc#initKDCServer for minikdc.ldiff is not

@@ -481,6 +501,9 @@ Release 2.5.0 - UNRELEASED
    HADOOP-10401. ShellBasedUnixGroupsMapping#getGroups does not always return
    primary group first (Akira AJISAKA via Colin Patrick McCabe)

    HADOOP-10489. UserGroupInformation#getTokens and UserGroupInformation
    #addToken can lead to ConcurrentModificationException (Robert Kanter via atm)

Release 2.4.1 - UNRELEASED

  INCOMPATIBLE CHANGES

@@ -513,6 +536,12 @@ Release 2.4.1 - UNRELEASED
    HADOOP-10527. Fix incorrect return code and allow more retries on EINTR.
    (kihwal)

    HADOOP-10612. NFS failed to refresh the user group id mapping table (brandonli)

    HADOOP-10562. Namenode exits on exception without printing stack trace
    in AbstractDelegationTokenSecretManager. (Suresh Srinivas via Arpit
    Agarwal)

Release 2.4.0 - 2014-04-07

  INCOMPATIBLE CHANGES
@@ -17,7 +17,6 @@
 */
package org.apache.hadoop.fs;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.reflect.Constructor;
@@ -1039,6 +1038,163 @@ public abstract class AbstractFileSystem {
        + " doesn't support getAclStatus");
  }

  /**
   * Set an xattr of a file or directory.
   * The name must be prefixed with user/trusted/security/system and
   * followed by ".". For example, "user.attr".
   * <p/>
   * A regular user can only set an xattr for the "user" namespace.
   * The super user can set an xattr of either the "user" or "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * The access permissions of an xattr in the "user" namespace are
   * defined by the file and directory permission bits.
   * An xattr can only be set when the logged-in user has the correct permissions.
   * If the xattr exists, it will be replaced.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to modify
   * @param name xattr name.
   * @param value xattr value.
   * @throws IOException
   */
  public void setXAttr(Path path, String name, byte[] value)
      throws IOException {
    setXAttr(path, name, value, EnumSet.of(XAttrSetFlag.CREATE,
        XAttrSetFlag.REPLACE));
  }

  /**
   * Set an xattr of a file or directory.
   * The name must be prefixed with user/trusted/security/system and
   * followed by ".". For example, "user.attr".
   * <p/>
   * A regular user can only set an xattr for the "user" namespace.
   * The super user can set an xattr of either the "user" or "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * The access permissions of an xattr in the "user" namespace are
   * defined by the file and directory permission bits.
   * An xattr can only be set when the logged-in user has the correct permissions.
   * If the xattr exists, it will be replaced.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to modify
   * @param name xattr name.
   * @param value xattr value.
   * @param flag xattr set flag
   * @throws IOException
   */
  public void setXAttr(Path path, String name, byte[] value,
      EnumSet<XAttrSetFlag> flag) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support setXAttr");
  }

  /**
   * Get an xattr for a file or directory.
   * The name must be prefixed with user/trusted/security/system and
   * followed by ".". For example, "user.attr".
   * <p/>
   * A regular user can only get an xattr for the "user" namespace.
   * The super user can get an xattr of either the "user" or "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * An xattr will only be returned when the logged-in user has the correct permissions.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to get extended attribute
   * @param name xattr name.
   * @return byte[] xattr value.
   * @throws IOException
   */
  public byte[] getXAttr(Path path, String name) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getXAttr");
  }

  /**
   * Get all of the xattrs for a file or directory.
   * Only those xattrs for which the logged-in user has permissions to view
   * are returned.
   * <p/>
   * A regular user can only get xattrs for the "user" namespace.
   * The super user can only get xattrs for "user" and "trusted" namespaces.
   * The xattr of "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to get extended attributes
   * @return Map<String, byte[]> describing the XAttrs of the file or directory
   * @throws IOException
   */
  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getXAttrs");
  }

  /**
   * Get all of the xattrs for a file or directory.
   * Only those xattrs for which the logged-in user has permissions to view
   * are returned.
   * <p/>
   * A regular user can only get xattrs for the "user" namespace.
   * The super user can only get xattrs for "user" and "trusted" namespaces.
   * The xattr of "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to get extended attributes
   * @param names XAttr names.
   * @return Map<String, byte[]> describing the XAttrs of the file or directory
   * @throws IOException
   */
  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getXAttrs");
  }

  /**
   * Remove an xattr of a file or directory.
   * The name must be prefixed with user/trusted/security/system and
   * followed by ".". For example, "user.attr".
   * <p/>
   * A regular user can only remove an xattr for the "user" namespace.
   * The super user can remove an xattr of either the "user" or "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * The access permissions of an xattr in the "user" namespace are
   * defined by the file and directory permission bits.
   * An xattr can only be set when the logged-in user has the correct permissions.
   * If the xattr exists, it will be replaced.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to remove extended attribute
   * @param name xattr name
   * @throws IOException
   */
  public void removeXAttr(Path path, String name) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support removeXAttr");
  }

  @Override //Object
  public int hashCode() {
    return myUri.hashCode();
@@ -2294,4 +2294,194 @@ public final class FileContext {
      }
    }.resolve(this, absF);
  }

  /**
   * Set an xattr of a file or directory.
   * The name must be prefixed with user/trusted/security/system and
   * followed by ".". For example, "user.attr".
   * <p/>
   * A regular user can only set an xattr for the "user" namespace.
   * The super user can set an xattr of either the "user" or "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * The access permissions of an xattr in the "user" namespace are
   * defined by the file and directory permission bits.
   * An xattr can only be set when the logged-in user has the correct permissions.
   * If the xattr exists, it will be replaced.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to modify
   * @param name xattr name.
   * @param value xattr value.
   * @throws IOException
   */
  public void setXAttr(Path path, String name, byte[] value)
      throws IOException {
    setXAttr(path, name, value, EnumSet.of(XAttrSetFlag.CREATE,
        XAttrSetFlag.REPLACE));
  }

  /**
   * Set an xattr of a file or directory.
   * The name must be prefixed with user/trusted/security/system and
   * followed by ".". For example, "user.attr".
   * <p/>
   * A regular user can only set an xattr for the "user" namespace.
   * The super user can set an xattr of either the "user" or "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * The access permissions of an xattr in the "user" namespace are
   * defined by the file and directory permission bits.
   * An xattr can only be set when the logged-in user has the correct permissions.
   * If the xattr exists, it will be replaced.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to modify
   * @param name xattr name.
   * @param value xattr value.
   * @param flag xattr set flag
   * @throws IOException
   */
  public void setXAttr(Path path, final String name, final byte[] value,
      final EnumSet<XAttrSetFlag> flag) throws IOException {
    final Path absF = fixRelativePart(path);
    new FSLinkResolver<Void>() {
      @Override
      public Void next(final AbstractFileSystem fs, final Path p)
          throws IOException {
        fs.setXAttr(p, name, value, flag);
        return null;
      }
    }.resolve(this, absF);
  }

  /**
   * Get an xattr for a file or directory.
   * The name must be prefixed with user/trusted/security/system and
   * followed by ".". For example, "user.attr".
   * <p/>
   *
   * A regular user can only get an xattr for the "user" namespace.
   * The super user can get an xattr of either the "user" or "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * An xattr will only be returned when the logged-in user has the correct permissions.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to get extended attribute
   * @param name xattr name.
   * @return byte[] xattr value.
   * @throws IOException
   */
  public byte[] getXAttr(Path path, final String name) throws IOException {
    final Path absF = fixRelativePart(path);
    return new FSLinkResolver<byte[]>() {
      @Override
      public byte[] next(final AbstractFileSystem fs, final Path p)
          throws IOException {
        return fs.getXAttr(p, name);
      }
    }.resolve(this, absF);
  }

  /**
   * Get all of the xattrs for a file or directory.
   * Only those xattrs for which the logged-in user has permissions to view
   * are returned.
   * <p/>
   * A regular user can only get xattrs for the "user" namespace.
   * The super user can only get xattrs for "user" and "trusted" namespaces.
   * The xattr of "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to get extended attributes
   * @return Map<String, byte[]> describing the XAttrs of the file or directory
   * @throws IOException
   */
  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
    final Path absF = fixRelativePart(path);
    return new FSLinkResolver<Map<String, byte[]>>() {
      @Override
      public Map<String, byte[]> next(final AbstractFileSystem fs, final Path p)
          throws IOException {
        return fs.getXAttrs(p);
      }
    }.resolve(this, absF);
  }

  /**
   * Get all of the xattrs for a file or directory.
   * Only those xattrs for which the logged-in user has permissions to view
   * are returned.
   * <p/>
   * A regular user can only get xattrs for the "user" namespace.
   * The super user can only get xattrs for "user" and "trusted" namespaces.
   * The xattr of "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to get extended attributes
   * @param names XAttr names.
   * @return Map<String, byte[]> describing the XAttrs of the file or directory
   * @throws IOException
   */
  public Map<String, byte[]> getXAttrs(Path path, final List<String> names)
      throws IOException {
    final Path absF = fixRelativePart(path);
    return new FSLinkResolver<Map<String, byte[]>>() {
      @Override
      public Map<String, byte[]> next(final AbstractFileSystem fs, final Path p)
          throws IOException {
        return fs.getXAttrs(p, names);
      }
    }.resolve(this, absF);
  }

  /**
   * Remove an xattr of a file or directory.
   * The name must be prefixed with user/trusted/security/system and
   * followed by ".". For example, "user.attr".
   * <p/>
   * A regular user can only remove an xattr for the "user" namespace.
   * The super user can remove an xattr of either the "user" or "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * The access permissions of an xattr in the "user" namespace are
   * defined by the file and directory permission bits.
   * An xattr can only be set when the logged-in user has the correct permissions.
   * If the xattr exists, it will be replaced.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to remove extended attribute
   * @param name xattr name
   * @throws IOException
   */
  public void removeXAttr(Path path, final String name) throws IOException {
    final Path absF = fixRelativePart(path);
    new FSLinkResolver<Void>() {
      @Override
      public Void next(final AbstractFileSystem fs, final Path p)
          throws IOException {
        fs.removeXAttr(p, name);
        return null;
      }
    }.resolve(this, absF);
  }
}
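The FileContext methods above resolve symlinks through FSLinkResolver and then delegate to the underlying AbstractFileSystem. A minimal usage sketch follows; the path and attribute name are hypothetical, and it assumes the default file system on the classpath actually implements the new xattr methods (otherwise the UnsupportedOperationException defaults shown earlier are what a caller sees).

import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;

public class FileContextXAttrExample {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext(new Configuration());
    Path file = new Path("/tmp/xattr-demo.txt");   // hypothetical path, assumed to exist

    // Create the attribute only if it does not exist yet; with CREATE alone,
    // a second call fails (see XAttrSetFlag.validate further down).
    fc.setXAttr(file, "user.owner-team", "search".getBytes("UTF-8"),
        EnumSet.of(XAttrSetFlag.CREATE));

    // Overwrite it later; REPLACE alone fails if the attribute is missing.
    fc.setXAttr(file, "user.owner-team", "indexing".getBytes("UTF-8"),
        EnumSet.of(XAttrSetFlag.REPLACE));

    byte[] value = fc.getXAttr(file, "user.owner-team");
    System.out.println(new String(value, "UTF-8"));
  }
}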
@@ -2350,6 +2350,164 @@ public abstract class FileSystem extends Configured implements Closeable {
        + " doesn't support getAclStatus");
  }

  /**
   * Set an xattr of a file or directory.
   * The name must be prefixed with user/trusted/security/system and
   * followed by ".". For example, "user.attr".
   * <p/>
   * A regular user can only set an xattr for the "user" namespace.
   * The super user can set an xattr of either the "user" or "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * The access permissions of an xattr in the "user" namespace are
   * defined by the file and directory permission bits.
   * An xattr can only be set when the logged-in user has the correct permissions.
   * If the xattr exists, it will be replaced.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to modify
   * @param name xattr name.
   * @param value xattr value.
   * @throws IOException
   */
  public void setXAttr(Path path, String name, byte[] value)
      throws IOException {
    setXAttr(path, name, value, EnumSet.of(XAttrSetFlag.CREATE,
        XAttrSetFlag.REPLACE));
  }

  /**
   * Set an xattr of a file or directory.
   * The name must be prefixed with user/trusted/security/system and
   * followed by ".". For example, "user.attr".
   * <p/>
   * A regular user can only set an xattr for the "user" namespace.
   * The super user can set an xattr of either the "user" or "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * The access permissions of an xattr in the "user" namespace are
   * defined by the file and directory permission bits.
   * An xattr can only be set when the logged-in user has the correct permissions.
   * If the xattr exists, it will be replaced.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to modify
   * @param name xattr name.
   * @param value xattr value.
   * @param flag xattr set flag
   * @throws IOException
   */
  public void setXAttr(Path path, String name, byte[] value,
      EnumSet<XAttrSetFlag> flag) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support setXAttr");
  }

  /**
   * Get an xattr for a file or directory.
   * The name must be prefixed with user/trusted/security/system and
   * followed by ".". For example, "user.attr".
   * <p/>
   *
   * A regular user can only get an xattr for the "user" namespace.
   * The super user can get an xattr of either the "user" or "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * An xattr will only be returned when the logged-in user has the correct permissions.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to get extended attribute
   * @param name xattr name.
   * @return byte[] xattr value.
   * @throws IOException
   */
  public byte[] getXAttr(Path path, String name) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getXAttr");
  }

  /**
   * Get all of the xattrs for a file or directory.
   * Only those xattrs for which the logged-in user has permissions to view
   * are returned.
   * <p/>
   * A regular user can only get xattrs for the "user" namespace.
   * The super user can only get xattrs for "user" and "trusted" namespaces.
   * The xattr of "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to get extended attributes
   * @return Map<String, byte[]> describing the XAttrs of the file or directory
   * @throws IOException
   */
  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getXAttrs");
  }

  /**
   * Get all of the xattrs for a file or directory.
   * Only those xattrs for which the logged-in user has permissions to view
   * are returned.
   * <p/>
   * A regular user can only get xattrs for the "user" namespace.
   * The super user can only get xattrs for "user" and "trusted" namespaces.
   * The xattr of "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to get extended attributes
   * @param names XAttr names.
   * @return Map<String, byte[]> describing the XAttrs of the file or directory
   * @throws IOException
   */
  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getXAttrs");
  }

  /**
   * Remove an xattr of a file or directory.
   * The name must be prefixed with user/trusted/security/system and
   * followed by ".". For example, "user.attr".
   * <p/>
   * A regular user can only remove an xattr for the "user" namespace.
   * The super user can remove an xattr of either the "user" or "trusted" namespaces.
   * The xattrs of the "security" and "system" namespaces are only used/exposed
   * internally by/to the FS impl.
   * <p/>
   * The access permissions of an xattr in the "user" namespace are
   * defined by the file and directory permission bits.
   * An xattr can only be set when the logged-in user has the correct permissions.
   * If the xattr exists, it will be replaced.
   * <p/>
   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
   *
   * @param path Path to remove extended attribute
   * @param name xattr name
   * @throws IOException
   */
  public void removeXAttr(Path path, String name) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support removeXAttr");
  }

  // making it volatile to be able to do a double checked locking
  private volatile static boolean FILE_SYSTEMS_LOADED = false;
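These FileSystem methods are the user-facing mirror of the FileContext API above; concrete file systems override them, and anything else falls through to the UnsupportedOperationException defaults. A hedged sketch of the call pattern (hypothetical path and attribute names, assuming fs.defaultFS points at a file system that supports xattrs):

import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemXAttrExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/tmp/xattr-demo.txt");   // hypothetical, assumed to exist

    // The two-argument overload defaults to CREATE|REPLACE, so this works
    // whether or not the attribute exists yet.
    fs.setXAttr(file, "user.checksum-algo", "sha256".getBytes("UTF-8"));

    // Fetch one attribute, then dump all of them.
    byte[] one = fs.getXAttr(file, "user.checksum-algo");
    System.out.println("user.checksum-algo = " + new String(one, "UTF-8"));
    for (Map.Entry<String, byte[]> e : fs.getXAttrs(file).entrySet()) {
      System.out.println(e.getKey() + " -> " + e.getValue().length + " bytes");
    }

    fs.removeXAttr(file, "user.checksum-algo");
  }
}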
@@ -23,6 +23,7 @@ import java.net.URI;
import java.net.URISyntaxException;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

@@ -538,4 +539,37 @@ public class FilterFileSystem extends FileSystem {
  public AclStatus getAclStatus(Path path) throws IOException {
    return fs.getAclStatus(path);
  }

  @Override
  public void setXAttr(Path path, String name, byte[] value)
      throws IOException {
    fs.setXAttr(path, name, value);
  }

  @Override
  public void setXAttr(Path path, String name, byte[] value,
      EnumSet<XAttrSetFlag> flag) throws IOException {
    fs.setXAttr(path, name, value, flag);
  }

  @Override
  public byte[] getXAttr(Path path, String name) throws IOException {
    return fs.getXAttr(path, name);
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
    return fs.getXAttrs(path);
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
      throws IOException {
    return fs.getXAttrs(path, names);
  }

  @Override
  public void removeXAttr(Path path, String name) throws IOException {
    fs.removeXAttr(path, name);
  }
}
@@ -22,6 +22,7 @@ import java.net.URI;
import java.net.URISyntaxException;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

@@ -316,4 +317,37 @@ public abstract class FilterFs extends AbstractFileSystem {
  public AclStatus getAclStatus(Path path) throws IOException {
    return myFs.getAclStatus(path);
  }

  @Override
  public void setXAttr(Path path, String name, byte[] value)
      throws IOException {
    myFs.setXAttr(path, name, value);
  }

  @Override
  public void setXAttr(Path path, String name, byte[] value,
      EnumSet<XAttrSetFlag> flag) throws IOException {
    myFs.setXAttr(path, name, value, flag);
  }

  @Override
  public byte[] getXAttr(Path path, String name) throws IOException {
    return myFs.getXAttr(path, name);
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
    return myFs.getXAttrs(path);
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
      throws IOException {
    return myFs.getXAttrs(path, names);
  }

  @Override
  public void removeXAttr(Path path, String name) throws IOException {
    myFs.removeXAttr(path, name);
  }
}
@@ -0,0 +1,121 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs;

import java.io.IOException;

import org.apache.commons.codec.DecoderException;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.codec.binary.Hex;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

import com.google.common.base.Preconditions;

/**
 * The value of <code>XAttr</code> is byte[], this class is to
 * convert byte[] to some kind of string representation or convert back.
 * String representation is convenient for display and input. For example
 * display in screen as shell response and json response, input as http
 * or shell parameter.
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public enum XAttrCodec {
  /**
   * Value encoded as text
   * string is enclosed in double quotes (\").
   */
  TEXT,

  /**
   * Value encoded as hexadecimal string
   * is prefixed with 0x.
   */
  HEX,

  /**
   * Value encoded as base64 string
   * is prefixed with 0s.
   */
  BASE64;

  private static final String HEX_PREFIX = "0x";
  private static final String BASE64_PREFIX = "0s";
  private static final Base64 base64 = new Base64(0);

  /**
   * Decode string representation of a value and check whether it's
   * encoded. If the given string begins with 0x or 0X, it expresses
   * a hexadecimal number. If the given string begins with 0s or 0S,
   * base64 encoding is expected. If the given string is enclosed in
   * double quotes, the inner string is treated as text. Otherwise
   * the given string is treated as text.
   * @param value string representation of the value.
   * @return byte[] the value
   * @throws IOException
   */
  public static byte[] decodeValue(String value) throws IOException {
    byte[] result = null;
    if (value != null) {
      if (value.length() >= 2) {
        String en = value.substring(0, 2);
        if (value.startsWith("\"") && value.endsWith("\"")) {
          value = value.substring(1, value.length()-1);
          result = value.getBytes("utf-8");
        } else if (en.equalsIgnoreCase(HEX_PREFIX)) {
          value = value.substring(2, value.length());
          try {
            result = Hex.decodeHex(value.toCharArray());
          } catch (DecoderException e) {
            throw new IOException(e);
          }
        } else if (en.equalsIgnoreCase(BASE64_PREFIX)) {
          value = value.substring(2, value.length());
          result = base64.decode(value);
        }
      }
      if (result == null) {
        result = value.getBytes("utf-8");
      }
    }
    return result;
  }

  /**
   * Encode byte[] value to string representation with encoding.
   * Values encoded as text strings are enclosed in double quotes (\"),
   * while strings encoded as hexadecimal and base64 are prefixed with
   * 0x and 0s, respectively.
   * @param value byte[] value
   * @param encoding
   * @return String string representation of value
   * @throws IOException
   */
  public static String encodeValue(byte[] value, XAttrCodec encoding)
      throws IOException {
    Preconditions.checkNotNull(value, "Value can not be null.");
    if (encoding == HEX) {
      return HEX_PREFIX + Hex.encodeHexString(value);
    } else if (encoding == BASE64) {
      return BASE64_PREFIX + base64.encodeToString(value);
    } else {
      return "\"" + new String(value, "utf-8") + "\"";
    }
  }
}
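A small round-trip sketch of the codec above (the input bytes are arbitrary). decodeValue infers the decoding from the prefix or quoting of the input, while encodeValue is told which representation to produce; with Base64(0) the encoder emits no line breaks, which is what the HADOOP-10621 entry in CHANGES.txt refers to.

import org.apache.hadoop.fs.XAttrCodec;

public class XAttrCodecExample {
  public static void main(String[] args) throws Exception {
    byte[] raw = "some value".getBytes("UTF-8");

    // Encode the same bytes three ways.
    String asText = XAttrCodec.encodeValue(raw, XAttrCodec.TEXT);      // "some value" (quoted)
    String asHex = XAttrCodec.encodeValue(raw, XAttrCodec.HEX);        // 0x736f6d652076616c7565
    String asBase64 = XAttrCodec.encodeValue(raw, XAttrCodec.BASE64);  // 0sc29tZSB2YWx1ZQ==
    System.out.println(asText + " | " + asHex + " | " + asBase64);

    // decodeValue recognizes the quoting/prefix and reverses each of them.
    System.out.println(new String(XAttrCodec.decodeValue(asText), "UTF-8"));
    System.out.println(new String(XAttrCodec.decodeValue(asHex), "UTF-8"));
    System.out.println(new String(XAttrCodec.decodeValue(asBase64), "UTF-8"));
  }
}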
@@ -0,0 +1,71 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs;

import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

@InterfaceAudience.Public
@InterfaceStability.Stable
public enum XAttrSetFlag {
  /**
   * Create a new xattr.
   * If the xattr exists already, exception will be thrown.
   */
  CREATE((short) 0x01),

  /**
   * Replace an existing xattr.
   * If the xattr does not exist, exception will be thrown.
   */
  REPLACE((short) 0x02);

  private final short flag;

  private XAttrSetFlag(short flag) {
    this.flag = flag;
  }

  short getFlag() {
    return flag;
  }

  public static void validate(String xAttrName, boolean xAttrExists,
      EnumSet<XAttrSetFlag> flag) throws IOException {
    if (flag == null || flag.isEmpty()) {
      throw new HadoopIllegalArgumentException("A flag must be specified.");
    }

    if (xAttrExists) {
      if (!flag.contains(REPLACE)) {
        throw new IOException("XAttr: " + xAttrName +
            " already exists. The REPLACE flag must be specified.");
      }
    } else {
      if (!flag.contains(CREATE)) {
        throw new IOException("XAttr: " + xAttrName +
            " does not exist. The CREATE flag must be specified.");
      }
    }
  }
}
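The validate helper above is what an implementation is expected to call before applying a set. A short sketch of its semantics (the attribute name is arbitrary):

import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.fs.XAttrSetFlag;

public class XAttrSetFlagExample {
  public static void main(String[] args) throws IOException {
    // Attribute does not exist yet and CREATE was requested: passes.
    XAttrSetFlag.validate("user.attr", false, EnumSet.of(XAttrSetFlag.CREATE));

    // Attribute already exists but only CREATE was requested: throws
    // IOException("... already exists. The REPLACE flag must be specified.").
    try {
      XAttrSetFlag.validate("user.attr", true, EnumSet.of(XAttrSetFlag.CREATE));
    } catch (IOException expected) {
      System.out.println(expected.getMessage());
    }

    // CREATE|REPLACE, the default used by the two-argument setXAttr overloads,
    // passes whether or not the attribute exists.
    XAttrSetFlag.validate("user.attr", true,
        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
  }
}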
@@ -59,6 +59,7 @@ abstract public class FsCommand extends Command {
    factory.registerCommands(Test.class);
    factory.registerCommands(Touch.class);
    factory.registerCommands(SnapshotCommands.class);
    factory.registerCommands(XAttrCommands.class);
  }

  protected FsCommand() {}
@@ -0,0 +1,188 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.shell;

import java.io.IOException;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
import java.util.Map.Entry;

import com.google.common.base.Enums;
import com.google.common.base.Function;
import com.google.common.base.Preconditions;

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.util.StringUtils;

/**
 * XAttr related operations
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
class XAttrCommands extends FsCommand {
  private static final String GET_FATTR = "getfattr";
  private static final String SET_FATTR = "setfattr";

  public static void registerCommands(CommandFactory factory) {
    factory.addClass(GetfattrCommand.class, "-" + GET_FATTR);
    factory.addClass(SetfattrCommand.class, "-" + SET_FATTR);
  }

  /**
   * Implements the '-getfattr' command for the FsShell.
   */
  public static class GetfattrCommand extends FsCommand {
    public static final String NAME = GET_FATTR;
    public static final String USAGE = "[-R] {-n name | -d} [-e en] <path>";
    public static final String DESCRIPTION =
      "Displays the extended attribute names and values (if any) for a " +
      "file or directory.\n" +
      "-R: Recursively list the attributes for all files and directories.\n" +
      "-n name: Dump the named extended attribute value.\n" +
      "-d: Dump all extended attribute values associated with pathname.\n" +
      "-e <encoding>: Encode values after retrieving them.\n" +
      "Valid encodings are \"text\", \"hex\", and \"base64\".\n" +
      "Values encoded as text strings are enclosed in double quotes (\"),\n" +
      " and values encoded as hexadecimal and base64 are prefixed with\n" +
      "0x and 0s, respectively.\n" +
      "<path>: The file or directory.\n";
    private final static Function<String, XAttrCodec> enValueOfFunc =
        Enums.valueOfFunction(XAttrCodec.class);

    private String name = null;
    private boolean dump = false;
    private XAttrCodec encoding = XAttrCodec.TEXT;

    @Override
    protected void processOptions(LinkedList<String> args) throws IOException {
      name = StringUtils.popOptionWithArgument("-n", args);
      String en = StringUtils.popOptionWithArgument("-e", args);
      if (en != null) {
        encoding = enValueOfFunc.apply(en.toUpperCase());
        Preconditions.checkArgument(encoding != null,
            "Invalid/unsupported encoding option specified: " + en);
      }

      boolean r = StringUtils.popOption("-R", args);
      setRecursive(r);
      dump = StringUtils.popOption("-d", args);

      if (!dump && name == null) {
        throw new HadoopIllegalArgumentException(
            "Must specify '-n name' or '-d' option.");
      }

      if (args.isEmpty()) {
        throw new HadoopIllegalArgumentException("<path> is missing.");
      }
      if (args.size() > 1) {
        throw new HadoopIllegalArgumentException("Too many arguments.");
      }
    }

    @Override
    protected void processPath(PathData item) throws IOException {
      out.println("# file: " + item);
      if (dump) {
        Map<String, byte[]> xattrs = item.fs.getXAttrs(item.path);
        if (xattrs != null) {
          Iterator<Entry<String, byte[]>> iter = xattrs.entrySet().iterator();
          while(iter.hasNext()) {
            Entry<String, byte[]> entry = iter.next();
            printXAttr(entry.getKey(), entry.getValue());
          }
        }
      } else {
        byte[] value = item.fs.getXAttr(item.path, name);
        printXAttr(name, value);
      }
    }

    private void printXAttr(String name, byte[] value) throws IOException{
      if (value != null) {
        if (value.length != 0) {
          out.println(name + "=" + XAttrCodec.encodeValue(value, encoding));
        } else {
          out.println(name);
        }
      }
    }
  }

  /**
   * Implements the '-setfattr' command for the FsShell.
   */
  public static class SetfattrCommand extends FsCommand {
    public static final String NAME = SET_FATTR;
    public static final String USAGE = "{-n name [-v value] | -x name} <path>";
    public static final String DESCRIPTION =
      "Sets an extended attribute name and value for a file or directory.\n" +
      "-n name: The extended attribute name.\n" +
      "-v value: The extended attribute value. There are three different\n" +
      "encoding methods for the value. If the argument is enclosed in double\n" +
      "quotes, then the value is the string inside the quotes. If the\n" +
      "argument is prefixed with 0x or 0X, then it is taken as a hexadecimal\n" +
      "number. If the argument begins with 0s or 0S, then it is taken as a\n" +
      "base64 encoding.\n" +
      "-x name: Remove the extended attribute.\n" +
      "<path>: The file or directory.\n";

    private String name = null;
    private byte[] value = null;
    private String xname = null;

    @Override
    protected void processOptions(LinkedList<String> args) throws IOException {
      name = StringUtils.popOptionWithArgument("-n", args);
      String v = StringUtils.popOptionWithArgument("-v", args);
      if (v != null) {
        value = XAttrCodec.decodeValue(v);
      }
      xname = StringUtils.popOptionWithArgument("-x", args);

      if (name != null && xname != null) {
        throw new HadoopIllegalArgumentException(
            "Can not specify both '-n name' and '-x name' option.");
      }
      if (name == null && xname == null) {
        throw new HadoopIllegalArgumentException(
            "Must specify '-n name' or '-x name' option.");
      }

      if (args.isEmpty()) {
        throw new HadoopIllegalArgumentException("<path> is missing.");
      }
      if (args.size() > 1) {
        throw new HadoopIllegalArgumentException("Too many arguments.");
      }
    }

    @Override
    protected void processPath(PathData item) throws IOException {
      if (name != null) {
        item.fs.setXAttr(item.path, name, value);
      } else if (xname != null) {
        item.fs.removeXAttr(item.path, xname);
      }
    }
  }
}
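Because XAttrCommands is registered in FsCommand above, the new options are reachable through the ordinary FsShell entry point. A hedged sketch of driving them programmatically, in the same ToolRunner style the accompanying TestXAttrCommands uses (the path is hypothetical, and the exit code is non-zero if the target FileSystem does not implement the xattr methods):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class XAttrShellExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // Equivalent to: hdfs dfs -setfattr -n user.myAttr -v myValue /tmp/demo.txt
    int rc = ToolRunner.run(conf, new FsShell(conf), new String[] {
        "-setfattr", "-n", "user.myAttr", "-v", "myValue", "/tmp/demo.txt" });
    System.out.println("setfattr exit code: " + rc);

    // Equivalent to: hdfs dfs -getfattr -d -e hex /tmp/demo.txt
    rc = ToolRunner.run(conf, new FsShell(conf), new String[] {
        "-getfattr", "-d", "-e", "hex", "/tmp/demo.txt" });
    System.out.println("getfattr exit code: " + rc);
  }
}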
@@ -21,6 +21,7 @@ import java.io.IOException;
import java.net.URI;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

@@ -37,6 +38,7 @@ import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;

@@ -313,6 +315,33 @@ class ChRootedFileSystem extends FilterFileSystem {
    return super.getAclStatus(fullPath(path));
  }

  @Override
  public void setXAttr(Path path, String name, byte[] value,
      EnumSet<XAttrSetFlag> flag) throws IOException {
    super.setXAttr(fullPath(path), name, value, flag);
  }

  @Override
  public byte[] getXAttr(Path path, String name) throws IOException {
    return super.getXAttr(fullPath(path), name);
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
    return super.getXAttrs(fullPath(path));
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
      throws IOException {
    return super.getXAttrs(fullPath(path), names);
  }

  @Override
  public void removeXAttr(Path path, String name) throws IOException {
    super.removeXAttr(fullPath(path), name);
  }

  @Override
  public Path resolvePath(final Path p) throws IOException {
    return super.resolvePath(fullPath(p));
@@ -27,6 +27,7 @@ import java.util.Arrays;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Map.Entry;

@@ -46,6 +47,7 @@ import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;

@@ -519,6 +521,43 @@ public class ViewFileSystem extends FileSystem {
    return res.targetFileSystem.getAclStatus(res.remainingPath);
  }

  @Override
  public void setXAttr(Path path, String name, byte[] value,
      EnumSet<XAttrSetFlag> flag) throws IOException {
    InodeTree.ResolveResult<FileSystem> res =
        fsState.resolve(getUriPath(path), true);
    res.targetFileSystem.setXAttr(res.remainingPath, name, value, flag);
  }

  @Override
  public byte[] getXAttr(Path path, String name) throws IOException {
    InodeTree.ResolveResult<FileSystem> res =
        fsState.resolve(getUriPath(path), true);
    return res.targetFileSystem.getXAttr(res.remainingPath, name);
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
    InodeTree.ResolveResult<FileSystem> res =
        fsState.resolve(getUriPath(path), true);
    return res.targetFileSystem.getXAttrs(res.remainingPath);
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
      throws IOException {
    InodeTree.ResolveResult<FileSystem> res =
        fsState.resolve(getUriPath(path), true);
    return res.targetFileSystem.getXAttrs(res.remainingPath, names);
  }

  @Override
  public void removeXAttr(Path path, String name) throws IOException {
    InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(path),
        true);
    res.targetFileSystem.removeXAttr(res.remainingPath, name);
  }

  @Override
  public void setVerifyChecksum(final boolean verifyChecksum) {
    List<InodeTree.MountPoint<FileSystem>> mountPoints =
@@ -129,7 +129,7 @@ public class CBZip2InputStream extends InputStream implements BZip2Constants {
  private int computedBlockCRC, computedCombinedCRC;

  private boolean skipResult = false;// used by skipToNextMarker
  private static boolean skipDecompression = false;
  private boolean skipDecompression = false;

  // Variables used by setup* methods exclusively

@@ -281,12 +281,18 @@ public class CBZip2InputStream extends InputStream implements BZip2Constants {
   */
  public CBZip2InputStream(final InputStream in, READ_MODE readMode)
      throws IOException {
    this(in, readMode, false);
  }

  private CBZip2InputStream(final InputStream in, READ_MODE readMode, boolean skipDecompression)
      throws IOException {

    super();
    int blockSize = 0X39;// i.e 9
    this.blockSize100k = blockSize - '0';
    this.in = new BufferedInputStream(in, 1024 * 9);// >1 MB buffer
    this.readMode = readMode;
    this.skipDecompression = skipDecompression;
    if (readMode == READ_MODE.CONTINUOUS) {
      currentState = STATE.START_BLOCK_STATE;
      lazyInitialization = (in.available() == 0)?true:false;

@@ -316,11 +322,7 @@ public class CBZip2InputStream extends InputStream implements BZip2Constants {
   *
   */
  public static long numberOfBytesTillNextMarker(final InputStream in) throws IOException{
    CBZip2InputStream.skipDecompression = true;
    CBZip2InputStream anObject = null;

    anObject = new CBZip2InputStream(in, READ_MODE.BYBLOCK);

    CBZip2InputStream anObject = new CBZip2InputStream(in, READ_MODE.BYBLOCK, true);
    return anObject.getProcessedByteCount();
  }

@@ -397,7 +399,7 @@ public class CBZip2InputStream extends InputStream implements BZip2Constants {

    if(skipDecompression){
      changeStateToProcessABlock();
      CBZip2InputStream.skipDecompression = false;
      skipDecompression = false;
    }

    final int hi = offs + len;
@@ -1392,7 +1392,7 @@ public class UserGroupInformation {
   * @param token Token to be added
   * @return true on successful add of new token
   */
  public synchronized boolean addToken(Token<? extends TokenIdentifier> token) {
  public boolean addToken(Token<? extends TokenIdentifier> token) {
    return (token != null) ? addToken(token.getService(), token) : false;
  }

@@ -1403,10 +1403,11 @@ public class UserGroupInformation {
   * @param token Token to be added
   * @return true on successful add of new token
   */
  public synchronized boolean addToken(Text alias,
      Token<? extends TokenIdentifier> token) {
    getCredentialsInternal().addToken(alias, token);
    return true;
  public boolean addToken(Text alias, Token<? extends TokenIdentifier> token) {
    synchronized (subject) {
      getCredentialsInternal().addToken(alias, token);
      return true;
    }
  }

  /**

@@ -1414,10 +1415,11 @@ public class UserGroupInformation {
   *
   * @return an unmodifiable collection of tokens associated with user
   */
  public synchronized
  Collection<Token<? extends TokenIdentifier>> getTokens() {
    return Collections.unmodifiableCollection(
        new ArrayList<Token<?>>(getCredentialsInternal().getAllTokens()));
  public Collection<Token<? extends TokenIdentifier>> getTokens() {
    synchronized (subject) {
      return Collections.unmodifiableCollection(
          new ArrayList<Token<?>>(getCredentialsInternal().getAllTokens()));
    }
  }

  /**

@@ -1425,23 +1427,27 @@ public class UserGroupInformation {
   *
   * @return Credentials of tokens associated with this user
   */
  public synchronized Credentials getCredentials() {
    Credentials creds = new Credentials(getCredentialsInternal());
    Iterator<Token<?>> iter = creds.getAllTokens().iterator();
    while (iter.hasNext()) {
      if (iter.next() instanceof Token.PrivateToken) {
        iter.remove();
  public Credentials getCredentials() {
    synchronized (subject) {
      Credentials creds = new Credentials(getCredentialsInternal());
      Iterator<Token<?>> iter = creds.getAllTokens().iterator();
      while (iter.hasNext()) {
        if (iter.next() instanceof Token.PrivateToken) {
          iter.remove();
        }
      }
      return creds;
    }
    return creds;
  }

  /**
   * Add the given Credentials to this user.
   * @param credentials of tokens and secrets
   */
  public synchronized void addCredentials(Credentials credentials) {
    getCredentialsInternal().addAll(credentials);
  public void addCredentials(Credentials credentials) {
    synchronized (subject) {
      getCredentialsInternal().addAll(credentials);
    }
  }

  private synchronized Credentials getCredentialsInternal() {
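The UserGroupInformation hunks above drop method-level synchronization in favor of blocks synchronized on the backing subject, which lines up with the HADOOP-10489 entry in CHANGES.txt (concurrent addToken/getTokens surfacing a ConcurrentModificationException). A rough sketch of the access pattern that locking protects; the user name, token kind, and services are made up, and the sketch is written against a newer JDK for brevity rather than the project's own style:

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class UgiTokenConcurrencyExample {
  public static void main(String[] args) throws Exception {
    final UserGroupInformation ugi =
        UserGroupInformation.createRemoteUser("demo-user");   // hypothetical user

    // Writer: keeps registering tokens under distinct service names.
    Thread writer = new Thread(() -> {
      for (int i = 0; i < 10000; i++) {
        Token<? extends TokenIdentifier> t = new Token<>(
            new byte[0], new byte[0], new Text("demo-kind"), new Text("service-" + i));
        ugi.addToken(t);
      }
    });

    // Reader: repeatedly snapshots the token collection. With the locking in the
    // hunks above, each snapshot is built under the subject lock, so it does not
    // race with the writer even when several UGI instances share one subject.
    Thread reader = new Thread(() -> {
      for (int i = 0; i < 10000; i++) {
        int n = ugi.getTokens().size();
        if (i % 1000 == 0) {
          System.out.println("tokens so far: " + n);
        }
      }
    });

    writer.start();
    reader.start();
    writer.join();
    reader.join();
  }
}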
@@ -254,6 +254,35 @@ getfacl

   Returns 0 on success and non-zero on error.

getfattr

   Usage: <<<hdfs dfs -getfattr [-R] {-n name | -d} [-e en] <path> >>>

   Displays the extended attribute names and values (if any) for a file or
   directory.

   Options:

     * -R: Recursively list the attributes for all files and directories.

     * -n name: Dump the named extended attribute value.

     * -d: Dump all extended attribute values associated with pathname.

     * -e <encoding>: Encode values after retrieving them. Valid encodings are "text", "hex", and "base64". Values encoded as text strings are enclosed in double quotes ("), and values encoded as hexadecimal and base64 are prefixed with 0x and 0s, respectively.

     * <path>: The file or directory.

   Examples:

     * <<<hdfs dfs -getfattr -d /file>>>

     * <<<hdfs dfs -getfattr -R -n user.myAttr /dir>>>

   Exit Code:

   Returns 0 on success and non-zero on error.

getmerge

   Usage: <<<hdfs dfs -getmerge <src> <localdst> [addnl]>>>

@@ -450,6 +479,36 @@ setfacl

   Returns 0 on success and non-zero on error.

setfattr

   Usage: <<<hdfs dfs -setfattr {-n name [-v value] | -x name} <path> >>>

   Sets an extended attribute name and value for a file or directory.

   Options:

     * -b: Remove all but the base ACL entries. The entries for user, group and others are retained for compatibility with permission bits.

     * -n name: The extended attribute name.

     * -v value: The extended attribute value. There are three different encoding methods for the value. If the argument is enclosed in double quotes, then the value is the string inside the quotes. If the argument is prefixed with 0x or 0X, then it is taken as a hexadecimal number. If the argument begins with 0s or 0S, then it is taken as a base64 encoding.

     * -x name: Remove the extended attribute.

     * <path>: The file or directory.

   Examples:

     * <<<hdfs dfs -setfattr -n user.myAttr -v myValue /file>>>

     * <<<hdfs dfs -setfattr -n user.noValue /file>>>

     * <<<hdfs dfs -setfattr -x user.myAttr /file>>>

   Exit Code:

   Returns 0 on success and non-zero on error.

setrep

   Usage: <<<hdfs dfs -setrep [-R] [-w] <numReplicas> <path> >>>
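The getfattr/setfattr commands documented above can also be driven from Java through FsShell, much as the test added later in this change does. A minimal sketch, with a made-up path and attribute name, assuming a cluster configuration on the classpath:

    // Run the documented shell commands programmatically via ToolRunner.
    Configuration conf = new Configuration();
    int rc = ToolRunner.run(conf, new FsShell(),
        new String[] {"-setfattr", "-n", "user.myAttr", "-v", "myValue", "/file"});
    rc = ToolRunner.run(conf, new FsShell(),
        new String[] {"-getfattr", "-d", "/file"});   // prints user.myAttr="myValue"
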
@@ -36,6 +36,7 @@ import java.lang.reflect.Modifier;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import static org.apache.hadoop.fs.Options.ChecksumOpt;
import static org.apache.hadoop.fs.Options.CreateOpts;

@@ -181,6 +182,21 @@ public class TestHarFileSystem {

    public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException;

    public void setXAttr(Path path, String name, byte[] value)
        throws IOException;

    public void setXAttr(Path path, String name, byte[] value,
        EnumSet<XAttrSetFlag> flag) throws IOException;

    public byte[] getXAttr(Path path, String name) throws IOException;

    public Map<String, byte[]> getXAttrs(Path path) throws IOException;

    public Map<String, byte[]> getXAttrs(Path path, List<String> names)
        throws IOException;

    public void removeXAttr(Path path, String name) throws IOException;

    public AclStatus getAclStatus(Path path) throws IOException;
  }

@@ -0,0 +1,98 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.shell;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class TestXAttrCommands {
  private final ByteArrayOutputStream errContent =
      new ByteArrayOutputStream();
  private Configuration conf = null;
  private PrintStream initialStdErr;

  @Before
  public void setup() throws IOException {
    errContent.reset();
    initialStdErr = System.err;
    System.setErr(new PrintStream(errContent));
    conf = new Configuration();
  }

  @After
  public void cleanUp() throws Exception {
    errContent.reset();
    System.setErr(initialStdErr);
  }

  @Test
  public void testGetfattrValidations() throws Exception {
    errContent.reset();
    assertFalse("getfattr should fail without path",
        0 == runCommand(new String[] { "-getfattr", "-d"}));
    assertTrue(errContent.toString().contains("<path> is missing"));

    errContent.reset();
    assertFalse("getfattr should fail with extra argument",
        0 == runCommand(new String[] { "-getfattr", "extra", "-d", "/test"}));
    assertTrue(errContent.toString().contains("Too many arguments"));

    errContent.reset();
    assertFalse("getfattr should fail without \"-n name\" or \"-d\"",
        0 == runCommand(new String[] { "-getfattr", "/test"}));
    assertTrue(errContent.toString().contains("Must specify '-n name' or '-d' option"));

    errContent.reset();
    assertFalse("getfattr should fail with invalid encoding",
        0 == runCommand(new String[] { "-getfattr", "-d", "-e", "aaa", "/test"}));
    assertTrue(errContent.toString().contains("Invalid/unsupported encoding option specified: aaa"));
  }

  @Test
  public void testSetfattrValidations() throws Exception {
    errContent.reset();
    assertFalse("setfattr should fail without path",
        0 == runCommand(new String[] { "-setfattr", "-n", "user.a1" }));
    assertTrue(errContent.toString().contains("<path> is missing"));

    errContent.reset();
    assertFalse("setfattr should fail with extra arguments",
        0 == runCommand(new String[] { "-setfattr", "extra", "-n", "user.a1", "/test"}));
    assertTrue(errContent.toString().contains("Too many arguments"));

    errContent.reset();
    assertFalse("setfattr should fail without \"-n name\" or \"-x name\"",
        0 == runCommand(new String[] { "-setfattr", "/test"}));
    assertTrue(errContent.toString().contains("Must specify '-n name' or '-x name' option"));
  }

  private int runCommand(String[] commands) throws Exception {
    return ToolRunner.run(conf, new FsShell(), commands);
  }
}

@@ -37,6 +37,7 @@ import java.io.InputStreamReader;
import java.lang.reflect.Method;
import java.security.PrivilegedExceptionAction;
import java.util.Collection;
import java.util.ConcurrentModificationException;
import java.util.LinkedHashSet;
import java.util.Set;

@@ -845,4 +846,69 @@ public class TestUserGroupInformation {
    Collection<Token<? extends TokenIdentifier>> tokens = ugi.getCredentials().getAllTokens();
    assertEquals(1, tokens.size());
  }

  /**
   * This test checks a race condition between getting and adding tokens for
   * the current user. Calling UserGroupInformation.getCurrentUser() returns
   * a new object each time, so simply making these methods synchronized was not
   * enough to prevent race conditions and causing a
   * ConcurrentModificationException. These methods are synchronized on the
   * Subject, which is the same object between UserGroupInformation instances.
   * This test tries to cause a CME, by exposing the race condition. Previously
   * this test would fail every time; now it does not.
   */
  @Test
  public void testTokenRaceCondition() throws Exception {
    UserGroupInformation userGroupInfo =
        UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES);
    userGroupInfo.doAs(new PrivilegedExceptionAction<Void>(){
      @Override
      public Void run() throws Exception {
        // make sure it is not the same as the login user because we use the
        // same UGI object for every instantiation of the login user and you
        // won't run into the race condition otherwise
        assertNotEquals(UserGroupInformation.getLoginUser(),
            UserGroupInformation.getCurrentUser());

        GetTokenThread thread = new GetTokenThread();
        try {
          thread.start();
          for (int i = 0; i < 100; i++) {
            @SuppressWarnings("unchecked")
            Token<? extends TokenIdentifier> t = mock(Token.class);
            when(t.getService()).thenReturn(new Text("t" + i));
            UserGroupInformation.getCurrentUser().addToken(t);
            assertNull("ConcurrentModificationException encountered",
                thread.cme);
          }
        } catch (ConcurrentModificationException cme) {
          cme.printStackTrace();
          fail("ConcurrentModificationException encountered");
        } finally {
          thread.runThread = false;
          thread.join(5 * 1000);
        }
        return null;
      }});
  }

  static class GetTokenThread extends Thread {
    boolean runThread = true;
    volatile ConcurrentModificationException cme = null;

    @Override
    public void run() {
      while(runThread) {
        try {
          UserGroupInformation.getCurrentUser().getCredentials();
        } catch (ConcurrentModificationException cme) {
          this.cme = cme;
          cme.printStackTrace();
          runThread = false;
        } catch (IOException ex) {
          ex.printStackTrace();
        }
      }
    }
  }
}

@@ -24,6 +24,7 @@ import java.io.InputStreamReader;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Time;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.BiMap;

@@ -79,7 +80,7 @@ public class IdUserGroup {
  }

  synchronized private boolean isExpired() {
    return lastUpdateTime - System.currentTimeMillis() > timeout;
    return Time.monotonicNow() - lastUpdateTime > timeout;
  }

  // If can't update the maps, will keep using the old ones

@@ -210,7 +211,7 @@ public class IdUserGroup {

    uidNameMap = uMap;
    gidNameMap = gMap;
    lastUpdateTime = System.currentTimeMillis();
    lastUpdateTime = Time.monotonicNow();
  }

  synchronized public int getUid(String user) throws IOException {
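A small illustrative sketch, not in the patch, of the expiry pattern the fix moves to: elapsed time is measured against Hadoop's monotonic clock, so wall-clock adjustments can no longer make the cached maps look permanently fresh (the old expression compared a timestamp against the wall clock and was effectively always false). The variable names are made up.

    long lastUpdate = Time.monotonicNow();   // recorded when the maps are rebuilt
    // ... some time later ...
    boolean expired = Time.monotonicNow() - lastUpdate > timeoutMillis;
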
@@ -19,11 +19,14 @@ package org.apache.hadoop.oncrpc;

import java.io.IOException;
import java.net.DatagramSocket;
import java.net.InetSocketAddress;
import java.net.SocketAddress;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState;
import org.apache.hadoop.oncrpc.security.Verifier;
import org.apache.hadoop.oncrpc.security.VerifierNone;
import org.apache.hadoop.portmap.PortmapMapping;
import org.apache.hadoop.portmap.PortmapRequest;
import org.jboss.netty.buffer.ChannelBuffer;

@@ -37,7 +40,7 @@ import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
 * and implement {@link #handleInternal} to handle the requests received.
 */
public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
  private static final Log LOG = LogFactory.getLog(RpcProgram.class);
  static final Log LOG = LogFactory.getLog(RpcProgram.class);
  public static final int RPCB_PORT = 111;
  private final String program;
  private final String host;

@@ -45,6 +48,7 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
  private final int progNumber;
  private final int lowProgVersion;
  private final int highProgVersion;
  private final boolean allowInsecurePorts;

  /**
   * If not null, this will be used as the socket to use to connect to the

@@ -61,10 +65,14 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
   * @param progNumber program number as defined in RFC 1050
   * @param lowProgVersion lowest version of the specification supported
   * @param highProgVersion highest version of the specification supported
   * @param DatagramSocket registrationSocket if not null, use this socket to
   *        register with portmap daemon
   * @param allowInsecurePorts true to allow client connections from
   *        unprivileged ports, false otherwise
   */
  protected RpcProgram(String program, String host, int port, int progNumber,
      int lowProgVersion, int highProgVersion,
      DatagramSocket registrationSocket) {
      DatagramSocket registrationSocket, boolean allowInsecurePorts) {
    this.program = program;
    this.host = host;
    this.port = port;

@@ -72,6 +80,9 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
    this.lowProgVersion = lowProgVersion;
    this.highProgVersion = highProgVersion;
    this.registrationSocket = registrationSocket;
    this.allowInsecurePorts = allowInsecurePorts;
    LOG.info("Will " + (allowInsecurePorts ? "" : "not ") + "accept client "
        + "connections from unprivileged ports");
  }

  /**

@@ -133,43 +144,82 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
      throws Exception {
    RpcInfo info = (RpcInfo) e.getMessage();
    RpcCall call = (RpcCall) info.header();

    SocketAddress remoteAddress = info.remoteAddress();
    if (!allowInsecurePorts) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Will not allow connections from unprivileged ports. " +
            "Checking for valid client port...");
      }
      if (remoteAddress instanceof InetSocketAddress) {
        InetSocketAddress inetRemoteAddress = (InetSocketAddress) remoteAddress;
        if (inetRemoteAddress.getPort() > 1023) {
          LOG.warn("Connection attempted from '" + inetRemoteAddress + "' "
              + "which is an unprivileged port. Rejecting connection.");
          sendRejectedReply(call, remoteAddress, ctx);
          return;
        } else {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Accepting connection from '" + remoteAddress + "'");
          }
        }
      } else {
        LOG.warn("Could not determine remote port of socket address '" +
            remoteAddress + "'. Rejecting connection.");
        sendRejectedReply(call, remoteAddress, ctx);
        return;
      }
    }

    if (LOG.isTraceEnabled()) {
      LOG.trace(program + " procedure #" + call.getProcedure());
    }

    if (this.progNumber != call.getProgram()) {
      LOG.warn("Invalid RPC call program " + call.getProgram());
      RpcAcceptedReply reply = RpcAcceptedReply.getInstance(call.getXid(),
          AcceptState.PROG_UNAVAIL, Verifier.VERIFIER_NONE);

      XDR out = new XDR();
      reply.write(out);
      ChannelBuffer b = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
          .buffer());
      RpcResponse rsp = new RpcResponse(b, info.remoteAddress());
      RpcUtil.sendRpcResponse(ctx, rsp);
      sendAcceptedReply(call, remoteAddress, AcceptState.PROG_UNAVAIL, ctx);
      return;
    }

    int ver = call.getVersion();
    if (ver < lowProgVersion || ver > highProgVersion) {
      LOG.warn("Invalid RPC call version " + ver);
      RpcAcceptedReply reply = RpcAcceptedReply.getInstance(call.getXid(),
          AcceptState.PROG_MISMATCH, Verifier.VERIFIER_NONE);

      XDR out = new XDR();
      reply.write(out);
      out.writeInt(lowProgVersion);
      out.writeInt(highProgVersion);
      ChannelBuffer b = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
          .buffer());
      RpcResponse rsp = new RpcResponse(b, info.remoteAddress());
      RpcUtil.sendRpcResponse(ctx, rsp);
      sendAcceptedReply(call, remoteAddress, AcceptState.PROG_MISMATCH, ctx);
      return;
    }

    handleInternal(ctx, info);
  }

  private void sendAcceptedReply(RpcCall call, SocketAddress remoteAddress,
      AcceptState acceptState, ChannelHandlerContext ctx) {
    RpcAcceptedReply reply = RpcAcceptedReply.getInstance(call.getXid(),
        acceptState, Verifier.VERIFIER_NONE);

    XDR out = new XDR();
    reply.write(out);
    if (acceptState == AcceptState.PROG_MISMATCH) {
      out.writeInt(lowProgVersion);
      out.writeInt(highProgVersion);
    }
    ChannelBuffer b = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
        .buffer());
    RpcResponse rsp = new RpcResponse(b, remoteAddress);
    RpcUtil.sendRpcResponse(ctx, rsp);
  }

  private static void sendRejectedReply(RpcCall call,
      SocketAddress remoteAddress, ChannelHandlerContext ctx) {
    XDR out = new XDR();
    RpcDeniedReply reply = new RpcDeniedReply(call.getXid(),
        RpcReply.ReplyState.MSG_DENIED,
        RpcDeniedReply.RejectState.AUTH_ERROR, new VerifierNone());
    reply.write(out);
    ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
        .buffer());
    RpcResponse rsp = new RpcResponse(buf, remoteAddress);
    RpcUtil.sendRpcResponse(ctx, rsp);
  }

  protected abstract void handleInternal(ChannelHandlerContext ctx, RpcInfo info);

@@ -28,6 +28,8 @@ import java.util.Random;
import org.apache.hadoop.oncrpc.RpcUtil.RpcFrameDecoder;
import org.apache.hadoop.oncrpc.security.CredentialsNone;
import org.apache.hadoop.oncrpc.security.VerifierNone;
import org.apache.log4j.Level;
import org.apache.commons.logging.impl.Log4JLogger;
import org.jboss.netty.buffer.ByteBufferBackedChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;

@@ -38,10 +40,16 @@ import org.junit.Test;
import org.mockito.Mockito;

public class TestFrameDecoder {

  static {
    ((Log4JLogger) RpcProgram.LOG).getLogger().setLevel(Level.ALL);
  }

  private static int resultSize;

  static void testRequest(XDR request, int serverPort) {
    // Reset resultSize so as to avoid interference from other tests in this class.
    resultSize = 0;
    SimpleTcpClient tcpClient = new SimpleTcpClient("localhost", serverPort, request,
        true);
    tcpClient.run();

@@ -50,9 +58,10 @@ public class TestFrameDecoder {
  static class TestRpcProgram extends RpcProgram {

    protected TestRpcProgram(String program, String host, int port,
        int progNumber, int lowProgVersion, int highProgVersion) {
        int progNumber, int lowProgVersion, int highProgVersion,
        boolean allowInsecurePorts) {
      super(program, host, port, progNumber, lowProgVersion, highProgVersion,
          null);
          null, allowInsecurePorts);
    }

    @Override

@@ -149,26 +158,7 @@ public class TestFrameDecoder {

  @Test
  public void testFrames() {

    Random rand = new Random();
    int serverPort = 30000 + rand.nextInt(10000);
    int retries = 10;    // A few retries in case initial choice is in use.

    while (true) {
      try {
        RpcProgram program = new TestFrameDecoder.TestRpcProgram("TestRpcProgram",
            "localhost", serverPort, 100000, 1, 2);
        SimpleTcpServer tcpServer = new SimpleTcpServer(serverPort, program, 1);
        tcpServer.run();
        break;          // Successfully bound a port, break out.
      } catch (ChannelException ce) {
        if (retries-- > 0) {
          serverPort += rand.nextInt(20); // Port in use? Try another.
        } else {
          throw ce;     // Out of retries.
        }
      }
    }
    int serverPort = startRpcServer(true);

    XDR xdrOut = createGetportMount();
    int headerSize = xdrOut.size();

@@ -183,6 +173,47 @@ public class TestFrameDecoder {
    // Verify the server got the request with right size
    assertEquals(requestSize, resultSize);
  }

  @Test
  public void testUnprivilegedPort() {
    // Don't allow connections from unprivileged ports. Given that this test is
    // presumably not being run by root, this will be the case.
    int serverPort = startRpcServer(false);

    XDR xdrOut = createGetportMount();
    int bufsize = 2 * 1024 * 1024;
    byte[] buffer = new byte[bufsize];
    xdrOut.writeFixedOpaque(buffer);

    // Send the request to the server
    testRequest(xdrOut, serverPort);

    // Verify the server rejected the request.
    assertEquals(0, resultSize);
  }

  private static int startRpcServer(boolean allowInsecurePorts) {
    Random rand = new Random();
    int serverPort = 30000 + rand.nextInt(10000);
    int retries = 10;    // A few retries in case initial choice is in use.

    while (true) {
      try {
        RpcProgram program = new TestFrameDecoder.TestRpcProgram("TestRpcProgram",
            "localhost", serverPort, 100000, 1, 2, allowInsecurePorts);
        SimpleTcpServer tcpServer = new SimpleTcpServer(serverPort, program, 1);
        tcpServer.run();
        break;          // Successfully bound a port, break out.
      } catch (ChannelException ce) {
        if (retries-- > 0) {
          serverPort += rand.nextInt(20); // Port in use? Try another.
        } else {
          throw ce;     // Out of retries.
        }
      }
    }
    return serverPort;
  }

  static void createPortmapXDRheader(XDR xdr_out, int procedure) {
    // Make this a method

@@ -0,0 +1,18 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# log4j configuration used during build and unit tests

log4j.rootLogger=info,stdout
log4j.threshhold=ALL
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n

@@ -32,14 +32,14 @@ import org.apache.hadoop.mount.MountdBase;
 */
public class Mountd extends MountdBase {

  public Mountd(Configuration config, DatagramSocket registrationSocket)
      throws IOException {
    super(new RpcProgramMountd(config, registrationSocket));
  public Mountd(Configuration config, DatagramSocket registrationSocket,
      boolean allowInsecurePorts) throws IOException {
    super(new RpcProgramMountd(config, registrationSocket, allowInsecurePorts));
  }

  public static void main(String[] args) throws IOException {
    Configuration config = new Configuration();
    Mountd mountd = new Mountd(config, null);
    Mountd mountd = new Mountd(config, null, true);
    mountd.start(true);
  }
}

@@ -79,11 +79,11 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {

  private final NfsExports hostsMatcher;

  public RpcProgramMountd(Configuration config,
      DatagramSocket registrationSocket) throws IOException {
  public RpcProgramMountd(Configuration config, DatagramSocket registrationSocket,
      boolean allowInsecurePorts) throws IOException {
    // Note that RPC cache is not enabled
    super("mountd", "localhost", config.getInt("nfs3.mountd.port", PORT),
        PROGRAM, VERSION_1, VERSION_3, registrationSocket);
        PROGRAM, VERSION_1, VERSION_3, registrationSocket, allowInsecurePorts);
    exports = new ArrayList<String>();
    exports.add(config.get(Nfs3Constant.EXPORT_POINT,
        Nfs3Constant.EXPORT_POINT_DEFAULT));

@@ -21,6 +21,7 @@ import java.io.IOException;
import java.net.DatagramSocket;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.nfs.mount.Mountd;
import org.apache.hadoop.nfs.nfs3.Nfs3Base;
import org.apache.hadoop.util.StringUtils;

@@ -41,12 +42,13 @@ public class Nfs3 extends Nfs3Base {
  }

  public Nfs3(Configuration conf) throws IOException {
    this(conf, null);
    this(conf, null, true);
  }

  public Nfs3(Configuration conf, DatagramSocket registrationSocket) throws IOException {
    super(new RpcProgramNfs3(conf, registrationSocket), conf);
    mountd = new Mountd(conf, registrationSocket);
  public Nfs3(Configuration conf, DatagramSocket registrationSocket,
      boolean allowInsecurePorts) throws IOException {
    super(new RpcProgramNfs3(conf, registrationSocket, allowInsecurePorts), conf);
    mountd = new Mountd(conf, registrationSocket, allowInsecurePorts);
  }

  public Mountd getMountd() {

@@ -61,8 +63,13 @@ public class Nfs3 extends Nfs3Base {

  static void startService(String[] args,
      DatagramSocket registrationSocket) throws IOException {
    StringUtils.startupShutdownMessage(Nfs3.class, args, LOG);
    final Nfs3 nfsServer = new Nfs3(new Configuration(), registrationSocket);
    StringUtils.startupShutdownMessage(Nfs3.class, args, LOG);
    Configuration conf = new Configuration();
    boolean allowInsecurePorts = conf.getBoolean(
        DFSConfigKeys.DFS_NFS_ALLOW_INSECURE_PORTS_KEY,
        DFSConfigKeys.DFS_NFS_ALLOW_INSECURE_PORTS_DEFAULT);
    final Nfs3 nfsServer = new Nfs3(new Configuration(), registrationSocket,
        allowInsecurePorts);
    nfsServer.startServiceInternal(true);
  }

@@ -166,11 +166,12 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {

  private final RpcCallCache rpcCallCache;

  public RpcProgramNfs3(Configuration config, DatagramSocket registrationSocket)
      throws IOException {
  public RpcProgramNfs3(Configuration config, DatagramSocket registrationSocket,
      boolean allowInsecurePorts) throws IOException {
    super("NFS3", "localhost", config.getInt(Nfs3Constant.NFS3_SERVER_PORT,
        Nfs3Constant.NFS3_SERVER_PORT_DEFAULT), Nfs3Constant.PROGRAM,
        Nfs3Constant.VERSION, Nfs3Constant.VERSION, registrationSocket);
        Nfs3Constant.VERSION, Nfs3Constant.VERSION, registrationSocket,
        allowInsecurePorts);

    config.set(FsPermission.UMASK_LABEL, "000");
    iug = new IdUserGroup();

@@ -254,6 +254,69 @@ Trunk (Unreleased)
    HDFS-5794. Fix the inconsistency of layout version number of
    ADD_DATANODE_AND_STORAGE_UUIDS between trunk and branch-2. (jing9)

  BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS

    HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)

    HDFS-6302. Implement XAttr as a INode feature. (Yi Liu via umamahesh)

    HDFS-6309. Javadocs for Xattrs apis in DFSClient and other minor fixups. (Charles Lamb via umamahesh)

    HDFS-6258. Namenode server-side storage for XAttrs. (Yi Liu via umamahesh)

    HDFS-6303. HDFS implementation of FileContext API for XAttrs. (Yi Liu and Charles Lamb via umamahesh)

    HDFS-6324. Shift XAttr helper code out for reuse. (Yi Liu via umamahesh)

    HDFS-6301. NameNode: persist XAttrs in fsimage and record XAttrs modifications to edit log.
    (Yi Liu via umamahesh)

    HDFS-6298. XML based End-to-End test for getfattr and setfattr commands. (Yi Liu via umamahesh)

    HDFS-6314. Test cases for XAttrs. (Yi Liu via umamahesh)

    HDFS-6344. Maximum limit on the size of an xattr. (Yi Liu via umamahesh)

    HDFS-6377. Unify xattr name and value limits into a single limit. (wang)

    HDFS-6373. Remove support for extended attributes on symlinks. (Charles Lamb via wang)

    HDFS-6283. Write end user documentation for xattrs. (wang)

    HDFS-6412. Interface audience and stability annotations missing from
    several new classes related to xattrs. (wang)

    HDFS-6259. Support extended attributes via WebHDFS. (yliu)

    HDFS-6346. Optimize OP_SET_XATTRS by persisting single Xattr entry per setXattr/removeXattr api call
    (Yi Liu via umamahesh)

    HDFS-6331. ClientProtocol#setXattr should not be annotated idempotent.
    (umamahesh via wang)

    HDFS-6335. TestOfflineEditsViewer for XAttr. (Yi Liu via umamahesh)

    HDFS-6343. fix TestNamenodeRetryCache and TestRetryCacheWithHA failures. (umamahesh)

    HDFS-6366. FsImage loading failed with RemoveXattr op (umamahesh)

    HDFS-6357. SetXattr should persist rpcIDs for handling retrycache with Namenode restart and HA
    (umamahesh)

    HDFS-6372. Handle setXattr rpcIDs for OfflineEditsViewer. (umamahesh)

    HDFS-6410. DFSClient unwraps AclException in xattr methods, but those
    methods cannot throw AclException. (wang)

    HDFS-6413. xattr names erroneously handled as case-insensitive.
    (Charles Lamb via cnauroth)

    HDFS-6414. xattr modification operations are based on state of latest
    snapshot instead of current version of inode. (Andrew Wang via cnauroth)

    HDFS-6374. setXAttr should require the user to be the owner of the file
    or directory (Charles Lamb via wang)

Release 2.5.0 - UNRELEASED

  INCOMPATIBLE CHANGES

@@ -273,6 +336,9 @@ Release 2.5.0 - UNRELEASED
    HDFS-6334. Client failover proxy provider for IP failover based NN HA.
    (kihwal)

    HDFS-6406. Add capability for NFS gateway to reject connections from
    unprivileged ports. (atm)

  IMPROVEMENTS

    HDFS-6007. Update documentation about short-circuit local reads (iwasakims

@@ -369,6 +435,11 @@ Release 2.5.0 - UNRELEASED
    HDFS-6345. DFS.listCacheDirectives() should allow filtering based on
    cache directive ID. (wang)

    HDFS-6432. Add snapshot related APIs to webhdfs. (jing9)

    HDFS-6396. Remove support for ACL feature from INodeSymlink.
    (Charles Lamb via wang)

  OPTIMIZATIONS

    HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)

@@ -492,6 +563,26 @@ Release 2.5.0 - UNRELEASED
    HDFS-6250. Fix test failed in TestBalancerWithNodeGroup.testBalancerWithRackLocality
    (Binglin Chang and Chen He via junping_du)

    HDFS-4913. Deleting file through fuse-dfs when using trash fails requiring
    root permissions (cmccabe)

    HDFS-6421. Fix vecsum.c compile on BSD and some other systems. (Mit Desai
    via Colin Patrick McCabe)

    HDFS-6419. TestBookKeeperHACheckpoints#TestSBNCheckpoints fails on trunk.
    (Akira AJISAKA via kihwal)

    HDFS-6409. Fix typo in log message about NameNode layout version upgrade.
    (Chen He via cnauroth)

    HDFS-6433. Replace BytesMoved class with AtomicLong.
    (Benoy Antony via cnauroth)

    HDFS-6438. DeleteSnapshot should be a DELETE request in WebHdfs. (jing9)

    HDFS-6423. Diskspace quota usage should be updated when appending data to
    partial block. (jing9)

Release 2.4.1 - UNRELEASED

  INCOMPATIBLE CHANGES

@@ -569,6 +660,12 @@ Release 2.4.1 - UNRELEASED
    HDFS-6402. Suppress findbugs warning for failure to override equals and
    hashCode in FsAclPermission. (cnauroth)

    HDFS-6325. Append should fail if the last block has insufficient number of
    replicas (Keith Pak via cos)

    HDFS-6397. NN shows inconsistent value in deadnode count.
    (Mohammad Kamrul Islam via kihwal)

Release 2.4.0 - 2014-04-07

  INCOMPATIBLE CHANGES

@@ -290,6 +290,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                  <include>NamenodeProtocol.proto</include>
                  <include>QJournalProtocol.proto</include>
                  <include>acl.proto</include>
                  <include>xattr.proto</include>
                  <include>datatransfer.proto</include>
                  <include>fsimage.proto</include>
                  <include>hdfs.proto</include>

@@ -23,13 +23,10 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.server.namenode.ha.TestStandbyCheckpoints;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;

import com.google.common.collect.ImmutableList;

/**
 * Runs the same tests as TestStandbyCheckpoints, but
 * using a bookkeeper journal manager as the shared directory

@@ -43,19 +40,11 @@ public class TestBookKeeperHACheckpoints extends TestStandbyCheckpoints {
  @Override
  @Before
  public void setupCluster() throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5);
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    Configuration conf = setupCommonConfig();
    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
        BKJMUtil.createJournalURI("/checkpointing" + journalCount++)
            .toString());
    BKJMUtil.addJournalManagerDefinition(conf);
    conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
    conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY, SlowCodec.class
        .getCanonicalName());
    CompressionCodecFactory.setCodecClasses(conf, ImmutableList
        .<Class> of(SlowCodec.class));
    MiniDFSNNTopology topology = new MiniDFSNNTopology()
        .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
            .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))

@@ -25,6 +25,7 @@ import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;

import org.apache.hadoop.classification.InterfaceAudience;

@@ -414,6 +415,33 @@ public class Hdfs extends AbstractFileSystem {
  public AclStatus getAclStatus(Path path) throws IOException {
    return dfs.getAclStatus(getUriPath(path));
  }

  @Override
  public void setXAttr(Path path, String name, byte[] value,
      EnumSet<XAttrSetFlag> flag) throws IOException {
    dfs.setXAttr(getUriPath(path), name, value, flag);
  }

  @Override
  public byte[] getXAttr(Path path, String name) throws IOException {
    return dfs.getXAttr(getUriPath(path), name);
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
    return dfs.getXAttrs(getUriPath(path));
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
      throws IOException {
    return dfs.getXAttrs(getUriPath(path), names);
  }

  @Override
  public void removeXAttr(Path path, String name) throws IOException {
    dfs.removeXAttr(getUriPath(path), name);
  }

  /**
   * Renew an existing delegation token.
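A hedged sketch of how these AbstractFileSystem hooks are typically reached from user code, assuming the FileContext xattr methods introduced alongside this change set (HDFS-6303 in the CHANGES above); the path and attribute name are invented:

    // FileContext delegates to the AbstractFileSystem (Hdfs) overrides above.
    FileContext fc = FileContext.getFileContext(new Configuration());
    Path p = new Path("/tmp/example");                      // hypothetical path
    fc.setXAttr(p, "user.myAttr", "myValue".getBytes());
    byte[] v = fc.getXAttr(p, "user.myAttr");
    fc.removeXAttr(p, "user.myAttr");
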
@@ -0,0 +1,149 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs;

import java.util.Arrays;

import org.apache.hadoop.classification.InterfaceAudience;

/**
 * XAttr is the POSIX Extended Attribute model similar to that found in
 * traditional Operating Systems. Extended Attributes consist of one
 * or more name/value pairs associated with a file or directory. Four
 * namespaces are defined: user, trusted, security and system.
 * 1) USER namespace attributes may be used by any user to store
 * arbitrary information. Access permissions in this namespace are
 * defined by a file directory's permission bits.
 * <br>
 * 2) TRUSTED namespace attributes are only visible and accessible to
 * privileged users (a file or directory's owner or the fs
 * admin). This namespace is available from both user space
 * (filesystem API) and fs kernel.
 * <br>
 * 3) SYSTEM namespace attributes are used by the fs kernel to store
 * system objects. This namespace is only available in the fs
 * kernel. It is not visible to users.
 * <br>
 * 4) SECURITY namespace attributes are used by the fs kernel for
 * security features. It is not visible to users.
 * <p/>
 * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
 * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
 *
 */
@InterfaceAudience.Private
public class XAttr {

  public static enum NameSpace {
    USER,
    TRUSTED,
    SECURITY,
    SYSTEM;
  }

  private final NameSpace ns;
  private final String name;
  private final byte[] value;

  public static class Builder {
    private NameSpace ns = NameSpace.USER;
    private String name;
    private byte[] value;

    public Builder setNameSpace(NameSpace ns) {
      this.ns = ns;
      return this;
    }

    public Builder setName(String name) {
      this.name = name;
      return this;
    }

    public Builder setValue(byte[] value) {
      this.value = value;
      return this;
    }

    public XAttr build() {
      return new XAttr(ns, name, value);
    }
  }

  private XAttr(NameSpace ns, String name, byte[] value) {
    this.ns = ns;
    this.name = name;
    this.value = value;
  }

  public NameSpace getNameSpace() {
    return ns;
  }

  public String getName() {
    return name;
  }

  public byte[] getValue() {
    return value;
  }

  @Override
  public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + ((name == null) ? 0 : name.hashCode());
    result = prime * result + ((ns == null) ? 0 : ns.hashCode());
    result = prime * result + Arrays.hashCode(value);
    return result;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null) {
      return false;
    }
    if (getClass() != obj.getClass()) {
      return false;
    }
    XAttr other = (XAttr) obj;
    if (name == null) {
      if (other.name != null) {
        return false;
      }
    } else if (!name.equals(other.name)) {
      return false;
    }
    if (ns != other.ns) {
      return false;
    }
    if (!Arrays.equals(value, other.value)) {
      return false;
    }
    return true;
  }

  @Override
  public String toString() {
    return "XAttr [ns=" + ns + ", name=" + name + ", value="
        + Arrays.toString(value) + "]";
  }
}
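A minimal sketch, not part of the file above, of constructing an XAttr with the Builder it defines; the attribute name and value are invented:

    XAttr attr = new XAttr.Builder()
        .setNameSpace(XAttr.NameSpace.USER)   // USER is also the default namespace
        .setName("myAttr")
        .setValue("myValue".getBytes())
        .build();
    // equals()/hashCode() compare namespace, name and value, so XAttr objects
    // can be used directly as map keys or list members.
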
@@ -109,6 +109,8 @@ import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;

@@ -2757,6 +2759,72 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
          UnresolvedPathException.class);
    }
  }

  public void setXAttr(String src, String name, byte[] value,
      EnumSet<XAttrSetFlag> flag) throws IOException {
    checkOpen();
    try {
      namenode.setXAttr(src, XAttrHelper.buildXAttr(name, value), flag);
    } catch (RemoteException re) {
      throw re.unwrapRemoteException(AccessControlException.class,
          FileNotFoundException.class,
          NSQuotaExceededException.class,
          SafeModeException.class,
          SnapshotAccessControlException.class,
          UnresolvedPathException.class);
    }
  }

  public byte[] getXAttr(String src, String name) throws IOException {
    checkOpen();
    try {
      final List<XAttr> xAttrs = XAttrHelper.buildXAttrAsList(name);
      final List<XAttr> result = namenode.getXAttrs(src, xAttrs);
      return XAttrHelper.getFirstXAttrValue(result);
    } catch(RemoteException re) {
      throw re.unwrapRemoteException(AccessControlException.class,
          FileNotFoundException.class,
          UnresolvedPathException.class);
    }
  }

  public Map<String, byte[]> getXAttrs(String src) throws IOException {
    checkOpen();
    try {
      return XAttrHelper.buildXAttrMap(namenode.getXAttrs(src, null));
    } catch(RemoteException re) {
      throw re.unwrapRemoteException(AccessControlException.class,
          FileNotFoundException.class,
          UnresolvedPathException.class);
    }
  }

  public Map<String, byte[]> getXAttrs(String src, List<String> names)
      throws IOException {
    checkOpen();
    try {
      return XAttrHelper.buildXAttrMap(namenode.getXAttrs(
          src, XAttrHelper.buildXAttrs(names)));
    } catch(RemoteException re) {
      throw re.unwrapRemoteException(AccessControlException.class,
          FileNotFoundException.class,
          UnresolvedPathException.class);
    }
  }

  public void removeXAttr(String src, String name) throws IOException {
    checkOpen();
    try {
      namenode.removeXAttr(src, XAttrHelper.buildXAttr(name));
    } catch(RemoteException re) {
      throw re.unwrapRemoteException(AccessControlException.class,
          FileNotFoundException.class,
          NSQuotaExceededException.class,
          SafeModeException.class,
          SnapshotAccessControlException.class,
          UnresolvedPathException.class);
    }
  }

  @Override // RemotePeerFactory
  public Peer newConnectedPeer(InetSocketAddress addr) throws IOException {

@@ -192,6 +192,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
  public static final String  DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT = "supergroup";
  public static final String  DFS_NAMENODE_ACLS_ENABLED_KEY = "dfs.namenode.acls.enabled";
  public static final boolean DFS_NAMENODE_ACLS_ENABLED_DEFAULT = false;
  public static final String  DFS_NAMENODE_XATTRS_ENABLED_KEY = "dfs.namenode.xattrs.enabled";
  public static final boolean DFS_NAMENODE_XATTRS_ENABLED_DEFAULT = true;
  public static final String  DFS_ADMIN = "dfs.cluster.administrators";
  public static final String  DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.https.server.keystore.resource";
  public static final String  DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-server.xml";

@@ -295,6 +297,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
  public static final long    DFS_NAMENODE_MIN_BLOCK_SIZE_DEFAULT = 1024*1024;
  public static final String  DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY = "dfs.namenode.fs-limits.max-blocks-per-file";
  public static final long    DFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT = 1024*1024;
  public static final String  DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY = "dfs.namenode.fs-limits.max-xattrs-per-inode";
  public static final int     DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT = 32;
  public static final String  DFS_NAMENODE_MAX_XATTR_SIZE_KEY = "dfs.namenode.fs-limits.max-xattr-size";
  public static final int     DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT = 16384;


  //Following keys have no defaults
  public static final String  DFS_DATANODE_DATA_DIR_KEY = "dfs.datanode.data.dir";

@@ -631,9 +638,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {

  public static final String  DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE =
      "dfs.client.hedged.read.threadpool.size";
  public static final int     DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE = 0;
  public static final String  DFS_NFS_KEYTAB_FILE_KEY = "dfs.nfs.keytab.file";
  public static final String  DFS_NFS_KERBEROS_PRINCIPAL_KEY = "dfs.nfs.kerberos.principal";
  public static final String  DFS_NFS_REGISTRATION_PORT_KEY = "dfs.nfs.registration.port";
  public static final int     DFS_NFS_REGISTRATION_PORT_DEFAULT = 40; // Currently unassigned.
  public static final int     DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE = 0;
  public static final String  DFS_NFS_KEYTAB_FILE_KEY = "dfs.nfs.keytab.file";
  public static final String  DFS_NFS_KERBEROS_PRINCIPAL_KEY = "dfs.nfs.kerberos.principal";
  public static final String  DFS_NFS_REGISTRATION_PORT_KEY = "dfs.nfs.registration.port";
  public static final int     DFS_NFS_REGISTRATION_PORT_DEFAULT = 40; // Currently unassigned.
  public static final String  DFS_NFS_ALLOW_INSECURE_PORTS_KEY = "dfs.nfs.allow.insecure.ports";
  public static final boolean DFS_NFS_ALLOW_INSECURE_PORTS_DEFAULT = true;

}
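Illustrative only: how the new xattr limits and the NFS port setting introduced above might be read through Configuration. The local variable names are invented.

    Configuration conf = new Configuration();
    int maxXAttrsPerInode = conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY,
        DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT);   // 32 by default
    int maxXAttrSize = conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,
        DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT);         // 16384 by default
    boolean allowInsecure = conf.getBoolean(
        DFSConfigKeys.DFS_NFS_ALLOW_INSECURE_PORTS_KEY,
        DFSConfigKeys.DFS_NFS_ALLOW_INSECURE_PORTS_DEFAULT);        // true by default
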
@@ -25,6 +25,7 @@ import java.net.URI;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

@@ -46,6 +47,7 @@ import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;

@@ -1769,4 +1771,91 @@ public class DistributedFileSystem extends FileSystem {
      }
    }.resolve(this, absF);
  }

  @Override
  public void setXAttr(Path path, final String name, final byte[] value,
      final EnumSet<XAttrSetFlag> flag) throws IOException {
    Path absF = fixRelativePart(path);
    new FileSystemLinkResolver<Void>() {

      @Override
      public Void doCall(final Path p) throws IOException {
        dfs.setXAttr(getPathName(p), name, value, flag);
        return null;
      }

      @Override
      public Void next(final FileSystem fs, final Path p) throws IOException {
        fs.setXAttr(p, name, value, flag);
        return null;
      }
    }.resolve(this, absF);
  }

  @Override
  public byte[] getXAttr(Path path, final String name) throws IOException {
    final Path absF = fixRelativePart(path);
    return new FileSystemLinkResolver<byte[]>() {
      @Override
      public byte[] doCall(final Path p) throws IOException {
        return dfs.getXAttr(getPathName(p), name);
      }
      @Override
      public byte[] next(final FileSystem fs, final Path p)
          throws IOException, UnresolvedLinkException {
        return fs.getXAttr(p, name);
      }
    }.resolve(this, absF);
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
    final Path absF = fixRelativePart(path);
    return new FileSystemLinkResolver<Map<String, byte[]>>() {
      @Override
      public Map<String, byte[]> doCall(final Path p) throws IOException {
        return dfs.getXAttrs(getPathName(p));
      }
      @Override
      public Map<String, byte[]> next(final FileSystem fs, final Path p)
          throws IOException, UnresolvedLinkException {
        return fs.getXAttrs(p);
      }
    }.resolve(this, absF);
  }

  @Override
  public Map<String, byte[]> getXAttrs(Path path, final List<String> names)
      throws IOException {
    final Path absF = fixRelativePart(path);
    return new FileSystemLinkResolver<Map<String, byte[]>>() {
      @Override
      public Map<String, byte[]> doCall(final Path p) throws IOException {
        return dfs.getXAttrs(getPathName(p), names);
      }
      @Override
      public Map<String, byte[]> next(final FileSystem fs, final Path p)
          throws IOException, UnresolvedLinkException {
        return fs.getXAttrs(p, names);
      }
    }.resolve(this, absF);
  }

  @Override
  public void removeXAttr(Path path, final String name) throws IOException {
    Path absF = fixRelativePart(path);
    new FileSystemLinkResolver<Void>() {
      @Override
      public Void doCall(final Path p) throws IOException {
        dfs.removeXAttr(getPathName(p), name);
        return null;
      }

      @Override
      public Void next(final FileSystem fs, final Path p) throws IOException {
        fs.removeXAttr(p, name);
        return null;
      }
    }.resolve(this, absF);
  }
}
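Not part of the diff: a small sketch of the new xattr surface as seen from the generic FileSystem API, which the DistributedFileSystem overrides above implement; the path and attribute name are invented.

    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/tmp/example");
    fs.setXAttr(p, "user.myAttr", "myValue".getBytes(),
        EnumSet.of(XAttrSetFlag.CREATE));           // fail if the xattr already exists
    byte[] one = fs.getXAttr(p, "user.myAttr");
    Map<String, byte[]> all = fs.getXAttrs(p);      // keys are prefixed names, e.g. "user.myAttr"
    fs.removeXAttr(p, "user.myAttr");
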
@@ -0,0 +1,164 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs;

import java.util.List;
import java.util.Map;

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttr.NameSpace;

import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;

@InterfaceAudience.Private
public class XAttrHelper {

  /**
   * Build <code>XAttr</code> from xattr name with prefix.
   */
  public static XAttr buildXAttr(String name) {
    return buildXAttr(name, null);
  }

  /**
   * Build <code>XAttr</code> from name with prefix and value.
   * Name can not be null. Value can be null. The name and prefix
   * are validated.
   * Both name and namespace are case sensitive.
   */
  public static XAttr buildXAttr(String name, byte[] value) {
    Preconditions.checkNotNull(name, "XAttr name cannot be null.");

    final int prefixIndex = name.indexOf(".");
    if (prefixIndex < 4) {// Prefix length is at least 4.
      throw new HadoopIllegalArgumentException("An XAttr name must be " +
          "prefixed with user/trusted/security/system, followed by a '.'");
    } else if (prefixIndex == name.length() - 1) {
      throw new HadoopIllegalArgumentException("XAttr name cannot be empty.");
    }

    NameSpace ns;
    final String prefix = name.substring(0, prefixIndex).toLowerCase();
    if (prefix.equals(NameSpace.USER.toString().toLowerCase())) {
      ns = NameSpace.USER;
    } else if (prefix.equals(NameSpace.TRUSTED.toString().toLowerCase())) {
      ns = NameSpace.TRUSTED;
    } else if (prefix.equals(NameSpace.SYSTEM.toString().toLowerCase())) {
      ns = NameSpace.SYSTEM;
    } else if (prefix.equals(NameSpace.SECURITY.toString().toLowerCase())) {
      ns = NameSpace.SECURITY;
    } else {
      throw new HadoopIllegalArgumentException("An XAttr name must be " +
          "prefixed with user/trusted/security/system, followed by a '.'");
    }
    XAttr xAttr = (new XAttr.Builder()).setNameSpace(ns).setName(name.
        substring(prefixIndex + 1)).setValue(value).build();

    return xAttr;
  }

  /**
   * Build xattr name with prefix as <code>XAttr</code> list.
   */
  public static List<XAttr> buildXAttrAsList(String name) {
    XAttr xAttr = buildXAttr(name);
    List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
    xAttrs.add(xAttr);

    return xAttrs;
  }

  /**
   * Get value of first xattr from <code>XAttr</code> list
   */
  public static byte[] getFirstXAttrValue(List<XAttr> xAttrs) {
    byte[] value = null;
    XAttr xAttr = getFirstXAttr(xAttrs);
    if (xAttr != null) {
      value = xAttr.getValue();
      if (value == null) {
        value = new byte[0]; // xattr exists, but no value.
      }
    }
    return value;
  }

  /**
   * Get first xattr from <code>XAttr</code> list
   */
  public static XAttr getFirstXAttr(List<XAttr> xAttrs) {
    if (xAttrs != null && !xAttrs.isEmpty()) {
      return xAttrs.get(0);
    }

    return null;
  }

  /**
   * Build xattr map from <code>XAttr</code> list, the key is
   * xattr name with prefix, and value is xattr value.
   */
  public static Map<String, byte[]> buildXAttrMap(List<XAttr> xAttrs) {
    if (xAttrs == null) {
      return null;
    }
    Map<String, byte[]> xAttrMap = Maps.newHashMap();
    for (XAttr xAttr : xAttrs) {
      String name = getPrefixName(xAttr);
      byte[] value = xAttr.getValue();
      if (value == null) {
        value = new byte[0];
      }
      xAttrMap.put(name, value);
    }

    return xAttrMap;
  }

  /**
   * Get name with prefix from <code>XAttr</code>
   */
  public static String getPrefixName(XAttr xAttr) {
    if (xAttr == null) {
      return null;
    }

    String namespace = xAttr.getNameSpace().toString();
    return namespace.toLowerCase() + "." + xAttr.getName();
  }

  /**
   * Build <code>XAttr</code> list from xattr name list.
   */
  public static List<XAttr> buildXAttrs(List<String> names) {
    if (names == null || names.isEmpty()) {
      throw new HadoopIllegalArgumentException("XAttr names can not be " +
          "null or empty.");
    }

    List<XAttr> xAttrs = Lists.newArrayListWithCapacity(names.size());
    for (String name : names) {
      xAttrs.add(buildXAttr(name, null));
    }
    return xAttrs;
  }
}
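A brief illustrative round trip through the helper above; the attribute name and value are invented:

    XAttr xAttr = XAttrHelper.buildXAttr("user.myAttr", "myValue".getBytes());
    // xAttr.getNameSpace() == NameSpace.USER, xAttr.getName() == "myAttr"
    String prefixed = XAttrHelper.getPrefixName(xAttr);           // "user.myAttr"
    Map<String, byte[]> map =
        XAttrHelper.buildXAttrMap(Arrays.asList(xAttr));          // {"user.myAttr" -> value}
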
@ -31,10 +31,12 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
|
|||
import org.apache.hadoop.fs.FsServerDefaults;
|
||||
import org.apache.hadoop.fs.InvalidPathException;
|
||||
import org.apache.hadoop.fs.Options;
|
||||
import org.apache.hadoop.fs.XAttr;
|
||||
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
|
||||
import org.apache.hadoop.fs.Options.Rename;
|
||||
import org.apache.hadoop.fs.ParentNotDirectoryException;
|
||||
import org.apache.hadoop.fs.UnresolvedLinkException;
|
||||
import org.apache.hadoop.fs.XAttrSetFlag;
|
||||
import org.apache.hadoop.fs.permission.AclEntry;
|
||||
import org.apache.hadoop.fs.permission.AclStatus;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
|
@ -1254,4 +1256,66 @@ public interface ClientProtocol {
|
|||
*/
|
||||
@Idempotent
|
||||
public AclStatus getAclStatus(String src) throws IOException;
|
||||
|
||||
/**
|
||||
* Set xattr of a file or directory.
|
||||
* A regular user can only set xattr of "user" namespace.
|
||||
* A super user can set xattr of "user" and "trusted" namespace.
|
||||
* XAttr of "security" and "system" namespace is only used/exposed
|
||||
* internally to the FS impl.
|
||||
* <p/>
|
||||
* For xattr of "user" namespace, its access permissions are
|
||||
* defined by the file or directory permission bits.
|
||||
* XAttr will be set only when login user has correct permissions.
|
||||
* <p/>
|
||||
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
|
||||
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
|
||||
* @param src file or directory
|
||||
* @param xAttr <code>XAttr</code> to set
|
||||
* @param flag set flag
|
||||
* @throws IOException
|
||||
*/
|
||||
@AtMostOnce
|
||||
public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
|
||||
throws IOException;
|
||||
|
||||
/**
|
||||
* Get xattrs of file or directory. Values in xAttrs parameter are ignored.
|
||||
* If xAttrs is null or empty, this is equivalent to getting all xattrs of the file or
|
||||
* directory.
|
||||
* Only xattrs for which the login user has correct permissions will be returned.
|
||||
* <p/>
|
||||
* A regular user can only get xattr of "user" namespace.
|
||||
* A super user can get xattr of "user" and "trusted" namespace.
|
||||
* XAttr of "security" and "system" namespace is only used/exposed
|
||||
* internally to the FS impl.
|
||||
* <p/>
|
||||
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
|
||||
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
|
||||
* @param src file or directory
|
||||
* @param xAttrs xAttrs to get
|
||||
* @return List<XAttr> <code>XAttr</code> list
|
||||
* @throws IOException
|
||||
*/
|
||||
@Idempotent
|
||||
public List<XAttr> getXAttrs(String src, List<XAttr> xAttrs)
|
||||
throws IOException;
|
||||
|
||||
/**
|
||||
* Remove xattr of a file or directory. The value in the xAttr parameter is ignored.
|
||||
* Name must be prefixed with user/trusted/security/system.
|
||||
* <p/>
|
||||
* A regular user can only remove xattr of "user" namespace.
|
||||
* A super user can remove xattr of "user" and "trusted" namespace.
|
||||
* XAttr of "security" and "system" namespace is only used/exposed
|
||||
* internally to the FS impl.
|
||||
* <p/>
|
||||
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
|
||||
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
|
||||
* @param src file or directory
|
||||
* @param xAttr <code>XAttr</code> to remove
|
||||
* @throws IOException
|
||||
*/
|
||||
@Idempotent
|
||||
public void removeXAttr(String src, XAttr xAttr) throws IOException;
|
||||
}
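A hedged sketch of how a caller might exercise the three new RPCs, assuming namenode is an already-obtained ClientProtocol proxy and that the XAttrHelper utilities from earlier in this change are available; the path and attribute name are illustrative only:

import java.io.IOException;
import java.util.EnumSet;
import java.util.List;

import com.google.common.base.Charsets;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

class XAttrRpcSketch {
  static void roundTrip(ClientProtocol namenode) throws IOException {
    String src = "/user/alice/data.txt";                       // illustrative
    XAttr xAttr = XAttrHelper.buildXAttr("user.origin",
        "cluster-a".getBytes(Charsets.UTF_8));

    // CREATE is expected to fail if the xattr already exists, REPLACE if it
    // does not; the NameNode validates this via XAttrSetFlag.validate.
    namenode.setXAttr(src, xAttr, EnumSet.of(XAttrSetFlag.CREATE));

    // Values in the request list are ignored; only namespace + name select
    // which attributes come back, subject to the permission filtering above.
    List<XAttr> result = namenode.getXAttrs(src,
        XAttrHelper.buildXAttrAsList("user.origin"));

    namenode.removeXAttr(src, xAttr);
  }
}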
|
||||
|
|
|
@ -174,6 +174,12 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
|
|||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrResponseProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrResponseProto;
|
||||
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
|
||||
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INodeId;
|
||||
|
@ -302,6 +308,12 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
|
|||
|
||||
private static final RemoveAclResponseProto
|
||||
VOID_REMOVEACL_RESPONSE = RemoveAclResponseProto.getDefaultInstance();
|
||||
|
||||
private static final SetXAttrResponseProto
|
||||
VOID_SETXATTR_RESPONSE = SetXAttrResponseProto.getDefaultInstance();
|
||||
|
||||
private static final RemoveXAttrResponseProto
|
||||
VOID_REMOVEXATTR_RESPONSE = RemoveXAttrResponseProto.getDefaultInstance();
|
||||
|
||||
/**
|
||||
* Constructor
|
||||
|
@ -1262,4 +1274,38 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
|
|||
throw new ServiceException(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public SetXAttrResponseProto setXAttr(RpcController controller,
|
||||
SetXAttrRequestProto req) throws ServiceException {
|
||||
try {
|
||||
server.setXAttr(req.getSrc(), PBHelper.convertXAttr(req.getXAttr()),
|
||||
PBHelper.convert(req.getFlag()));
|
||||
} catch (IOException e) {
|
||||
throw new ServiceException(e);
|
||||
}
|
||||
return VOID_SETXATTR_RESPONSE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public GetXAttrsResponseProto getXAttrs(RpcController controller,
|
||||
GetXAttrsRequestProto req) throws ServiceException {
|
||||
try {
|
||||
return PBHelper.convertXAttrsResponse(server.getXAttrs(req.getSrc(),
|
||||
PBHelper.convertXAttrs(req.getXAttrsList())));
|
||||
} catch (IOException e) {
|
||||
throw new ServiceException(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public RemoveXAttrResponseProto removeXAttr(RpcController controller,
|
||||
RemoveXAttrRequestProto req) throws ServiceException {
|
||||
try {
|
||||
server.removeXAttr(req.getSrc(), PBHelper.convertXAttr(req.getXAttr()));
|
||||
} catch (IOException e) {
|
||||
throw new ServiceException(e);
|
||||
}
|
||||
return VOID_REMOVEXATTR_RESPONSE;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -35,6 +35,8 @@ import org.apache.hadoop.fs.FsServerDefaults;
|
|||
import org.apache.hadoop.fs.Options.Rename;
|
||||
import org.apache.hadoop.fs.ParentNotDirectoryException;
|
||||
import org.apache.hadoop.fs.UnresolvedLinkException;
|
||||
import org.apache.hadoop.fs.XAttr;
|
||||
import org.apache.hadoop.fs.XAttrSetFlag;
|
||||
import org.apache.hadoop.fs.permission.AclEntry;
|
||||
import org.apache.hadoop.fs.permission.AclStatus;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
|
@ -141,6 +143,9 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSaf
|
|||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto;
|
||||
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
|
||||
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
|
||||
|
@ -1268,4 +1273,47 @@ public class ClientNamenodeProtocolTranslatorPB implements
|
|||
throw ProtobufHelper.getRemoteException(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
|
||||
throws IOException {
|
||||
SetXAttrRequestProto req = SetXAttrRequestProto.newBuilder()
|
||||
.setSrc(src)
|
||||
.setXAttr(PBHelper.convertXAttrProto(xAttr))
|
||||
.setFlag(PBHelper.convert(flag))
|
||||
.build();
|
||||
try {
|
||||
rpcProxy.setXAttr(null, req);
|
||||
} catch (ServiceException e) {
|
||||
throw ProtobufHelper.getRemoteException(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<XAttr> getXAttrs(String src, List<XAttr> xAttrs)
|
||||
throws IOException {
|
||||
GetXAttrsRequestProto.Builder builder = GetXAttrsRequestProto.newBuilder();
|
||||
builder.setSrc(src);
|
||||
if (xAttrs != null) {
|
||||
builder.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs));
|
||||
}
|
||||
GetXAttrsRequestProto req = builder.build();
|
||||
try {
|
||||
return PBHelper.convert(rpcProxy.getXAttrs(null, req));
|
||||
} catch (ServiceException e) {
|
||||
throw ProtobufHelper.getRemoteException(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void removeXAttr(String src, XAttr xAttr) throws IOException {
|
||||
RemoveXAttrRequestProto req = RemoveXAttrRequestProto
|
||||
.newBuilder().setSrc(src)
|
||||
.setXAttr(PBHelper.convertXAttrProto(xAttr)).build();
|
||||
try {
|
||||
rpcProxy.removeXAttr(null, req);
|
||||
} catch (ServiceException e) {
|
||||
throw ProtobufHelper.getRemoteException(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -32,6 +32,8 @@ import org.apache.hadoop.fs.ContentSummary;
|
|||
import org.apache.hadoop.fs.CreateFlag;
|
||||
import org.apache.hadoop.fs.FsServerDefaults;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.XAttr;
|
||||
import org.apache.hadoop.fs.XAttrSetFlag;
|
||||
import org.apache.hadoop.fs.permission.AclEntry;
|
||||
import org.apache.hadoop.fs.permission.AclEntryScope;
|
||||
import org.apache.hadoop.fs.permission.AclEntryType;
|
||||
|
@ -150,6 +152,10 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
|
|||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.XAttrNamespaceProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrSetFlagProto;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockKey;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
|
||||
|
@ -221,6 +227,8 @@ public class PBHelper {
|
|||
AclEntryType.values();
|
||||
private static final FsAction[] FSACTION_VALUES =
|
||||
FsAction.values();
|
||||
private static final XAttr.NameSpace[] XATTR_NAMESPACE_VALUES =
|
||||
XAttr.NameSpace.values();
|
||||
|
||||
private PBHelper() {
|
||||
/** Hidden constructor */
|
||||
|
@ -2007,6 +2015,14 @@ public class PBHelper {
|
|||
private static AclEntryType convert(AclEntryTypeProto v) {
|
||||
return castEnum(v, ACL_ENTRY_TYPE_VALUES);
|
||||
}
|
||||
|
||||
private static XAttrNamespaceProto convert(XAttr.NameSpace v) {
|
||||
return XAttrNamespaceProto.valueOf(v.ordinal());
|
||||
}
|
||||
|
||||
private static XAttr.NameSpace convert(XAttrNamespaceProto v) {
|
||||
return castEnum(v, XATTR_NAMESPACE_VALUES);
|
||||
}
|
||||
|
||||
private static FsActionProto convert(FsAction v) {
|
||||
return FsActionProto.valueOf(v != null ? v.ordinal() : 0);
|
||||
|
@ -2060,6 +2076,108 @@ public class PBHelper {
|
|||
.addAllEntries(convertAclEntryProto(e.getEntries())).build();
|
||||
return GetAclStatusResponseProto.newBuilder().setResult(r).build();
|
||||
}
|
||||
|
||||
public static XAttrProto convertXAttrProto(XAttr a) {
|
||||
XAttrProto.Builder builder = XAttrProto.newBuilder();
|
||||
builder.setNamespace(convert(a.getNameSpace()));
|
||||
if (a.getName() != null) {
|
||||
builder.setName(a.getName());
|
||||
}
|
||||
if (a.getValue() != null) {
|
||||
builder.setValue(getByteString(a.getValue()));
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
public static List<XAttrProto> convertXAttrProto(
|
||||
List<XAttr> xAttrSpec) {
|
||||
ArrayList<XAttrProto> xAttrs = Lists.newArrayListWithCapacity(
|
||||
xAttrSpec.size());
|
||||
for (XAttr a : xAttrSpec) {
|
||||
XAttrProto.Builder builder = XAttrProto.newBuilder();
|
||||
builder.setNamespace(convert(a.getNameSpace()));
|
||||
if (a.getName() != null) {
|
||||
builder.setName(a.getName());
|
||||
}
|
||||
if (a.getValue() != null) {
|
||||
builder.setValue(getByteString(a.getValue()));
|
||||
}
|
||||
xAttrs.add(builder.build());
|
||||
}
|
||||
return xAttrs;
|
||||
}
|
||||
|
||||
/**
|
||||
* The flag field in PB is a bitmask whose values are the same as the
|
||||
* enum values of XAttrSetFlag.
|
||||
*/
|
||||
public static int convert(EnumSet<XAttrSetFlag> flag) {
|
||||
int value = 0;
|
||||
if (flag.contains(XAttrSetFlag.CREATE)) {
|
||||
value |= XAttrSetFlagProto.XATTR_CREATE.getNumber();
|
||||
}
|
||||
if (flag.contains(XAttrSetFlag.REPLACE)) {
|
||||
value |= XAttrSetFlagProto.XATTR_REPLACE.getNumber();
|
||||
}
|
||||
return value;
|
||||
}
|
||||
|
||||
public static EnumSet<XAttrSetFlag> convert(int flag) {
|
||||
EnumSet<XAttrSetFlag> result =
|
||||
EnumSet.noneOf(XAttrSetFlag.class);
|
||||
if ((flag & XAttrSetFlagProto.XATTR_CREATE_VALUE) ==
|
||||
XAttrSetFlagProto.XATTR_CREATE_VALUE) {
|
||||
result.add(XAttrSetFlag.CREATE);
|
||||
}
|
||||
if ((flag & XAttrSetFlagProto.XATTR_REPLACE_VALUE) ==
|
||||
XAttrSetFlagProto.XATTR_REPLACE_VALUE) {
|
||||
result.add(XAttrSetFlag.REPLACE);
|
||||
}
|
||||
return result;
|
||||
}
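A worked example of the two flag conversions above, assuming (as in the generated protobuf) that XATTR_CREATE carries the value 0x1 and XATTR_REPLACE the value 0x2:

import java.util.EnumSet;

import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;

class XAttrFlagMaskSketch {
  static void demo() {
    int create = PBHelper.convert(EnumSet.of(XAttrSetFlag.CREATE));       // 0x1
    int both = PBHelper.convert(
        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));           // 0x3

    // Decoding tests each bit independently, so 0x3 yields both flags and any
    // unrelated bits in the int are ignored.
    EnumSet<XAttrSetFlag> decoded = PBHelper.convert(both);
    assert decoded.equals(EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
  }
}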
|
||||
|
||||
public static XAttr convertXAttr(XAttrProto a) {
|
||||
XAttr.Builder builder = new XAttr.Builder();
|
||||
builder.setNameSpace(convert(a.getNamespace()));
|
||||
if (a.hasName()) {
|
||||
builder.setName(a.getName());
|
||||
}
|
||||
if (a.hasValue()) {
|
||||
builder.setValue(a.getValue().toByteArray());
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
public static List<XAttr> convertXAttrs(List<XAttrProto> xAttrSpec) {
|
||||
ArrayList<XAttr> xAttrs = Lists.newArrayListWithCapacity(xAttrSpec.size());
|
||||
for (XAttrProto a : xAttrSpec) {
|
||||
XAttr.Builder builder = new XAttr.Builder();
|
||||
builder.setNameSpace(convert(a.getNamespace()));
|
||||
if (a.hasName()) {
|
||||
builder.setName(a.getName());
|
||||
}
|
||||
if (a.hasValue()) {
|
||||
builder.setValue(a.getValue().toByteArray());
|
||||
}
|
||||
xAttrs.add(builder.build());
|
||||
}
|
||||
return xAttrs;
|
||||
}
|
||||
|
||||
public static List<XAttr> convert(GetXAttrsResponseProto a) {
|
||||
List<XAttrProto> xAttrs = a.getXAttrsList();
|
||||
return convertXAttrs(xAttrs);
|
||||
}
|
||||
|
||||
public static GetXAttrsResponseProto convertXAttrsResponse(
|
||||
List<XAttr> xAttrs) {
|
||||
GetXAttrsResponseProto.Builder builder = GetXAttrsResponseProto
|
||||
.newBuilder();
|
||||
if (xAttrs != null) {
|
||||
builder.addAllXAttrs(convertXAttrProto(xAttrs));
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
public static ShortCircuitShmSlotProto convert(SlotId slotId) {
|
||||
return ShortCircuitShmSlotProto.newBuilder().
|
||||
|
|
|
@ -47,6 +47,7 @@ import java.util.concurrent.ExecutionException;
|
|||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
|
@ -365,7 +366,7 @@ public class Balancer {
|
|||
|
||||
sendRequest(out);
|
||||
receiveResponse(in);
|
||||
bytesMoved.inc(block.getNumBytes());
|
||||
bytesMoved.addAndGet(block.getNumBytes());
|
||||
LOG.info("Successfully moved " + this);
|
||||
} catch (IOException e) {
|
||||
LOG.warn("Failed to move " + this + ": " + e.getMessage());
|
||||
|
@ -1111,17 +1112,7 @@ public class Balancer {
|
|||
return null;
|
||||
}
|
||||
|
||||
private static class BytesMoved {
|
||||
private long bytesMoved = 0L;;
|
||||
private synchronized void inc( long bytes ) {
|
||||
bytesMoved += bytes;
|
||||
}
|
||||
|
||||
private synchronized long get() {
|
||||
return bytesMoved;
|
||||
}
|
||||
};
|
||||
private final BytesMoved bytesMoved = new BytesMoved();
|
||||
private final AtomicLong bytesMoved = new AtomicLong();
|
||||
|
||||
/* Start a thread to dispatch block moves for each source.
|
||||
* The thread selects blocks to move & sends request to proxy source to
|
||||
|
|
|
@ -945,6 +945,16 @@ public class BlockManager {
|
|||
minReplication);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a block is replicated to at least the minimum replication.
|
||||
*/
|
||||
public boolean isSufficientlyReplicated(BlockInfo b) {
|
||||
// Compare against the lesser of the minReplication and number of live DNs.
|
||||
final int replication =
|
||||
Math.min(minReplication, getDatanodeManager().getNumLiveDataNodes());
|
||||
return countNodes(b).liveReplicas() >= replication;
|
||||
}
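For example (illustrative numbers): with a configured minimum replication of 3 but only 2 live datanodes, the effective threshold above is min(3, 2) = 2, so a complete block with 2 live replicas still counts as sufficiently replicated for the append check that calls this method later in this change.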
|
||||
|
||||
/**
|
||||
* return a list of blocks & their locations on <code>datanode</code> whose
|
||||
* total size is <code>size</code>
|
||||
|
|
|
@ -1057,15 +1057,7 @@ public class DatanodeManager {
|
|||
|
||||
/** @return the number of dead datanodes. */
|
||||
public int getNumDeadDataNodes() {
|
||||
int numDead = 0;
|
||||
synchronized (datanodeMap) {
|
||||
for(DatanodeDescriptor dn : datanodeMap.values()) {
|
||||
if (isDatanodeDead(dn) ) {
|
||||
numDead++;
|
||||
}
|
||||
}
|
||||
}
|
||||
return numDead;
|
||||
return getDatanodeListForReport(DatanodeReportType.DEAD).size();
|
||||
}
|
||||
|
||||
/** @return list of datanodes where decommissioning is in progress. */
|
||||
|
|
|
@ -1,56 +0,0 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.server.namenode;
|
||||
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.protocol.AclException;
|
||||
|
||||
/**
|
||||
* Support for ACLs is controlled by a configuration flag. If the configuration
|
||||
* flag is false, then the NameNode will reject all ACL-related operations.
|
||||
*/
|
||||
final class AclConfigFlag {
|
||||
private final boolean enabled;
|
||||
|
||||
/**
|
||||
* Creates a new AclConfigFlag from configuration.
|
||||
*
|
||||
* @param conf Configuration to check
|
||||
*/
|
||||
public AclConfigFlag(Configuration conf) {
|
||||
enabled = conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_DEFAULT);
|
||||
LogFactory.getLog(AclConfigFlag.class).info("ACLs enabled? " + enabled);
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks the flag on behalf of an ACL API call.
|
||||
*
|
||||
* @throws AclException if ACLs are disabled
|
||||
*/
|
||||
public void checkForApiCall() throws AclException {
|
||||
if (!enabled) {
|
||||
throw new AclException(String.format(
|
||||
"The ACL operation has been rejected. "
|
||||
+ "Support for ACLs has been disabled by setting %s to false.",
|
||||
DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY));
|
||||
}
|
||||
}
|
||||
}
|
|
@ -24,6 +24,7 @@ import java.io.FileNotFoundException;
|
|||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.EnumSet;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.locks.Condition;
|
||||
|
@ -39,6 +40,8 @@ import org.apache.hadoop.fs.ParentNotDirectoryException;
|
|||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.PathIsNotDirectoryException;
|
||||
import org.apache.hadoop.fs.UnresolvedLinkException;
|
||||
import org.apache.hadoop.fs.XAttr;
|
||||
import org.apache.hadoop.fs.XAttrSetFlag;
|
||||
import org.apache.hadoop.fs.permission.AclEntry;
|
||||
import org.apache.hadoop.fs.permission.AclStatus;
|
||||
import org.apache.hadoop.fs.permission.FsAction;
|
||||
|
@ -47,6 +50,7 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
|
|||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.DFSUtil;
|
||||
import org.apache.hadoop.hdfs.DistributedFileSystem;
|
||||
import org.apache.hadoop.hdfs.XAttrHelper;
|
||||
import org.apache.hadoop.hdfs.protocol.AclException;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
|
||||
|
@ -79,6 +83,7 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
|
|||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.collect.Lists;
|
||||
|
||||
/*************************************************
|
||||
* FSDirectory stores the filesystem directory state.
|
||||
|
@ -125,6 +130,7 @@ public class FSDirectory implements Closeable {
|
|||
private final int contentCountLimit; // max content summary counts per run
|
||||
private final INodeMap inodeMap; // Synchronized by dirLock
|
||||
private long yieldCount = 0; // keep track of lock yield count.
|
||||
private final int inodeXAttrsLimit; //inode xattrs max limit
|
||||
|
||||
// lock to protect the directory and BlockMap
|
||||
private final ReentrantReadWriteLock dirLock;
|
||||
|
@ -190,6 +196,12 @@ public class FSDirectory implements Closeable {
|
|||
this.maxDirItems = conf.getInt(
|
||||
DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_DEFAULT);
|
||||
this.inodeXAttrsLimit = conf.getInt(
|
||||
DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT);
|
||||
Preconditions.checkArgument(this.inodeXAttrsLimit >= 0,
|
||||
"Cannot set a negative limit on the number of xattrs per inode (%s).",
|
||||
DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY);
|
||||
// We need a maximum maximum because by default, PB limits message sizes
|
||||
// to 64MB. This means we can only store approximately 6.7 million entries
|
||||
// per directory, but let's use 6.4 million for some safety.
|
||||
|
@ -2856,6 +2868,116 @@ public class FSDirectory implements Closeable {
|
|||
readUnlock();
|
||||
}
|
||||
}
|
||||
|
||||
void removeXAttr(String src, XAttr xAttr) throws IOException {
|
||||
writeLock();
|
||||
try {
|
||||
XAttr removedXAttr = unprotectedRemoveXAttr(src, xAttr);
|
||||
if (removedXAttr != null) {
|
||||
fsImage.getEditLog().logRemoveXAttr(src, removedXAttr);
|
||||
} else {
|
||||
NameNode.stateChangeLog.info("DIR* FSDirectory.removeXAttr: XAttr " +
|
||||
XAttrHelper.getPrefixName(xAttr) +
|
||||
" does not exist on the path " + src);
|
||||
}
|
||||
} finally {
|
||||
writeUnlock();
|
||||
}
|
||||
}
|
||||
|
||||
XAttr unprotectedRemoveXAttr(String src,
|
||||
XAttr xAttr) throws IOException {
|
||||
assert hasWriteLock();
|
||||
INodesInPath iip = getINodesInPath4Write(normalizePath(src), true);
|
||||
INode inode = resolveLastINode(src, iip);
|
||||
int snapshotId = iip.getLatestSnapshotId();
|
||||
List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
|
||||
List<XAttr> newXAttrs = filterINodeXAttr(existingXAttrs, xAttr);
|
||||
if (existingXAttrs.size() != newXAttrs.size()) {
|
||||
XAttrStorage.updateINodeXAttrs(inode, newXAttrs, snapshotId);
|
||||
return xAttr;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
List<XAttr> filterINodeXAttr(List<XAttr> existingXAttrs,
|
||||
XAttr xAttr) throws QuotaExceededException {
|
||||
if (existingXAttrs == null || existingXAttrs.isEmpty()) {
|
||||
return existingXAttrs;
|
||||
}
|
||||
|
||||
List<XAttr> xAttrs = Lists.newArrayListWithCapacity(existingXAttrs.size());
|
||||
for (XAttr a : existingXAttrs) {
|
||||
if (!(a.getNameSpace() == xAttr.getNameSpace()
|
||||
&& a.getName().equals(xAttr.getName()))) {
|
||||
xAttrs.add(a);
|
||||
}
|
||||
}
|
||||
|
||||
return xAttrs;
|
||||
}
|
||||
|
||||
void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
|
||||
boolean logRetryCache) throws IOException {
|
||||
writeLock();
|
||||
try {
|
||||
unprotectedSetXAttr(src, xAttr, flag);
|
||||
fsImage.getEditLog().logSetXAttr(src, xAttr, logRetryCache);
|
||||
} finally {
|
||||
writeUnlock();
|
||||
}
|
||||
}
|
||||
|
||||
void unprotectedSetXAttr(String src, XAttr xAttr,
|
||||
EnumSet<XAttrSetFlag> flag) throws IOException {
|
||||
assert hasWriteLock();
|
||||
INodesInPath iip = getINodesInPath4Write(normalizePath(src), true);
|
||||
INode inode = resolveLastINode(src, iip);
|
||||
int snapshotId = iip.getLatestSnapshotId();
|
||||
List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
|
||||
List<XAttr> newXAttrs = setINodeXAttr(existingXAttrs, xAttr, flag);
|
||||
XAttrStorage.updateINodeXAttrs(inode, newXAttrs, snapshotId);
|
||||
}
|
||||
|
||||
List<XAttr> setINodeXAttr(List<XAttr> existingXAttrs, XAttr xAttr,
|
||||
EnumSet<XAttrSetFlag> flag) throws QuotaExceededException, IOException {
|
||||
List<XAttr> xAttrs = Lists.newArrayListWithCapacity(
|
||||
existingXAttrs != null ? existingXAttrs.size() + 1 : 1);
|
||||
boolean exist = false;
|
||||
if (existingXAttrs != null) {
|
||||
for (XAttr a: existingXAttrs) {
|
||||
if ((a.getNameSpace() == xAttr.getNameSpace()
|
||||
&& a.getName().equals(xAttr.getName()))) {
|
||||
exist = true;
|
||||
} else {
|
||||
xAttrs.add(a);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
XAttrSetFlag.validate(xAttr.getName(), exist, flag);
|
||||
xAttrs.add(xAttr);
|
||||
|
||||
if (xAttrs.size() > inodeXAttrsLimit) {
|
||||
throw new IOException("Cannot add additional XAttr to inode, "
|
||||
+ "would exceed limit of " + inodeXAttrsLimit);
|
||||
}
|
||||
|
||||
return xAttrs;
|
||||
}
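A minimal sketch of the list semantics of filterINodeXAttr and setINodeXAttr above, written as it might appear in a same-package test (both methods are package-visible and normally run under the FSDirectory write lock); fsDir and the attribute names are assumptions for illustration:

package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;
import java.util.EnumSet;
import java.util.List;

import com.google.common.collect.Lists;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;

class INodeXAttrListSketch {
  static void demo(FSDirectory fsDir) throws IOException {
    XAttr userA = new XAttr.Builder().setNameSpace(XAttr.NameSpace.USER)
        .setName("a").setValue(new byte[] {1}).build();
    List<XAttr> existing = Lists.newArrayList(userA);

    // Same namespace + name: the old entry is skipped and the new one
    // appended, so the result still has exactly one element.
    XAttr userA2 = new XAttr.Builder().setNameSpace(XAttr.NameSpace.USER)
        .setName("a").setValue(new byte[] {2}).build();
    List<XAttr> updated = fsDir.setINodeXAttr(existing, userA2,
        EnumSet.of(XAttrSetFlag.REPLACE));

    // No entry matches user.b, so filtering keeps the list size unchanged and
    // unprotectedRemoveXAttr would report the xattr as absent (returns null).
    List<XAttr> filtered = fsDir.filterINodeXAttr(updated,
        new XAttr.Builder().setNameSpace(XAttr.NameSpace.USER)
            .setName("b").build());
  }
}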
|
||||
|
||||
List<XAttr> getXAttrs(String src) throws IOException {
|
||||
String srcs = normalizePath(src);
|
||||
readLock();
|
||||
try {
|
||||
INodesInPath iip = getLastINodeInPath(srcs, true);
|
||||
INode inode = resolveLastINode(src, iip);
|
||||
int snapshotId = iip.getPathSnapshotId();
|
||||
return XAttrStorage.readINodeXAttrs(inode, snapshotId);
|
||||
} finally {
|
||||
readUnlock();
|
||||
}
|
||||
}
|
||||
|
||||
private static INode resolveLastINode(String src, INodesInPath iip)
|
||||
throws FileNotFoundException {
|
||||
|
|
|
@ -37,6 +37,7 @@ import org.apache.hadoop.fs.Options;
|
|||
import org.apache.hadoop.fs.permission.AclEntry;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.fs.permission.PermissionStatus;
|
||||
import org.apache.hadoop.fs.XAttr;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
|
||||
|
@ -69,6 +70,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
|
|||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCacheDirectiveInfoOp;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveXAttrOp;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
|
||||
|
@ -80,6 +82,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp;
|
|||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetReplicationOp;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetXAttrOp;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;
|
||||
|
@ -1050,6 +1053,21 @@ public class FSEditLog implements LogsPurgeable {
|
|||
op.aclEntries = entries;
|
||||
logEdit(op);
|
||||
}
|
||||
|
||||
void logSetXAttr(String src, XAttr xAttr, boolean toLogRpcIds) {
|
||||
final SetXAttrOp op = SetXAttrOp.getInstance();
|
||||
op.src = src;
|
||||
op.xAttr = xAttr;
|
||||
logRpcIds(op, toLogRpcIds);
|
||||
logEdit(op);
|
||||
}
|
||||
|
||||
void logRemoveXAttr(String src, XAttr xAttr) {
|
||||
final RemoveXAttrOp op = RemoveXAttrOp.getInstance();
|
||||
op.src = src;
|
||||
op.xAttr = xAttr;
|
||||
logEdit(op);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all the journals this edit log is currently operating on.
|
||||
|
|
|
@ -25,12 +25,14 @@ import java.io.IOException;
|
|||
import java.io.InputStream;
|
||||
import java.util.Arrays;
|
||||
import java.util.EnumMap;
|
||||
import java.util.EnumSet;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.fs.XAttrSetFlag;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
|
@ -76,6 +78,8 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp;
|
|||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetReplicationOp;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetXAttrOp;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveXAttrOp;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;
|
||||
|
@ -798,6 +802,20 @@ public class FSEditLogLoader {
|
|||
fsDir.unprotectedSetAcl(setAclOp.src, setAclOp.aclEntries);
|
||||
break;
|
||||
}
|
||||
case OP_SET_XATTR: {
|
||||
SetXAttrOp setXAttrOp = (SetXAttrOp) op;
|
||||
fsDir.unprotectedSetXAttr(setXAttrOp.src, setXAttrOp.xAttr,
|
||||
EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
|
||||
if (toAddRetryCache) {
|
||||
fsNamesys.addCacheEntry(setXAttrOp.rpcClientId, setXAttrOp.rpcCallId);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case OP_REMOVE_XATTR: {
|
||||
RemoveXAttrOp removeXAttrOp = (RemoveXAttrOp) op;
|
||||
fsDir.unprotectedRemoveXAttr(removeXAttrOp.src, removeXAttrOp.xAttr);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
throw new IOException("Invalid operation read " + op.opCode);
|
||||
}
|
||||
|
|
|
@ -54,6 +54,8 @@ import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_OWN
|
|||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_PERMISSIONS;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_QUOTA;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_REPLICATION;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_XATTR;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REMOVE_XATTR;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_START_LOG_SEGMENT;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SYMLINK;
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_TIMES;
|
||||
|
@ -79,12 +81,14 @@ import org.apache.hadoop.classification.InterfaceAudience;
|
|||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.fs.ChecksumException;
|
||||
import org.apache.hadoop.fs.Options.Rename;
|
||||
import org.apache.hadoop.fs.XAttrCodec;
|
||||
import org.apache.hadoop.fs.permission.AclEntry;
|
||||
import org.apache.hadoop.fs.permission.AclEntryScope;
|
||||
import org.apache.hadoop.fs.permission.AclEntryType;
|
||||
import org.apache.hadoop.fs.permission.FsAction;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.fs.permission.PermissionStatus;
|
||||
import org.apache.hadoop.fs.XAttr;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.DeprecatedUTF8;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
|
@ -95,6 +99,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
|||
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
|
||||
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEditLogProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrEditLogProto;
|
||||
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
|
||||
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.util.XMLUtils;
|
||||
|
@ -186,6 +191,8 @@ public abstract class FSEditLogOp {
|
|||
OP_ROLLING_UPGRADE_START, "start"));
|
||||
inst.put(OP_ROLLING_UPGRADE_FINALIZE, new RollingUpgradeOp(
|
||||
OP_ROLLING_UPGRADE_FINALIZE, "finalize"));
|
||||
inst.put(OP_SET_XATTR, new SetXAttrOp());
|
||||
inst.put(OP_REMOVE_XATTR, new RemoveXAttrOp());
|
||||
}
|
||||
|
||||
public FSEditLogOp get(FSEditLogOpCodes opcode) {
|
||||
|
@ -3490,6 +3497,95 @@ public abstract class FSEditLogOp {
|
|||
return builder.toString();
|
||||
}
|
||||
}
|
||||
|
||||
static class RemoveXAttrOp extends FSEditLogOp {
|
||||
XAttr xAttr;
|
||||
String src;
|
||||
|
||||
private RemoveXAttrOp() {
|
||||
super(OP_REMOVE_XATTR);
|
||||
}
|
||||
|
||||
static RemoveXAttrOp getInstance() {
|
||||
return new RemoveXAttrOp();
|
||||
}
|
||||
|
||||
@Override
|
||||
void readFields(DataInputStream in, int logVersion) throws IOException {
|
||||
XAttrEditLogProto p = XAttrEditLogProto.parseDelimitedFrom(in);
|
||||
src = p.getSrc();
|
||||
xAttr = PBHelper.convertXAttr(p.getXAttr());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeFields(DataOutputStream out) throws IOException {
|
||||
XAttrEditLogProto.Builder b = XAttrEditLogProto.newBuilder();
|
||||
if (src != null) {
|
||||
b.setSrc(src);
|
||||
}
|
||||
b.setXAttr(PBHelper.convertXAttrProto(xAttr));
|
||||
b.build().writeDelimitedTo(out);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void toXml(ContentHandler contentHandler) throws SAXException {
|
||||
XMLUtils.addSaxString(contentHandler, "SRC", src);
|
||||
appendXAttrToXml(contentHandler, xAttr);
|
||||
}
|
||||
|
||||
@Override
|
||||
void fromXml(Stanza st) throws InvalidXmlException {
|
||||
src = st.getValue("SRC");
|
||||
xAttr = readXAttrFromXml(st);
|
||||
}
|
||||
}
|
||||
|
||||
static class SetXAttrOp extends FSEditLogOp {
|
||||
XAttr xAttr;
|
||||
String src;
|
||||
|
||||
private SetXAttrOp() {
|
||||
super(OP_SET_XATTR);
|
||||
}
|
||||
|
||||
static SetXAttrOp getInstance() {
|
||||
return new SetXAttrOp();
|
||||
}
|
||||
|
||||
@Override
|
||||
void readFields(DataInputStream in, int logVersion) throws IOException {
|
||||
XAttrEditLogProto p = XAttrEditLogProto.parseDelimitedFrom(in);
|
||||
src = p.getSrc();
|
||||
xAttr = PBHelper.convertXAttr(p.getXAttr());
|
||||
readRpcIds(in, logVersion);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeFields(DataOutputStream out) throws IOException {
|
||||
XAttrEditLogProto.Builder b = XAttrEditLogProto.newBuilder();
|
||||
if (src != null) {
|
||||
b.setSrc(src);
|
||||
}
|
||||
b.setXAttr(PBHelper.convertXAttrProto(xAttr));
|
||||
b.build().writeDelimitedTo(out);
|
||||
// clientId and callId
|
||||
writeRpcIds(rpcClientId, rpcCallId, out);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void toXml(ContentHandler contentHandler) throws SAXException {
|
||||
XMLUtils.addSaxString(contentHandler, "SRC", src);
|
||||
appendXAttrToXml(contentHandler, xAttr);
|
||||
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
|
||||
}
|
||||
|
||||
@Override
|
||||
void fromXml(Stanza st) throws InvalidXmlException {
|
||||
src = st.getValue("SRC");
|
||||
xAttr = readXAttrFromXml(st);
|
||||
readRpcIdsFromXml(st);
|
||||
}
|
||||
}
|
||||
|
||||
static class SetAclOp extends FSEditLogOp {
|
||||
List<AclEntry> aclEntries = Lists.newArrayList();
|
||||
|
@ -4106,4 +4202,42 @@ public abstract class FSEditLogOp {
|
|||
}
|
||||
return aclEntries;
|
||||
}
|
||||
|
||||
private static void appendXAttrToXml(ContentHandler contentHandler,
|
||||
XAttr xAttr) throws SAXException {
|
||||
contentHandler.startElement("", "", "XATTR", new AttributesImpl());
|
||||
XMLUtils.addSaxString(contentHandler, "NAMESPACE",
|
||||
xAttr.getNameSpace().toString());
|
||||
XMLUtils.addSaxString(contentHandler, "NAME", xAttr.getName());
|
||||
if (xAttr.getValue() != null) {
|
||||
try {
|
||||
XMLUtils.addSaxString(contentHandler, "VALUE",
|
||||
XAttrCodec.encodeValue(xAttr.getValue(), XAttrCodec.HEX));
|
||||
} catch (IOException e) {
|
||||
throw new SAXException(e);
|
||||
}
|
||||
}
|
||||
contentHandler.endElement("", "", "XATTR");
|
||||
}
|
||||
|
||||
private static XAttr readXAttrFromXml(Stanza st)
|
||||
throws InvalidXmlException {
|
||||
if (!st.hasChildren("XATTR")) {
|
||||
return null;
|
||||
}
|
||||
|
||||
Stanza a = st.getChildren("XATTR").get(0);
|
||||
XAttr.Builder builder = new XAttr.Builder();
|
||||
builder.setNameSpace(XAttr.NameSpace.valueOf(a.getValue("NAMESPACE"))).
|
||||
setName(a.getValue("NAME"));
|
||||
String v = a.getValueOrNull("VALUE");
|
||||
if (v != null) {
|
||||
try {
|
||||
builder.setValue(XAttrCodec.decodeValue(v));
|
||||
} catch (IOException e) {
|
||||
throw new InvalidXmlException(e.toString());
|
||||
}
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -70,6 +70,8 @@ public enum FSEditLogOpCodes {
|
|||
OP_SET_ACL ((byte) 40),
|
||||
OP_ROLLING_UPGRADE_START ((byte) 41),
|
||||
OP_ROLLING_UPGRADE_FINALIZE ((byte) 42),
|
||||
OP_SET_XATTR ((byte) 43),
|
||||
OP_REMOVE_XATTR ((byte) 44),
|
||||
|
||||
// Note that the current range of the valid OP code is 0~127
|
||||
OP_INVALID ((byte) -1);
|
||||
|
|
|
@ -227,7 +227,7 @@ public class FSImage implements Closeable {
|
|||
+ HdfsConstants.NAMENODE_LAYOUT_VERSION + " is required.\n"
|
||||
+ "Please restart NameNode with the \""
|
||||
+ RollingUpgradeStartupOption.STARTED.getOptionString()
|
||||
+ "\" option if a rolling upgraded is already started;"
|
||||
+ "\" option if a rolling upgrade is already started;"
|
||||
+ " or restart NameNode with the \""
|
||||
+ StartupOption.UPGRADE.getName() + "\" option to start"
|
||||
+ " a new upgrade.");
|
||||
|
|
|
@ -877,7 +877,7 @@ public class FSImageFormat {
|
|||
final long preferredBlockSize = in.readLong();
|
||||
|
||||
return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime,
|
||||
accessTime, replication, preferredBlockSize);
|
||||
accessTime, replication, preferredBlockSize, null);
|
||||
}
|
||||
|
||||
public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)
|
||||
|
@ -897,10 +897,10 @@ public class FSImageFormat {
|
|||
final long nsQuota = in.readLong();
|
||||
final long dsQuota = in.readLong();
|
||||
|
||||
return nsQuota == -1L && dsQuota == -1L?
|
||||
new INodeDirectoryAttributes.SnapshotCopy(name, permissions, null, modificationTime)
|
||||
return nsQuota == -1L && dsQuota == -1L ? new INodeDirectoryAttributes.SnapshotCopy(
|
||||
name, permissions, null, modificationTime, null)
|
||||
: new INodeDirectoryAttributes.CopyWithQuota(name, permissions,
|
||||
null, modificationTime, nsQuota, dsQuota);
|
||||
null, modificationTime, nsQuota, dsQuota, null);
|
||||
}
|
||||
|
||||
private void loadFilesUnderConstruction(DataInput in,
|
||||
|
|
|
@ -36,6 +36,7 @@ import org.apache.hadoop.fs.permission.AclEntryType;
|
|||
import org.apache.hadoop.fs.permission.FsAction;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.fs.permission.PermissionStatus;
|
||||
import org.apache.hadoop.fs.XAttr;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
|
||||
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
|
||||
|
@ -49,7 +50,10 @@ import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructio
|
|||
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
|
||||
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
|
||||
import org.apache.hadoop.hdfs.util.ReadOnlyList;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
|
@ -74,6 +78,14 @@ public final class FSImageFormatPBINode {
|
|||
.values();
|
||||
private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES = AclEntryType
|
||||
.values();
|
||||
|
||||
private static final int XATTR_NAMESPACE_MASK = 3;
|
||||
private static final int XATTR_NAMESPACE_OFFSET = 30;
|
||||
private static final int XATTR_NAME_MASK = (1 << 24) - 1;
|
||||
private static final int XATTR_NAME_OFFSET = 6;
|
||||
private static final XAttr.NameSpace[] XATTR_NAMESPACE_VALUES =
|
||||
XAttr.NameSpace.values();
|
||||
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(FSImageFormatPBINode.class);
|
||||
|
||||
|
@ -103,6 +115,25 @@ public final class FSImageFormatPBINode {
|
|||
}
|
||||
return b.build();
|
||||
}
|
||||
|
||||
public static ImmutableList<XAttr> loadXAttrs(
|
||||
XAttrFeatureProto proto, final String[] stringTable) {
|
||||
ImmutableList.Builder<XAttr> b = ImmutableList.builder();
|
||||
for (XAttrCompactProto xAttrCompactProto : proto.getXAttrsList()) {
|
||||
int v = xAttrCompactProto.getName();
|
||||
int nid = (v >> XATTR_NAME_OFFSET) & XATTR_NAME_MASK;
|
||||
int ns = (v >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK;
|
||||
String name = stringTable[nid];
|
||||
byte[] value = null;
|
||||
if (xAttrCompactProto.getValue() != null) {
|
||||
value = xAttrCompactProto.getValue().toByteArray();
|
||||
}
|
||||
b.add(new XAttr.Builder().setNameSpace(XATTR_NAMESPACE_VALUES[ns])
|
||||
.setName(name).setValue(value).build());
|
||||
}
|
||||
|
||||
return b.build();
|
||||
}
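The compact fsimage form above packs the namespace and the name's string-table id into one 32-bit integer; a hedged restatement of that layout as helper methods, written as if they sat next to loadXAttrs so they can reuse the constants defined above (the low 6 bits appear unused in this change):

// bits 31..30  namespace ordinal                      (mask 3, offset 30)
// bits 29..6   string-table id of the xattr name      (24-bit mask, offset 6)
// bits  5..0   unused here
static int packXAttrName(XAttr.NameSpace ns, int nameId) {
  return ((ns.ordinal() & XATTR_NAMESPACE_MASK) << XATTR_NAMESPACE_OFFSET)
      | ((nameId & XATTR_NAME_MASK) << XATTR_NAME_OFFSET);
}

static int unpackNamespaceOrdinal(int v) {
  return (v >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK;
}

static int unpackNameId(int v) {
  return (v >> XATTR_NAME_OFFSET) & XATTR_NAME_MASK;
}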
|
||||
|
||||
public static INodeDirectory loadINodeDirectory(INodeSection.INode n,
|
||||
LoaderContext state) {
|
||||
|
@ -123,6 +154,10 @@ public final class FSImageFormatPBINode {
|
|||
dir.addAclFeature(new AclFeature(loadAclEntries(d.getAcl(),
|
||||
state.getStringTable())));
|
||||
}
|
||||
if (d.hasXAttrs()) {
|
||||
dir.addXAttrFeature(new XAttrFeature(
|
||||
loadXAttrs(d.getXAttrs(), state.getStringTable())));
|
||||
}
|
||||
return dir;
|
||||
}
|
||||
|
||||
|
@ -255,6 +290,11 @@ public final class FSImageFormatPBINode {
|
|||
file.addAclFeature(new AclFeature(loadAclEntries(f.getAcl(),
|
||||
state.getStringTable())));
|
||||
}
|
||||
|
||||
if (f.hasXAttrs()) {
|
||||
file.addXAttrFeature(new XAttrFeature(
|
||||
loadXAttrs(f.getXAttrs(), state.getStringTable())));
|
||||
}
|
||||
|
||||
// under-construction information
|
||||
if (f.hasFileUC()) {
|
||||
|
@ -295,6 +335,11 @@ public final class FSImageFormatPBINode {
|
|||
}
|
||||
dir.rootDir.cloneModificationTime(root);
|
||||
dir.rootDir.clonePermissionStatus(root);
|
||||
// root dir supports having extended attributes according to POSIX
|
||||
final XAttrFeature f = root.getXAttrFeature();
|
||||
if (f != null) {
|
||||
dir.rootDir.addXAttrFeature(f);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -320,6 +365,26 @@ public final class FSImageFormatPBINode {
|
|||
}
|
||||
return b;
|
||||
}
|
||||
|
||||
private static XAttrFeatureProto.Builder buildXAttrs(XAttrFeature f,
|
||||
final SaverContext.DeduplicationMap<String> stringMap) {
|
||||
XAttrFeatureProto.Builder b = XAttrFeatureProto.newBuilder();
|
||||
for (XAttr a : f.getXAttrs()) {
|
||||
XAttrCompactProto.Builder xAttrCompactBuilder = XAttrCompactProto.
|
||||
newBuilder();
|
||||
int v = ((a.getNameSpace().ordinal() & XATTR_NAMESPACE_MASK) <<
|
||||
XATTR_NAMESPACE_OFFSET)
|
||||
| ((stringMap.getId(a.getName()) & XATTR_NAME_MASK) <<
|
||||
XATTR_NAME_OFFSET);
|
||||
xAttrCompactBuilder.setName(v);
|
||||
if (a.getValue() != null) {
|
||||
xAttrCompactBuilder.setValue(PBHelper.getByteString(a.getValue()));
|
||||
}
|
||||
b.addXAttrs(xAttrCompactBuilder.build());
|
||||
}
|
||||
|
||||
return b;
|
||||
}
|
||||
|
||||
public static INodeSection.INodeFile.Builder buildINodeFile(
|
||||
INodeFileAttributes file, final SaverContext state) {
|
||||
|
@ -334,6 +399,10 @@ public final class FSImageFormatPBINode {
|
|||
if (f != null) {
|
||||
b.setAcl(buildAclEntries(f, state.getStringMap()));
|
||||
}
|
||||
XAttrFeature xAttrFeature = file.getXAttrFeature();
|
||||
if (xAttrFeature != null) {
|
||||
b.setXAttrs(buildXAttrs(xAttrFeature, state.getStringMap()));
|
||||
}
|
||||
return b;
|
||||
}
|
||||
|
||||
|
@ -350,6 +419,10 @@ public final class FSImageFormatPBINode {
|
|||
if (f != null) {
|
||||
b.setAcl(buildAclEntries(f, state.getStringMap()));
|
||||
}
|
||||
XAttrFeature xAttrFeature = dir.getXAttrFeature();
|
||||
if (xAttrFeature != null) {
|
||||
b.setXAttrs(buildXAttrs(xAttrFeature, state.getStringMap()));
|
||||
}
|
||||
return b;
|
||||
}
|
||||
|
||||
|
|
|
@ -131,6 +131,8 @@ import org.apache.hadoop.fs.ParentNotDirectoryException;
|
|||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
|
||||
import org.apache.hadoop.fs.UnresolvedLinkException;
|
||||
import org.apache.hadoop.fs.XAttr;
|
||||
import org.apache.hadoop.fs.XAttrSetFlag;
|
||||
import org.apache.hadoop.fs.permission.AclEntry;
|
||||
import org.apache.hadoop.fs.permission.AclStatus;
|
||||
import org.apache.hadoop.fs.permission.FsAction;
|
||||
|
@ -508,7 +510,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
|
||||
private final RetryCache retryCache;
|
||||
|
||||
private final AclConfigFlag aclConfigFlag;
|
||||
private final NNConf nnConf;
|
||||
|
||||
/**
|
||||
* Set the last allocated inode id when fsimage or editlog is loaded.
|
||||
|
@ -775,7 +777,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
this.isDefaultAuditLogger = auditLoggers.size() == 1 &&
|
||||
auditLoggers.get(0) instanceof DefaultAuditLogger;
|
||||
this.retryCache = ignoreRetryCache ? null : initRetryCache(conf);
|
||||
this.aclConfigFlag = new AclConfigFlag(conf);
|
||||
this.nnConf = new NNConf(conf);
|
||||
} catch(IOException e) {
|
||||
LOG.error(getClass().getSimpleName() + " initialization failed.", e);
|
||||
close();
|
||||
|
@ -1112,8 +1114,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
// so that the tailer starts from the right spot.
|
||||
dir.fsImage.updateLastAppliedTxIdFromWritten();
|
||||
}
|
||||
cacheManager.stopMonitorThread();
|
||||
cacheManager.clearDirectiveStats();
|
||||
if (cacheManager != null) {
|
||||
cacheManager.stopMonitorThread();
|
||||
cacheManager.clearDirectiveStats();
|
||||
}
|
||||
blockManager.getDatanodeManager().clearPendingCachingCommands();
|
||||
blockManager.getDatanodeManager().setShouldSendCachingCommands(false);
|
||||
// Don't want to keep replication queues when not in Active.
|
||||
|
@ -2375,7 +2379,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
// finalizeINodeFileUnderConstruction so we need to refresh
|
||||
// the referenced file.
|
||||
myFile = INodeFile.valueOf(dir.getINode(src), src, true);
|
||||
|
||||
final BlockInfo lastBlock = myFile.getLastBlock();
|
||||
// Check that the block has at least minimum replication.
|
||||
if(lastBlock != null && lastBlock.isComplete() &&
|
||||
!getBlockManager().isSufficientlyReplicated(lastBlock)) {
|
||||
throw new IOException("append: lastBlock=" + lastBlock +
|
||||
" of src=" + src + " is not sufficiently replicated yet.");
|
||||
}
|
||||
final DatanodeDescriptor clientNode =
|
||||
blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
|
||||
return prepareFileForWrite(src, myFile, holder, clientMachine, clientNode,
|
||||
|
@ -2414,6 +2424,12 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
.getClientName(), src);
|
||||
|
||||
LocatedBlock ret = blockManager.convertLastBlockToUnderConstruction(cons);
|
||||
if (ret != null) {
|
||||
// update the quota: use the preferred block size for UC block
|
||||
final long diff = file.getPreferredBlockSize() - ret.getBlockSize();
|
||||
dir.updateSpaceConsumed(src, 0, diff);
|
||||
}
|
||||
|
||||
if (writeToEditLog) {
|
||||
getEditLog().logOpenFile(src, cons, logRetryCache);
|
||||
}
|
||||
|
@ -7688,7 +7704,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
}
|
||||
|
||||
void modifyAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
|
||||
aclConfigFlag.checkForApiCall();
|
||||
nnConf.checkAclsConfigFlag();
|
||||
HdfsFileStatus resultingStat = null;
|
||||
FSPermissionChecker pc = getPermissionChecker();
|
||||
checkOperation(OperationCategory.WRITE);
|
||||
|
@ -7709,7 +7725,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
}
|
||||
|
||||
void removeAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
|
||||
aclConfigFlag.checkForApiCall();
|
||||
nnConf.checkAclsConfigFlag();
|
||||
HdfsFileStatus resultingStat = null;
|
||||
FSPermissionChecker pc = getPermissionChecker();
|
||||
checkOperation(OperationCategory.WRITE);
|
||||
|
@ -7730,7 +7746,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
}
|
||||
|
||||
void removeDefaultAcl(String src) throws IOException {
|
||||
aclConfigFlag.checkForApiCall();
|
||||
nnConf.checkAclsConfigFlag();
|
||||
HdfsFileStatus resultingStat = null;
|
||||
FSPermissionChecker pc = getPermissionChecker();
|
||||
checkOperation(OperationCategory.WRITE);
|
||||
|
@ -7751,7 +7767,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
}
|
||||
|
||||
void removeAcl(String src) throws IOException {
|
||||
aclConfigFlag.checkForApiCall();
|
||||
nnConf.checkAclsConfigFlag();
|
||||
HdfsFileStatus resultingStat = null;
|
||||
FSPermissionChecker pc = getPermissionChecker();
|
||||
checkOperation(OperationCategory.WRITE);
|
||||
|
@ -7772,7 +7788,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
}
|
||||
|
||||
void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
|
||||
aclConfigFlag.checkForApiCall();
|
||||
nnConf.checkAclsConfigFlag();
|
||||
HdfsFileStatus resultingStat = null;
|
||||
FSPermissionChecker pc = getPermissionChecker();
|
||||
checkOperation(OperationCategory.WRITE);
|
||||
|
@ -7793,7 +7809,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
}
|
||||
|
||||
AclStatus getAclStatus(String src) throws IOException {
|
||||
aclConfigFlag.checkForApiCall();
|
||||
nnConf.checkAclsConfigFlag();
|
||||
FSPermissionChecker pc = getPermissionChecker();
|
||||
checkOperation(OperationCategory.READ);
|
||||
readLock();
|
||||
|
@ -7807,6 +7823,167 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
readUnlock();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Set xattr for a file or directory.
|
||||
*
|
||||
* @param src
|
||||
* - path on which it sets the xattr
|
||||
* @param xAttr
|
||||
* - xAttr details to set
|
||||
* @param flag
|
||||
* - xAttrs flags
|
||||
* @throws AccessControlException
|
||||
* @throws SafeModeException
|
||||
* @throws UnresolvedLinkException
|
||||
* @throws IOException
|
||||
*/
|
||||
void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
|
||||
throws AccessControlException, SafeModeException,
|
||||
UnresolvedLinkException, IOException {
|
||||
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
|
||||
if (cacheEntry != null && cacheEntry.isSuccess()) {
|
||||
return; // Return previous response
|
||||
}
|
||||
boolean success = false;
|
||||
try {
|
||||
setXAttrInt(src, xAttr, flag, cacheEntry != null);
|
||||
success = true;
|
||||
} catch (AccessControlException e) {
|
||||
logAuditEvent(false, "setXAttr", src);
|
||||
throw e;
|
||||
} finally {
|
||||
RetryCache.setState(cacheEntry, success);
|
||||
}
|
||||
}
|
||||
|
||||
private void setXAttrInt(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
|
||||
boolean logRetryCache) throws IOException {
|
||||
nnConf.checkXAttrsConfigFlag();
|
||||
checkXAttrSize(xAttr);
|
||||
HdfsFileStatus resultingStat = null;
|
||||
FSPermissionChecker pc = getPermissionChecker();
|
||||
XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
|
||||
checkOperation(OperationCategory.WRITE);
|
||||
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
|
||||
writeLock();
|
||||
try {
|
||||
checkOperation(OperationCategory.WRITE);
|
||||
checkNameNodeSafeMode("Cannot set XAttr on " + src);
|
||||
src = FSDirectory.resolvePath(src, pathComponents, dir);
|
||||
if (isPermissionEnabled) {
|
||||
checkOwner(pc, src);
|
||||
checkPathAccess(pc, src, FsAction.WRITE);
|
||||
}
|
||||
dir.setXAttr(src, xAttr, flag, logRetryCache);
|
||||
resultingStat = getAuditFileInfo(src, false);
|
||||
} finally {
|
||||
writeUnlock();
|
||||
}
|
||||
getEditLog().logSync();
|
||||
logAuditEvent(true, "setXAttr", src, null, resultingStat);
|
||||
}
|
||||
|
||||
/**
|
||||
* Verifies that the combined size of the name and value of an xattr is within
|
||||
* the configured limit. Setting a limit of zero disables this check.
|
||||
*/
|
||||
private void checkXAttrSize(XAttr xAttr) {
|
||||
if (nnConf.xattrMaxSize == 0) {
|
||||
return;
|
||||
}
|
||||
int size = xAttr.getName().getBytes(Charsets.UTF_8).length;
|
||||
if (xAttr.getValue() != null) {
|
||||
size += xAttr.getValue().length;
|
||||
}
|
||||
if (size > nnConf.xattrMaxSize) {
|
||||
throw new HadoopIllegalArgumentException(
|
||||
"The XAttr is too big. The maximum combined size of the"
|
||||
+ " name and value is " + nnConf.xattrMaxSize
|
||||
+ ", but the total size is " + size);
|
||||
}
|
||||
}
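As a worked example of the size check above: for an xattr built from the prefixed name "user.origin" with a 10-byte value, XAttr.getName() holds only the suffix "origin" (XAttrHelper.buildXAttr strips the namespace prefix), so the checked size is 6 name bytes + 10 value bytes = 16, compared against nnConf.xattrMaxSize; a configured limit of 0 disables the check entirely.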
|
  List<XAttr> getXAttrs(String src, List<XAttr> xAttrs) throws IOException {
    nnConf.checkXAttrsConfigFlag();
    FSPermissionChecker pc = getPermissionChecker();
    boolean getAll = xAttrs == null || xAttrs.isEmpty();
    List<XAttr> filteredXAttrs = null;
    if (!getAll) {
      filteredXAttrs = XAttrPermissionFilter.filterXAttrsForApi(pc, xAttrs);
      if (filteredXAttrs.isEmpty()) {
        return filteredXAttrs;
      }
    }
    checkOperation(OperationCategory.READ);
    readLock();
    try {
      checkOperation(OperationCategory.READ);
      if (isPermissionEnabled) {
        checkPathAccess(pc, src, FsAction.READ);
      }
      List<XAttr> all = dir.getXAttrs(src);
      List<XAttr> filteredAll = XAttrPermissionFilter.
        filterXAttrsForApi(pc, all);
      if (getAll) {
        return filteredAll;
      } else {
        if (filteredAll == null || filteredAll.isEmpty()) {
          return null;
        }
        List<XAttr> toGet = Lists.newArrayListWithCapacity(filteredXAttrs.size());
        for (XAttr xAttr : filteredXAttrs) {
          for (XAttr a : filteredAll) {
            if (xAttr.getNameSpace() == a.getNameSpace()
                && xAttr.getName().equals(a.getName())) {
              toGet.add(a);
              break;
            }
          }
        }
        return toGet;
      }
    } catch (AccessControlException e) {
      logAuditEvent(false, "getXAttrs", src);
      throw e;
    } finally {
      readUnlock();
    }
  }

  void removeXAttr(String src, XAttr xAttr) throws IOException {
    nnConf.checkXAttrsConfigFlag();
    HdfsFileStatus resultingStat = null;
    FSPermissionChecker pc = getPermissionChecker();
    try {
      XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
    } catch (AccessControlException e) {
      logAuditEvent(false, "removeXAttr", src);
      throw e;
    }
    checkOperation(OperationCategory.WRITE);
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot remove XAttr entry on " + src);
      src = FSDirectory.resolvePath(src, pathComponents, dir);
      if (isPermissionEnabled) {
        checkOwner(pc, src);
        checkPathAccess(pc, src, FsAction.WRITE);
      }

      dir.removeXAttr(src, xAttr);
      resultingStat = getAuditFileInfo(src, false);
    } catch (AccessControlException e) {
      logAuditEvent(false, "removeXAttr", src);
      throw e;
    } finally {
      writeUnlock();
    }
    getEditLog().logSync();
    logAuditEvent(true, "removeXAttr", src, null, resultingStat);
  }

  /**
   * Default AuditLogger implementation; used when no access logger is

@@ -7892,6 +8069,5 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
        logger.addAppender(asyncAppender);
      }
    }

}
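Editor's note: these NameNode-side methods back the client-facing FileSystem xattr API introduced in this merge. A minimal usage sketch follows, assuming a running cluster at the hypothetical URI hdfs://namenode:8020 with XAttrs enabled on the NameNode; it illustrates the call pattern and is not part of the patch.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Charsets;

public class XAttrClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical NameNode address; replace with a real cluster URI.
    FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf);
    Path file = new Path("/tmp/xattr-demo.txt");
    fs.create(file).close();

    // Names must carry a namespace prefix such as "user." or "trusted.".
    fs.setXAttr(file, "user.origin", "ingest-job-42".getBytes(Charsets.UTF_8));
    byte[] value = fs.getXAttr(file, "user.origin");
    System.out.println("user.origin = " + new String(value, Charsets.UTF_8));

    // Listing goes through getXAttrs(path); the NameNode filters it per namespace.
    System.out.println(fs.getXAttrs(file).keySet());
    fs.removeXAttr(file, "user.origin");
  }
}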
@@ -177,6 +177,44 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
    nodeToUpdate.removeAclFeature();
    return nodeToUpdate;
  }

  /**
   * @param snapshotId
   *          if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
   *          from the given snapshot; otherwise, get the result from the
   *          current inode.
   * @return XAttrFeature
   */
  abstract XAttrFeature getXAttrFeature(int snapshotId);

  @Override
  public final XAttrFeature getXAttrFeature() {
    return getXAttrFeature(Snapshot.CURRENT_STATE_ID);
  }

  /**
   * Set <code>XAttrFeature</code>
   */
  abstract void addXAttrFeature(XAttrFeature xAttrFeature);

  final INode addXAttrFeature(XAttrFeature xAttrFeature, int latestSnapshotId)
      throws QuotaExceededException {
    final INode nodeToUpdate = recordModification(latestSnapshotId);
    nodeToUpdate.addXAttrFeature(xAttrFeature);
    return nodeToUpdate;
  }

  /**
   * Remove <code>XAttrFeature</code>
   */
  abstract void removeXAttrFeature();

  final INode removeXAttrFeature(int lastestSnapshotId)
      throws QuotaExceededException {
    final INode nodeToUpdate = recordModification(lastestSnapshotId);
    nodeToUpdate.removeXAttrFeature();
    return nodeToUpdate;
  }

  /**
   * @return if the given snapshot id is {@link Snapshot#CURRENT_STATE_ID},
@ -21,6 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
|
|||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.fs.permission.PermissionStatus;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INodeWithAdditionalFields.PermissionStatusFormat;
|
||||
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
|
||||
|
||||
/**
|
||||
* The attributes of an inode.
|
||||
|
@ -50,6 +51,9 @@ public interface INodeAttributes {
|
|||
|
||||
/** @return the ACL feature. */
|
||||
public AclFeature getAclFeature();
|
||||
|
||||
/** @return the XAttrs feature. */
|
||||
public XAttrFeature getXAttrFeature();
|
||||
|
||||
/** @return the modification time. */
|
||||
public long getModificationTime();
|
||||
|
@ -64,14 +68,17 @@ public interface INodeAttributes {
|
|||
private final AclFeature aclFeature;
|
||||
private final long modificationTime;
|
||||
private final long accessTime;
|
||||
private XAttrFeature xAttrFeature;
|
||||
|
||||
SnapshotCopy(byte[] name, PermissionStatus permissions,
|
||||
AclFeature aclFeature, long modificationTime, long accessTime) {
|
||||
AclFeature aclFeature, long modificationTime, long accessTime,
|
||||
XAttrFeature xAttrFeature) {
|
||||
this.name = name;
|
||||
this.permission = PermissionStatusFormat.toLong(permissions);
|
||||
this.aclFeature = aclFeature;
|
||||
this.modificationTime = modificationTime;
|
||||
this.accessTime = accessTime;
|
||||
this.xAttrFeature = xAttrFeature;
|
||||
}
|
||||
|
||||
SnapshotCopy(INode inode) {
|
||||
|
@ -80,6 +87,7 @@ public interface INodeAttributes {
|
|||
this.aclFeature = inode.getAclFeature();
|
||||
this.modificationTime = inode.getModificationTime();
|
||||
this.accessTime = inode.getAccessTime();
|
||||
this.xAttrFeature = inode.getXAttrFeature();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -128,5 +136,10 @@ public interface INodeAttributes {
|
|||
public final long getAccessTime() {
|
||||
return accessTime;
|
||||
}
|
||||
|
||||
@Override
|
||||
public final XAttrFeature getXAttrFeature() {
|
||||
return xAttrFeature;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
|
|||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.fs.permission.PermissionStatus;
|
||||
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
|
||||
|
@ -35,8 +36,9 @@ public interface INodeDirectoryAttributes extends INodeAttributes {
|
|||
public static class SnapshotCopy extends INodeAttributes.SnapshotCopy
|
||||
implements INodeDirectoryAttributes {
|
||||
public SnapshotCopy(byte[] name, PermissionStatus permissions,
|
||||
AclFeature aclFeature, long modificationTime) {
|
||||
super(name, permissions, aclFeature, modificationTime, 0L);
|
||||
AclFeature aclFeature, long modificationTime,
|
||||
XAttrFeature xAttrsFeature) {
|
||||
super(name, permissions, aclFeature, modificationTime, 0L, xAttrsFeature);
|
||||
}
|
||||
|
||||
public SnapshotCopy(INodeDirectory dir) {
|
||||
|
@ -63,8 +65,8 @@ public interface INodeDirectoryAttributes extends INodeAttributes {
|
|||
|
||||
public CopyWithQuota(byte[] name, PermissionStatus permissions,
|
||||
AclFeature aclFeature, long modificationTime, long nsQuota,
|
||||
long dsQuota) {
|
||||
super(name, permissions, aclFeature, modificationTime);
|
||||
long dsQuota, XAttrFeature xAttrsFeature) {
|
||||
super(name, permissions, aclFeature, modificationTime, xAttrsFeature);
|
||||
this.nsQuota = nsQuota;
|
||||
this.dsQuota = dsQuota;
|
||||
}
|
||||
|
|
|
@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
|
|||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.fs.permission.PermissionStatus;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INodeFile.HeaderFormat;
|
||||
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
|
||||
|
||||
/**
|
||||
* The attributes of a file.
|
||||
|
@ -42,8 +43,9 @@ public interface INodeFileAttributes extends INodeAttributes {
|
|||
|
||||
public SnapshotCopy(byte[] name, PermissionStatus permissions,
|
||||
AclFeature aclFeature, long modificationTime, long accessTime,
|
||||
short replication, long preferredBlockSize) {
|
||||
super(name, permissions, aclFeature, modificationTime, accessTime);
|
||||
short replication, long preferredBlockSize, XAttrFeature xAttrsFeature) {
|
||||
super(name, permissions, aclFeature, modificationTime, accessTime,
|
||||
xAttrsFeature);
|
||||
|
||||
final long h = HeaderFormat.combineReplication(0L, replication);
|
||||
header = HeaderFormat.combinePreferredBlockSize(h, preferredBlockSize);
|
||||
|
|
|
@ -28,6 +28,7 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
|
|||
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
|
||||
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
|
||||
|
@ -228,6 +229,21 @@ public abstract class INodeReference extends INode {
|
|||
final void removeAclFeature() {
|
||||
referred.removeAclFeature();
|
||||
}
|
||||
|
||||
@Override
|
||||
final XAttrFeature getXAttrFeature(int snapshotId) {
|
||||
return referred.getXAttrFeature(snapshotId);
|
||||
}
|
||||
|
||||
@Override
|
||||
final void addXAttrFeature(XAttrFeature xAttrFeature) {
|
||||
referred.addXAttrFeature(xAttrFeature);
|
||||
}
|
||||
|
||||
@Override
|
||||
final void removeXAttrFeature() {
|
||||
referred.removeXAttrFeature();
|
||||
}
|
||||
|
||||
@Override
|
||||
public final short getFsPermissionShort() {
|
||||
|
|
|
@ -25,6 +25,8 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
|
|||
import org.apache.hadoop.hdfs.DFSUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
|
||||
import org.apache.hadoop.hdfs.server.namenode.AclFeature;
|
||||
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
|
||||
|
||||
/**
|
||||
* An {@link INode} representing a symbolic link.
|
||||
|
@ -110,4 +112,38 @@ public class INodeSymlink extends INodeWithAdditionalFields {
|
|||
super.dumpTreeRecursively(out, prefix, snapshot);
|
||||
out.println();
|
||||
}
|
||||
|
||||
/**
|
||||
* getAclFeature is not overridden because it is needed for resolving
|
||||
* symlinks.
|
||||
@Override
|
||||
final AclFeature getAclFeature(int snapshotId) {
|
||||
throw new UnsupportedOperationException("ACLs are not supported on symlinks");
|
||||
}
|
||||
*/
|
||||
|
||||
@Override
|
||||
public void removeAclFeature() {
|
||||
throw new UnsupportedOperationException("ACLs are not supported on symlinks");
|
||||
}
|
||||
|
||||
@Override
|
||||
public void addAclFeature(AclFeature f) {
|
||||
throw new UnsupportedOperationException("ACLs are not supported on symlinks");
|
||||
}
|
||||
|
||||
@Override
|
||||
final XAttrFeature getXAttrFeature(int snapshotId) {
|
||||
throw new UnsupportedOperationException("XAttrs are not supported on symlinks");
|
||||
}
|
||||
|
||||
@Override
|
||||
public void removeXAttrFeature() {
|
||||
throw new UnsupportedOperationException("XAttrs are not supported on symlinks");
|
||||
}
|
||||
|
||||
@Override
|
||||
public void addXAttrFeature(XAttrFeature f) {
|
||||
throw new UnsupportedOperationException("XAttrs are not supported on symlinks");
|
||||
}
|
||||
}
|
||||
|
|
|
@ -23,6 +23,7 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
|
|||
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INode.Feature;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
|
||||
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
|
||||
import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
|
@ -340,6 +341,30 @@ public abstract class INodeWithAdditionalFields extends INode
|
|||
|
||||
addFeature(f);
|
||||
}
|
||||
|
||||
@Override
|
||||
XAttrFeature getXAttrFeature(int snapshotId) {
|
||||
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
|
||||
return getSnapshotINode(snapshotId).getXAttrFeature();
|
||||
}
|
||||
|
||||
return getFeature(XAttrFeature.class);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void removeXAttrFeature() {
|
||||
XAttrFeature f = getXAttrFeature();
|
||||
Preconditions.checkNotNull(f);
|
||||
removeFeature(f);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void addXAttrFeature(XAttrFeature f) {
|
||||
XAttrFeature f1 = getXAttrFeature();
|
||||
Preconditions.checkState(f1 == null, "Duplicated XAttrFeature");
|
||||
|
||||
addFeature(f);
|
||||
}
|
||||
|
||||
public final Feature[] getFeatures() {
|
||||
return features;
|
||||
|
|
@@ -0,0 +1,104 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;

import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.AclException;

import com.google.common.base.Preconditions;

/**
 * This class is a common place for NN configuration.
 */
@InterfaceAudience.Private
final class NNConf {
  /**
   * Support for ACLs is controlled by a configuration flag. If the
   * configuration flag is false, then the NameNode will reject all
   * ACL-related operations.
   */
  private final boolean aclsEnabled;

  /**
   * Support for XAttrs is controlled by a configuration flag. If the
   * configuration flag is false, then the NameNode will reject all
   * XAttr-related operations.
   */
  private final boolean xattrsEnabled;

  /**
   * Maximum size of a single name-value extended attribute.
   */
  final int xattrMaxSize;

  /**
   * Creates a new NNConf from configuration.
   *
   * @param conf Configuration to check
   */
  public NNConf(Configuration conf) {
    aclsEnabled = conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,
        DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_DEFAULT);
    LogFactory.getLog(NNConf.class).info("ACLs enabled? " + aclsEnabled);
    xattrsEnabled = conf.getBoolean(
        DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY,
        DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_DEFAULT);
    LogFactory.getLog(NNConf.class).info("XAttrs enabled? " + xattrsEnabled);
    xattrMaxSize = conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,
        DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT);
    Preconditions.checkArgument(xattrMaxSize >= 0,
        "Cannot set a negative value for the maximum size of an xattr (%s).",
        DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY);
    final String unlimited = xattrMaxSize == 0 ? " (unlimited)" : "";
    LogFactory.getLog(NNConf.class).info(
        "Maximum size of an xattr: " + xattrMaxSize + unlimited);
  }

  /**
   * Checks the flag on behalf of an ACL API call.
   *
   * @throws AclException if ACLs are disabled
   */
  public void checkAclsConfigFlag() throws AclException {
    if (!aclsEnabled) {
      throw new AclException(String.format(
          "The ACL operation has been rejected. "
          + "Support for ACLs has been disabled by setting %s to false.",
          DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY));
    }
  }

  /**
   * Checks the flag on behalf of an XAttr API call.
   * @throws IOException if XAttrs are disabled
   */
  public void checkXAttrsConfigFlag() throws IOException {
    if (!xattrsEnabled) {
      throw new IOException(String.format(
          "The XAttr operation has been rejected. "
          + "Support for XAttrs has been disabled by setting %s to false.",
          DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY));
    }
  }
}
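Editor's note: a sketch of constructing NNConf directly, for context only. Since the class and its xattrMaxSize field are package-private, the sketch assumes it sits in the same org.apache.hadoop.hdfs.server.namenode package; the 16384-byte limit is an illustrative value, not a recommendation from the patch.

package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class NNConfSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Turn the xattr feature on and cap the combined name+value size; 0 means unlimited.
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, 16384); // illustrative

    NNConf nnConf = new NNConf(conf);
    System.out.println("xattr max size: " + nnConf.xattrMaxSize);
  }
}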
|
@@ -64,7 +64,8 @@ public class NameNodeLayoutVersion {
   */
  public static enum Feature implements LayoutFeature {
    ROLLING_UPGRADE(-55, -53, "Support rolling upgrade", false),
-    EDITLOG_LENGTH(-56, "Add length field to every edit log op");
+    EDITLOG_LENGTH(-56, "Add length field to every edit log op"),
+    XATTRS(-57, "Extended attributes");

    private final FeatureInfo info;

|
@ -49,6 +49,8 @@ import org.apache.hadoop.fs.Options;
|
|||
import org.apache.hadoop.fs.ParentNotDirectoryException;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.UnresolvedLinkException;
|
||||
import org.apache.hadoop.fs.XAttr;
|
||||
import org.apache.hadoop.fs.XAttrSetFlag;
|
||||
import org.apache.hadoop.fs.permission.AclEntry;
|
||||
import org.apache.hadoop.fs.permission.AclStatus;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
@@ -1381,5 +1383,22 @@ class NameNodeRpcServer implements NamenodeProtocols {
  public AclStatus getAclStatus(String src) throws IOException {
    return namesystem.getAclStatus(src);
  }

  @Override
  public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
      throws IOException {
    namesystem.setXAttr(src, xAttr, flag);
  }

  @Override
  public List<XAttr> getXAttrs(String src, List<XAttr> xAttrs)
      throws IOException {
    return namesystem.getXAttrs(src, xAttrs);
  }

  @Override
  public void removeXAttr(String src, XAttr xAttr) throws IOException {
    namesystem.removeXAttr(src, xAttr);
  }
}

@@ -0,0 +1,43 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.server.namenode.INode;

import com.google.common.collect.ImmutableList;

/**
 * Feature for extended attributes.
 */
@InterfaceAudience.Private
public class XAttrFeature implements INode.Feature {
  public static final ImmutableList<XAttr> EMPTY_ENTRY_LIST =
      ImmutableList.of();

  private final ImmutableList<XAttr> xAttrs;

  public XAttrFeature(ImmutableList<XAttr> xAttrs) {
    this.xAttrs = xAttrs;
  }

  public ImmutableList<XAttr> getXAttrs() {
    return xAttrs;
  }
}
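Editor's note: a small hedged sketch of building the feature directly. XAttrHelper.buildXAttr is the helper used elsewhere in this patch; the attribute name and value below are invented for illustration.

import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
import com.google.common.base.Charsets;
import com.google.common.collect.ImmutableList;

public class XAttrFeatureSketch {
  public static void main(String[] args) {
    // "user.owner-team" / "analytics" are made-up; buildXAttr splits off the namespace prefix.
    XAttr x = XAttrHelper.buildXAttr("user.owner-team", "analytics".getBytes(Charsets.UTF_8));
    XAttrFeature feature = new XAttrFeature(ImmutableList.of(x));
    System.out.println(feature.getXAttrs().size() + " xattr(s) stored on the feature");
  }
}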
@@ -0,0 +1,82 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs.server.namenode;

import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.security.AccessControlException;

import com.google.common.collect.Lists;

/**
 * There are four types of extended attributes <XAttr> defined by the
 * following namespaces:
 * <br>
 * USER - extended user attributes: these can be assigned to files and
 * directories to store arbitrary additional information. The access
 * permissions for user attributes are defined by the file permission
 * bits.
 * <br>
 * TRUSTED - trusted extended attributes: these are visible/accessible
 * only to/by the super user.
 * <br>
 * SECURITY - extended security attributes: these are used by the HDFS
 * core for security purposes and are not available through admin/user
 * API.
 * <br>
 * SYSTEM - extended system attributes: these are used by the HDFS
 * core and are not available through admin/user API.
 */
@InterfaceAudience.Private
public class XAttrPermissionFilter {

  static void checkPermissionForApi(FSPermissionChecker pc, XAttr xAttr)
      throws AccessControlException {
    if (xAttr.getNameSpace() == XAttr.NameSpace.USER ||
        (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED &&
        pc.isSuperUser())) {
      return;
    }
    throw new AccessControlException("User doesn't have permission for xattr: "
        + XAttrHelper.getPrefixName(xAttr));
  }

  static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc,
      List<XAttr> xAttrs) {
    assert xAttrs != null : "xAttrs can not be null";
    if (xAttrs == null || xAttrs.isEmpty()) {
      return xAttrs;
    }

    List<XAttr> filteredXAttrs = Lists.newArrayListWithCapacity(xAttrs.size());
    for (XAttr xAttr : xAttrs) {
      if (xAttr.getNameSpace() == XAttr.NameSpace.USER) {
        filteredXAttrs.add(xAttr);
      } else if (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED &&
          pc.isSuperUser()) {
        filteredXAttrs.add(xAttr);
      }
    }

    return filteredXAttrs;
  }
}
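Editor's note: in short, only USER attributes are visible to ordinary callers, TRUSTED additionally to the superuser, and SECURITY/SYSTEM never surface through this API. A standalone sketch of that decision table, mirroring the filter logic above without needing a live FSPermissionChecker:

import org.apache.hadoop.fs.XAttr;

public class XAttrFilterSketch {
  // Mirrors checkPermissionForApi/filterXAttrsForApi for illustration only.
  static boolean visibleToCaller(XAttr.NameSpace ns, boolean isSuperUser) {
    return ns == XAttr.NameSpace.USER
        || (ns == XAttr.NameSpace.TRUSTED && isSuperUser);
  }

  public static void main(String[] args) {
    for (XAttr.NameSpace ns : XAttr.NameSpace.values()) {
      System.out.println(ns + ": regular user=" + visibleToCaller(ns, false)
          + ", superuser=" + visibleToCaller(ns, true));
    }
  }
}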
@@ -0,0 +1,80 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs.server.namenode;

import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.INode;

import com.google.common.collect.ImmutableList;

/**
 * XAttrStorage is used to read and set xattrs for an inode.
 */
@InterfaceAudience.Private
public class XAttrStorage {

  /**
   * Reads the existing extended attributes of an inode. If the
   * inode does not have an <code>XAttr</code>, then this method
   * returns an empty list.
   * @param inode INode to read
   * @param snapshotId
   * @return List<XAttr> <code>XAttr</code> list.
   */
  public static List<XAttr> readINodeXAttrs(INode inode, int snapshotId) {
    XAttrFeature f = inode.getXAttrFeature(snapshotId);
    return f == null ? ImmutableList.<XAttr> of() : f.getXAttrs();
  }

  /**
   * Reads the existing extended attributes of an inode.
   * @param inode INode to read.
   * @return List<XAttr> <code>XAttr</code> list.
   */
  public static List<XAttr> readINodeXAttrs(INode inode) {
    XAttrFeature f = inode.getXAttrFeature();
    return f == null ? ImmutableList.<XAttr> of() : f.getXAttrs();
  }

  /**
   * Update xattrs of inode.
   * @param inode INode to update
   * @param xAttrs to update xAttrs.
   * @param snapshotId id of the latest snapshot of the inode
   */
  public static void updateINodeXAttrs(INode inode,
      List<XAttr> xAttrs, int snapshotId) throws QuotaExceededException {
    if (xAttrs == null || xAttrs.isEmpty()) {
      if (inode.getXAttrFeature() != null) {
        inode.removeXAttrFeature(snapshotId);
      }
      return;
    }

    ImmutableList<XAttr> newXAttrs = ImmutableList.copyOf(xAttrs);
    if (inode.getXAttrFeature() != null) {
      inode.removeXAttrFeature(snapshotId);
    }
    inode.addXAttrFeature(new XAttrFeature(newXAttrs), snapshotId);
  }
}
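Editor's note: updateINodeXAttrs replaces the whole XAttrFeature rather than mutating it in place, so adding one attribute is a read-modify-write over the full list. A hedged sketch of that pattern; the INode and snapshot id are supplied by the caller, and inside the NameNode this would run under FSDirectory's write lock.

import java.util.List;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.XAttrStorage;
import com.google.common.collect.Lists;

public class XAttrStorageSketch {
  // Appends one xattr by rebuilding the complete list and storing it back.
  static void addOne(INode inode, XAttr newXAttr, int latestSnapshotId)
      throws QuotaExceededException {
    List<XAttr> updated = Lists.newArrayList(XAttrStorage.readINodeXAttrs(inode));
    updated.add(newXAttr);
    XAttrStorage.updateINodeXAttrs(inode, updated, latestSnapshotId);
  }
}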
|
@ -65,6 +65,7 @@ import org.apache.hadoop.hdfs.server.namenode.SaveNamespaceContext;
|
|||
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList;
|
||||
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root;
|
||||
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
|
||||
import org.apache.hadoop.hdfs.util.Diff.ListType;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
|
@ -215,11 +216,16 @@ public class FSImageFormatPBSnapshot {
|
|||
acl = new AclFeature(FSImageFormatPBINode.Loader.loadAclEntries(
|
||||
fileInPb.getAcl(), state.getStringTable()));
|
||||
}
|
||||
XAttrFeature xAttrs = null;
|
||||
if (fileInPb.hasXAttrs()) {
|
||||
xAttrs = new XAttrFeature(FSImageFormatPBINode.Loader.loadXAttrs(
|
||||
fileInPb.getXAttrs(), state.getStringTable()));
|
||||
}
|
||||
|
||||
copy = new INodeFileAttributes.SnapshotCopy(pbf.getName()
|
||||
.toByteArray(), permission, acl, fileInPb.getModificationTime(),
|
||||
fileInPb.getAccessTime(), (short) fileInPb.getReplication(),
|
||||
fileInPb.getPreferredBlockSize());
|
||||
fileInPb.getPreferredBlockSize(), xAttrs);
|
||||
}
|
||||
|
||||
FileDiff diff = new FileDiff(pbf.getSnapshotId(), copy, null,
|
||||
|
@ -310,16 +316,21 @@ public class FSImageFormatPBSnapshot {
|
|||
acl = new AclFeature(FSImageFormatPBINode.Loader.loadAclEntries(
|
||||
dirCopyInPb.getAcl(), state.getStringTable()));
|
||||
}
|
||||
XAttrFeature xAttrs = null;
|
||||
if (dirCopyInPb.hasXAttrs()) {
|
||||
xAttrs = new XAttrFeature(FSImageFormatPBINode.Loader.loadXAttrs(
|
||||
dirCopyInPb.getXAttrs(), state.getStringTable()));
|
||||
}
|
||||
|
||||
long modTime = dirCopyInPb.getModificationTime();
|
||||
boolean noQuota = dirCopyInPb.getNsQuota() == -1
|
||||
&& dirCopyInPb.getDsQuota() == -1;
|
||||
|
||||
copy = noQuota ? new INodeDirectoryAttributes.SnapshotCopy(name,
|
||||
permission, acl, modTime)
|
||||
permission, acl, modTime, xAttrs)
|
||||
: new INodeDirectoryAttributes.CopyWithQuota(name, permission,
|
||||
acl, modTime, dirCopyInPb.getNsQuota(),
|
||||
dirCopyInPb.getDsQuota());
|
||||
dirCopyInPb.getDsQuota(), xAttrs);
|
||||
}
|
||||
// load created list
|
||||
List<INode> clist = loadCreatedList(in, dir,
|
||||
|
|
|
@ -34,8 +34,10 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
|
|||
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
|
||||
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
|
||||
import org.apache.hadoop.hdfs.util.ReadOnlyList;
|
||||
|
||||
import com.google.common.base.Predicate;
|
||||
import com.google.common.collect.Iterables;
|
||||
import com.google.common.collect.Lists;
|
||||
|
||||
|
@ -144,9 +146,20 @@ public class Snapshot implements Comparable<byte[]> {
|
|||
/** The root directory of the snapshot. */
|
||||
static public class Root extends INodeDirectory {
|
||||
Root(INodeDirectory other) {
|
||||
// Always preserve ACL.
|
||||
// Always preserve ACL, XAttr.
|
||||
super(other, false, Lists.newArrayList(
|
||||
Iterables.filter(Arrays.asList(other.getFeatures()), AclFeature.class))
|
||||
Iterables.filter(Arrays.asList(other.getFeatures()), new Predicate<Feature>() {
|
||||
|
||||
@Override
|
||||
public boolean apply(Feature input) {
|
||||
if (AclFeature.class.isInstance(input)
|
||||
|| XAttrFeature.class.isInstance(input)) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
}))
|
||||
.toArray(new Feature[0]));
|
||||
}
|
||||
|
||||
|
|
|
@ -28,6 +28,7 @@ import java.net.URISyntaxException;
|
|||
import java.security.PrivilegedExceptionAction;
|
||||
import java.util.ArrayList;
|
||||
import java.util.EnumSet;
|
||||
import java.util.List;
|
||||
|
||||
import javax.servlet.ServletContext;
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
|
@ -53,8 +54,10 @@ import org.apache.hadoop.conf.Configuration;
|
|||
import org.apache.hadoop.fs.ContentSummary;
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.Options;
|
||||
import org.apache.hadoop.fs.XAttr;
|
||||
import org.apache.hadoop.fs.permission.AclStatus;
|
||||
import org.apache.hadoop.hdfs.StorageType;
|
||||
import org.apache.hadoop.hdfs.XAttrHelper;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
|
||||
|
@ -88,6 +91,7 @@ import org.apache.hadoop.hdfs.web.resources.LengthParam;
|
|||
import org.apache.hadoop.hdfs.web.resources.ModificationTimeParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.NamenodeAddressParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.OffsetParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.OldSnapshotNameParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.OverwriteParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.OwnerParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.Param;
|
||||
|
@ -98,9 +102,14 @@ import org.apache.hadoop.hdfs.web.resources.RecursiveParam;
|
|||
import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.RenewerParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.SnapshotNameParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.UserParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.XAttrEncodingParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.XAttrNameParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.XAttrValueParam;
|
||||
import org.apache.hadoop.io.Text;
|
||||
import org.apache.hadoop.ipc.Server;
|
||||
import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
|
||||
|
@ -341,12 +350,23 @@ public class NamenodeWebHdfsMethods {
|
|||
@QueryParam(TokenArgumentParam.NAME) @DefaultValue(TokenArgumentParam.DEFAULT)
|
||||
final TokenArgumentParam delegationTokenArgument,
|
||||
@QueryParam(AclPermissionParam.NAME) @DefaultValue(AclPermissionParam.DEFAULT)
|
||||
final AclPermissionParam aclPermission
|
||||
final AclPermissionParam aclPermission,
|
||||
@QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT)
|
||||
final XAttrNameParam xattrName,
|
||||
@QueryParam(XAttrValueParam.NAME) @DefaultValue(XAttrValueParam.DEFAULT)
|
||||
final XAttrValueParam xattrValue,
|
||||
@QueryParam(XAttrSetFlagParam.NAME) @DefaultValue(XAttrSetFlagParam.DEFAULT)
|
||||
final XAttrSetFlagParam xattrSetFlag,
|
||||
@QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT)
|
||||
final SnapshotNameParam snapshotName,
|
||||
@QueryParam(OldSnapshotNameParam.NAME) @DefaultValue(OldSnapshotNameParam.DEFAULT)
|
||||
final OldSnapshotNameParam oldSnapshotName
|
||||
)throws IOException, InterruptedException {
|
||||
return put(ugi, delegation, username, doAsUser, ROOT, op, destination,
|
||||
owner, group, permission, overwrite, bufferSize, replication,
|
||||
blockSize, modificationTime, accessTime, renameOptions, createParent,
|
||||
delegationTokenArgument,aclPermission);
|
||||
delegationTokenArgument, aclPermission, xattrName, xattrValue,
|
||||
xattrSetFlag, snapshotName, oldSnapshotName);
|
||||
}
|
||||
|
||||
/** Handle HTTP PUT request. */
|
||||
|
@ -392,12 +412,24 @@ public class NamenodeWebHdfsMethods {
|
|||
@QueryParam(TokenArgumentParam.NAME) @DefaultValue(TokenArgumentParam.DEFAULT)
|
||||
final TokenArgumentParam delegationTokenArgument,
|
||||
@QueryParam(AclPermissionParam.NAME) @DefaultValue(AclPermissionParam.DEFAULT)
|
||||
final AclPermissionParam aclPermission
|
||||
final AclPermissionParam aclPermission,
|
||||
@QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT)
|
||||
final XAttrNameParam xattrName,
|
||||
@QueryParam(XAttrValueParam.NAME) @DefaultValue(XAttrValueParam.DEFAULT)
|
||||
final XAttrValueParam xattrValue,
|
||||
@QueryParam(XAttrSetFlagParam.NAME) @DefaultValue(XAttrSetFlagParam.DEFAULT)
|
||||
final XAttrSetFlagParam xattrSetFlag,
|
||||
@QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT)
|
||||
final SnapshotNameParam snapshotName,
|
||||
@QueryParam(OldSnapshotNameParam.NAME) @DefaultValue(OldSnapshotNameParam.DEFAULT)
|
||||
final OldSnapshotNameParam oldSnapshotName
|
||||
) throws IOException, InterruptedException {
|
||||
|
||||
init(ugi, delegation, username, doAsUser, path, op, destination, owner,
|
||||
group, permission, overwrite, bufferSize, replication, blockSize,
|
||||
modificationTime, accessTime, renameOptions, delegationTokenArgument,aclPermission);
|
||||
modificationTime, accessTime, renameOptions, delegationTokenArgument,
|
||||
aclPermission, xattrName, xattrValue, xattrSetFlag, snapshotName,
|
||||
oldSnapshotName);
|
||||
|
||||
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
|
||||
@Override
|
||||
|
@ -407,7 +439,8 @@ public class NamenodeWebHdfsMethods {
|
|||
path.getAbsolutePath(), op, destination, owner, group,
|
||||
permission, overwrite, bufferSize, replication, blockSize,
|
||||
modificationTime, accessTime, renameOptions, createParent,
|
||||
delegationTokenArgument,aclPermission);
|
||||
delegationTokenArgument, aclPermission, xattrName, xattrValue,
|
||||
xattrSetFlag, snapshotName, oldSnapshotName);
|
||||
} finally {
|
||||
reset();
|
||||
}
|
||||
|
@ -435,7 +468,12 @@ public class NamenodeWebHdfsMethods {
|
|||
final RenameOptionSetParam renameOptions,
|
||||
final CreateParentParam createParent,
|
||||
final TokenArgumentParam delegationTokenArgument,
|
||||
final AclPermissionParam aclPermission
|
||||
final AclPermissionParam aclPermission,
|
||||
final XAttrNameParam xattrName,
|
||||
final XAttrValueParam xattrValue,
|
||||
final XAttrSetFlagParam xattrSetFlag,
|
||||
final SnapshotNameParam snapshotName,
|
||||
final OldSnapshotNameParam oldSnapshotName
|
||||
) throws IOException, URISyntaxException {
|
||||
|
||||
final Configuration conf = (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
|
||||
|
@ -535,6 +573,28 @@ public class NamenodeWebHdfsMethods {
|
|||
np.setAcl(fullpath, aclPermission.getAclPermission(true));
|
||||
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
|
||||
}
|
||||
case SETXATTR: {
|
||||
np.setXAttr(
|
||||
fullpath,
|
||||
XAttrHelper.buildXAttr(xattrName.getXAttrName(),
|
||||
xattrValue.getXAttrValue()), xattrSetFlag.getFlag());
|
||||
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
|
||||
}
|
||||
case REMOVEXATTR: {
|
||||
np.removeXAttr(fullpath, XAttrHelper.buildXAttr(xattrName.getXAttrName()));
|
||||
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
|
||||
}
|
||||
case CREATESNAPSHOT: {
|
||||
String snapshotPath = np.createSnapshot(fullpath, snapshotName.getValue());
|
||||
final String js = JsonUtil.toJsonString(
|
||||
org.apache.hadoop.fs.Path.class.getSimpleName(), snapshotPath);
|
||||
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
|
||||
}
|
||||
case RENAMESNAPSHOT: {
|
||||
np.renameSnapshot(fullpath, oldSnapshotName.getValue(),
|
||||
snapshotName.getValue());
|
||||
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
|
||||
}
|
||||
default:
|
||||
throw new UnsupportedOperationException(op + " is not supported");
|
||||
}
|
||||
|
@ -650,10 +710,14 @@ public class NamenodeWebHdfsMethods {
|
|||
@QueryParam(RenewerParam.NAME) @DefaultValue(RenewerParam.DEFAULT)
|
||||
final RenewerParam renewer,
|
||||
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
|
||||
final BufferSizeParam bufferSize
|
||||
final BufferSizeParam bufferSize,
|
||||
@QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT)
|
||||
final XAttrNameParam xattrName,
|
||||
@QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT)
|
||||
final XAttrEncodingParam xattrEncoding
|
||||
) throws IOException, InterruptedException {
|
||||
return get(ugi, delegation, username, doAsUser, ROOT, op,
|
||||
offset, length, renewer, bufferSize);
|
||||
return get(ugi, delegation, username, doAsUser, ROOT, op, offset, length,
|
||||
renewer, bufferSize, xattrName, xattrEncoding);
|
||||
}
|
||||
|
||||
/** Handle HTTP GET request. */
|
||||
|
@ -678,18 +742,23 @@ public class NamenodeWebHdfsMethods {
|
|||
@QueryParam(RenewerParam.NAME) @DefaultValue(RenewerParam.DEFAULT)
|
||||
final RenewerParam renewer,
|
||||
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
|
||||
final BufferSizeParam bufferSize
|
||||
final BufferSizeParam bufferSize,
|
||||
@QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT)
|
||||
final XAttrNameParam xattrName,
|
||||
@QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT)
|
||||
final XAttrEncodingParam xattrEncoding
|
||||
) throws IOException, InterruptedException {
|
||||
|
||||
init(ugi, delegation, username, doAsUser, path, op,
|
||||
offset, length, renewer, bufferSize);
|
||||
init(ugi, delegation, username, doAsUser, path, op, offset, length,
|
||||
renewer, bufferSize, xattrName, xattrEncoding);
|
||||
|
||||
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
|
||||
@Override
|
||||
public Response run() throws IOException, URISyntaxException {
|
||||
try {
|
||||
return get(ugi, delegation, username, doAsUser,
|
||||
path.getAbsolutePath(), op, offset, length, renewer, bufferSize);
|
||||
path.getAbsolutePath(), op, offset, length, renewer, bufferSize,
|
||||
xattrName, xattrEncoding);
|
||||
} finally {
|
||||
reset();
|
||||
}
|
||||
|
@ -707,7 +776,9 @@ public class NamenodeWebHdfsMethods {
|
|||
final OffsetParam offset,
|
||||
final LengthParam length,
|
||||
final RenewerParam renewer,
|
||||
final BufferSizeParam bufferSize
|
||||
final BufferSizeParam bufferSize,
|
||||
final XAttrNameParam xattrName,
|
||||
final XAttrEncodingParam xattrEncoding
|
||||
) throws IOException, URISyntaxException {
|
||||
final NameNode namenode = (NameNode)context.getAttribute("name.node");
|
||||
final NamenodeProtocols np = getRPCServer(namenode);
|
||||
|
@ -782,6 +853,19 @@ public class NamenodeWebHdfsMethods {
|
|||
final String js = JsonUtil.toJsonString(status);
|
||||
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
|
||||
}
|
||||
case GETXATTR: {
|
||||
XAttr xAttr = XAttrHelper.getFirstXAttr(np.getXAttrs(fullpath,
|
||||
XAttrHelper.buildXAttrAsList(xattrName.getXAttrName())));
|
||||
final String js = JsonUtil.toJsonString(xAttr,
|
||||
xattrEncoding.getEncoding());
|
||||
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
|
||||
}
|
||||
case GETXATTRS: {
|
||||
List<XAttr> xAttrs = np.getXAttrs(fullpath, null);
|
||||
final String js = JsonUtil.toJsonString(xAttrs,
|
||||
xattrEncoding.getEncoding());
|
||||
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
|
||||
}
|
||||
default:
|
||||
throw new UnsupportedOperationException(op + " is not supported");
|
||||
}
|
||||
|
@ -865,9 +949,12 @@ public class NamenodeWebHdfsMethods {
|
|||
@QueryParam(DeleteOpParam.NAME) @DefaultValue(DeleteOpParam.DEFAULT)
|
||||
final DeleteOpParam op,
|
||||
@QueryParam(RecursiveParam.NAME) @DefaultValue(RecursiveParam.DEFAULT)
|
||||
final RecursiveParam recursive
|
||||
final RecursiveParam recursive,
|
||||
@QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT)
|
||||
final SnapshotNameParam snapshotName
|
||||
) throws IOException, InterruptedException {
|
||||
return delete(ugi, delegation, username, doAsUser, ROOT, op, recursive);
|
||||
return delete(ugi, delegation, username, doAsUser, ROOT, op, recursive,
|
||||
snapshotName);
|
||||
}
|
||||
|
||||
/** Handle HTTP DELETE request. */
|
||||
|
@ -886,17 +973,19 @@ public class NamenodeWebHdfsMethods {
|
|||
@QueryParam(DeleteOpParam.NAME) @DefaultValue(DeleteOpParam.DEFAULT)
|
||||
final DeleteOpParam op,
|
||||
@QueryParam(RecursiveParam.NAME) @DefaultValue(RecursiveParam.DEFAULT)
|
||||
final RecursiveParam recursive
|
||||
final RecursiveParam recursive,
|
||||
@QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT)
|
||||
final SnapshotNameParam snapshotName
|
||||
) throws IOException, InterruptedException {
|
||||
|
||||
init(ugi, delegation, username, doAsUser, path, op, recursive);
|
||||
init(ugi, delegation, username, doAsUser, path, op, recursive, snapshotName);
|
||||
|
||||
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
|
||||
@Override
|
||||
public Response run() throws IOException {
|
||||
try {
|
||||
return delete(ugi, delegation, username, doAsUser,
|
||||
path.getAbsolutePath(), op, recursive);
|
||||
path.getAbsolutePath(), op, recursive, snapshotName);
|
||||
} finally {
|
||||
reset();
|
||||
}
|
||||
|
@ -911,17 +1000,22 @@ public class NamenodeWebHdfsMethods {
|
|||
final DoAsParam doAsUser,
|
||||
final String fullpath,
|
||||
final DeleteOpParam op,
|
||||
final RecursiveParam recursive
|
||||
final RecursiveParam recursive,
|
||||
final SnapshotNameParam snapshotName
|
||||
) throws IOException {
|
||||
final NameNode namenode = (NameNode)context.getAttribute("name.node");
|
||||
final NamenodeProtocols np = getRPCServer(namenode);
|
||||
|
||||
switch(op.getValue()) {
|
||||
case DELETE:
|
||||
{
|
||||
final boolean b = getRPCServer(namenode).delete(fullpath, recursive.getValue());
|
||||
case DELETE: {
|
||||
final boolean b = np.delete(fullpath, recursive.getValue());
|
||||
final String js = JsonUtil.toJsonString("boolean", b);
|
||||
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
|
||||
}
|
||||
case DELETESNAPSHOT: {
|
||||
np.deleteSnapshot(fullpath, snapshotName.getValue());
|
||||
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
|
||||
}
|
||||
default:
|
||||
throw new UnsupportedOperationException(op + " is not supported");
|
||||
}
|
||||
|
|
|
@ -22,6 +22,7 @@ import org.apache.hadoop.fs.permission.AclEntry;
|
|||
import org.apache.hadoop.fs.permission.AclStatus;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hdfs.DFSUtil;
|
||||
import org.apache.hadoop.hdfs.XAttrHelper;
|
||||
import org.apache.hadoop.hdfs.protocol.*;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||
|
@ -34,6 +35,8 @@ import org.apache.hadoop.util.DataChecksum;
|
|||
import org.apache.hadoop.util.StringUtils;
|
||||
import org.mortbay.util.ajax.JSON;
|
||||
|
||||
import com.google.common.collect.Maps;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.DataInputStream;
|
||||
import java.io.IOException;
|
||||
|
@ -661,4 +664,125 @@ public class JsonUtil {
|
|||
aclStatusBuilder.addEntries(aclEntryList);
|
||||
return aclStatusBuilder.build();
|
||||
}
|
||||
|
||||
public static String toJsonString(final XAttr xAttr,
|
||||
final XAttrCodec encoding) throws IOException {
|
||||
if (xAttr == null) {
|
||||
return "{}";
|
||||
}
|
||||
final Map<String, Object> m = new TreeMap<String, Object>();
|
||||
m.put("name", XAttrHelper.getPrefixName(xAttr));
|
||||
m.put("value", xAttr.getValue() != null ?
|
||||
XAttrCodec.encodeValue(xAttr.getValue(), encoding) : null);
|
||||
final Map<String, Map<String, Object>> finalMap =
|
||||
new TreeMap<String, Map<String, Object>>();
|
||||
finalMap.put(XAttr.class.getSimpleName(), m);
|
||||
return JSON.toString(finalMap);
|
||||
}
|
||||
|
||||
private static Map<String, Object> toJsonMap(final XAttr xAttr,
|
||||
final XAttrCodec encoding) throws IOException {
|
||||
if (xAttr == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
final Map<String, Object> m = new TreeMap<String, Object>();
|
||||
m.put("name", XAttrHelper.getPrefixName(xAttr));
|
||||
m.put("value", xAttr.getValue() != null ?
|
||||
XAttrCodec.encodeValue(xAttr.getValue(), encoding) : null);
|
||||
return m;
|
||||
}
|
||||
|
||||
private static Object[] toJsonArray(final List<XAttr> array,
|
||||
final XAttrCodec encoding) throws IOException {
|
||||
if (array == null) {
|
||||
return null;
|
||||
} else if (array.size() == 0) {
|
||||
return EMPTY_OBJECT_ARRAY;
|
||||
} else {
|
||||
final Object[] a = new Object[array.size()];
|
||||
for(int i = 0; i < array.size(); i++) {
|
||||
a[i] = toJsonMap(array.get(i), encoding);
|
||||
}
|
||||
return a;
|
||||
}
|
||||
}
|
||||
|
||||
public static String toJsonString(final List<XAttr> xAttrs,
|
||||
final XAttrCodec encoding) throws IOException {
|
||||
final Map<String, Object> finalMap = new TreeMap<String, Object>();
|
||||
finalMap.put("XAttrs", toJsonArray(xAttrs, encoding));
|
||||
return JSON.toString(finalMap);
|
||||
}
|
||||
|
||||
public static XAttr toXAttr(final Map<?, ?> json) throws IOException {
|
||||
if (json == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
Map<?, ?> m = (Map<?, ?>) json.get(XAttr.class.getSimpleName());
|
||||
if (m == null) {
|
||||
return null;
|
||||
}
|
||||
String name = (String) m.get("name");
|
||||
String value = (String) m.get("value");
|
||||
return XAttrHelper.buildXAttr(name, decodeXAttrValue(value));
|
||||
}
|
||||
|
||||
public static Map<String, byte[]> toXAttrs(final Map<?, ?> json)
|
||||
throws IOException {
|
||||
if (json == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return toXAttrMap((Object[])json.get("XAttrs"));
|
||||
}
|
||||
|
||||
public static Map<String, byte[]> toXAttrs(final Map<?, ?> json,
|
||||
List<String> names) throws IOException {
|
||||
if (json == null || names == null) {
|
||||
return null;
|
||||
}
|
||||
if (names.isEmpty()) {
|
||||
return Maps.newHashMap();
|
||||
}
|
||||
Map<String, byte[]> xAttrs = toXAttrs(json);
|
||||
if (xAttrs == null || xAttrs.isEmpty()) {
|
||||
return xAttrs;
|
||||
}
|
||||
|
||||
Map<String, byte[]> result = Maps.newHashMap();
|
||||
for (String name : names) {
|
||||
if (xAttrs.containsKey(name)) {
|
||||
result.put(name, xAttrs.get(name));
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
private static Map<String, byte[]> toXAttrMap(final Object[] objects)
|
||||
throws IOException {
|
||||
if (objects == null) {
|
||||
return null;
|
||||
} else if (objects.length == 0) {
|
||||
return Maps.newHashMap();
|
||||
} else {
|
||||
final Map<String, byte[]> xAttrs = Maps.newHashMap();
|
||||
for(int i = 0; i < objects.length; i++) {
|
||||
Map<?, ?> m = (Map<?, ?>) objects[i];
|
||||
String name = (String) m.get("name");
|
||||
String value = (String) m.get("value");
|
||||
xAttrs.put(name, decodeXAttrValue(value));
|
||||
}
|
||||
return xAttrs;
|
||||
}
|
||||
}
|
||||
|
||||
private static byte[] decodeXAttrValue(String value) throws IOException {
|
||||
if (value != null) {
|
||||
return XAttrCodec.decodeValue(value);
|
||||
} else {
|
||||
return new byte[0];
|
||||
}
|
||||
}
|
||||
}
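Editor's note: for a sense of the wire format these JSON helpers produce and consume, a hedged sketch follows; the attribute is invented, the JsonUtil import assumes the class lives in org.apache.hadoop.hdfs.web, and the exact output (field ordering, hex casing) may differ slightly from what a real cluster returns.

import java.util.Arrays;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.web.JsonUtil;
import com.google.common.base.Charsets;

public class XAttrJsonSketch {
  public static void main(String[] args) throws Exception {
    XAttr x = XAttrHelper.buildXAttr("user.color", "blue".getBytes(Charsets.UTF_8));
    String json = JsonUtil.toJsonString(Arrays.asList(x), XAttrCodec.HEX);
    // Roughly: {"XAttrs":[{"name":"user.color","value":"0x626c7565"}]}
    System.out.println(json);
  }
}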
|
||||
|
|
|
@ -30,6 +30,7 @@ import java.net.URI;
|
|||
import java.net.URL;
|
||||
import java.security.PrivilegedExceptionAction;
|
||||
import java.util.ArrayList;
|
||||
import java.util.EnumSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.StringTokenizer;
|
||||
|
@ -49,6 +50,9 @@ import org.apache.hadoop.fs.FileSystem;
|
|||
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
|
||||
import org.apache.hadoop.fs.Options;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.XAttr;
|
||||
import org.apache.hadoop.fs.XAttrCodec;
|
||||
import org.apache.hadoop.fs.XAttrSetFlag;
|
||||
import org.apache.hadoop.fs.permission.AclEntry;
|
||||
import org.apache.hadoop.fs.permission.AclStatus;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
|
@ -813,6 +817,66 @@ public class WebHdfsFileSystem extends FileSystem
|
|||
new RenameOptionSetParam(options)
|
||||
).run();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setXAttr(Path p, String name, byte[] value,
|
||||
EnumSet<XAttrSetFlag> flag) throws IOException {
|
||||
statistics.incrementWriteOps(1);
|
||||
final HttpOpParam.Op op = PutOpParam.Op.SETXATTR;
|
||||
if (value != null) {
|
||||
new FsPathRunner(op, p, new XAttrNameParam(name), new XAttrValueParam(
|
||||
XAttrCodec.encodeValue(value, XAttrCodec.HEX)),
|
||||
new XAttrSetFlagParam(flag)).run();
|
||||
} else {
|
||||
new FsPathRunner(op, p, new XAttrNameParam(name),
|
||||
new XAttrSetFlagParam(flag)).run();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] getXAttr(Path p, String name) throws IOException {
|
||||
final HttpOpParam.Op op = GetOpParam.Op.GETXATTR;
|
||||
return new FsPathResponseRunner<byte[]>(op, p, new XAttrNameParam(name),
|
||||
new XAttrEncodingParam(XAttrCodec.HEX)) {
|
||||
@Override
|
||||
byte[] decodeResponse(Map<?, ?> json) throws IOException {
|
||||
XAttr xAttr = JsonUtil.toXAttr(json);
|
||||
return xAttr != null ? xAttr.getValue() : null;
|
||||
}
|
||||
}.run();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, byte[]> getXAttrs(Path p) throws IOException {
|
||||
final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS;
|
||||
return new FsPathResponseRunner<Map<String, byte[]>>(op, p,
|
||||
new XAttrEncodingParam(XAttrCodec.HEX)) {
|
||||
@Override
|
||||
Map<String, byte[]> decodeResponse(Map<?, ?> json) throws IOException {
|
||||
return JsonUtil.toXAttrs(json);
|
||||
}
|
||||
}.run();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, byte[]> getXAttrs(Path p, final List<String> names)
|
||||
throws IOException {
|
||||
final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS;
|
||||
return new FsPathResponseRunner<Map<String, byte[]>>(op, p,
|
||||
new XAttrEncodingParam(XAttrCodec.HEX)) {
|
||||
@Override
|
||||
Map<String, byte[]> decodeResponse(Map<?, ?> json) throws IOException {
|
||||
return JsonUtil.toXAttrs(json, names);
|
||||
}
|
||||
}.run();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void removeXAttr(Path p, String name) throws IOException {
|
||||
statistics.incrementWriteOps(1);
|
||||
final HttpOpParam.Op op = PutOpParam.Op.REMOVEXATTR;
|
||||
new FsPathRunner(op, p, new XAttrNameParam(name)).run();
|
||||
}
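Editor's note: the client methods above map onto the SETXATTR/GETXATTR/GETXATTRS/REMOVEXATTR REST operations, so the same FileSystem calls work over webhdfs:// URIs. A minimal sketch, assuming WebHDFS is enabled and a NameNode HTTP endpoint at the hypothetical address namenode:50070; it is illustrative only.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Charsets;

public class WebHdfsXAttrSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical WebHDFS endpoint; substitute the real NameNode host:port.
    FileSystem fs = FileSystem.get(URI.create("webhdfs://namenode:50070"), conf);
    Path p = new Path("/tmp/webhdfs-xattr-demo");
    fs.create(p).close();

    fs.setXAttr(p, "user.source", "webhdfs-demo".getBytes(Charsets.UTF_8));
    // The value travels hex-encoded on the wire and is decoded back client-side.
    byte[] v = fs.getXAttr(p, "user.source");
    System.out.println(new String(v, Charsets.UTF_8));
    fs.removeXAttr(p, "user.source");
  }
}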
|
||||
|
||||
@Override
|
||||
public void setOwner(final Path p, final String owner, final String group
|
||||
|
@ -874,6 +938,38 @@ public class WebHdfsFileSystem extends FileSystem
|
|||
new FsPathRunner(op, p, new AclPermissionParam(aclSpec)).run();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Path createSnapshot(final Path path, final String snapshotName)
|
||||
throws IOException {
|
||||
statistics.incrementWriteOps(1);
|
||||
final HttpOpParam.Op op = PutOpParam.Op.CREATESNAPSHOT;
|
||||
Path spath = new FsPathResponseRunner<Path>(op, path,
|
||||
new SnapshotNameParam(snapshotName)) {
|
||||
@Override
|
||||
Path decodeResponse(Map<?,?> json) {
|
||||
return new Path((String) json.get(Path.class.getSimpleName()));
|
||||
}
|
||||
}.run();
|
||||
return spath;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void deleteSnapshot(final Path path, final String snapshotName)
|
||||
throws IOException {
|
||||
statistics.incrementWriteOps(1);
|
||||
final HttpOpParam.Op op = DeleteOpParam.Op.DELETESNAPSHOT;
|
||||
new FsPathRunner(op, path, new SnapshotNameParam(snapshotName)).run();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void renameSnapshot(final Path path, final String snapshotOldName,
|
||||
final String snapshotNewName) throws IOException {
|
||||
statistics.incrementWriteOps(1);
|
||||
final HttpOpParam.Op op = PutOpParam.Op.RENAMESNAPSHOT;
|
||||
new FsPathRunner(op, path, new OldSnapshotNameParam(snapshotOldName),
|
||||
new SnapshotNameParam(snapshotNewName)).run();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean setReplication(final Path p, final short replication
|
||||
) throws IOException {
|
||||
|
|
|
@ -24,6 +24,7 @@ public class DeleteOpParam extends HttpOpParam<DeleteOpParam.Op> {
|
|||
/** Delete operations. */
|
||||
public static enum Op implements HttpOpParam.Op {
|
||||
DELETE(HttpURLConnection.HTTP_OK),
|
||||
DELETESNAPSHOT(HttpURLConnection.HTTP_OK),
|
||||
|
||||
NULL(HttpURLConnection.HTTP_NOT_IMPLEMENTED);
|
||||
|
||||
|
|
|
@ -36,6 +36,8 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
|
|||
/** GET_BLOCK_LOCATIONS is a private unstable op. */
|
||||
GET_BLOCK_LOCATIONS(false, HttpURLConnection.HTTP_OK),
|
||||
GETACLSTATUS(false, HttpURLConnection.HTTP_OK),
|
||||
GETXATTR(false, HttpURLConnection.HTTP_OK),
|
||||
GETXATTRS(false, HttpURLConnection.HTTP_OK),
|
||||
|
||||
NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
|
||||
|
||||
|
|
|
@ -0,0 +1,40 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.web.resources;
|
||||
|
||||
/**
|
||||
* The old snapshot name parameter for renameSnapshot operation.
|
||||
*/
|
||||
public class OldSnapshotNameParam extends StringParam {
|
||||
/** Parameter name. */
|
||||
public static final String NAME = "oldsnapshotname";
|
||||
|
||||
/** Default parameter value. */
|
||||
public static final String DEFAULT = "";
|
||||
|
||||
private static final Domain DOMAIN = new Domain(NAME, null);
|
||||
|
||||
public OldSnapshotNameParam(final String str) {
|
||||
super(DOMAIN, str != null && !str.equals(DEFAULT) ? str : null);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return NAME;
|
||||
}
|
||||
}
|
|
@@ -43,6 +43,12 @@ public class PutOpParam extends HttpOpParam<PutOpParam.Op> {
    REMOVEACL(false, HttpURLConnection.HTTP_OK),
    SETACL(false, HttpURLConnection.HTTP_OK),

    SETXATTR(false, HttpURLConnection.HTTP_OK),
    REMOVEXATTR(false, HttpURLConnection.HTTP_OK),

    CREATESNAPSHOT(false, HttpURLConnection.HTTP_OK),
    RENAMESNAPSHOT(false, HttpURLConnection.HTTP_OK),

    NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);

    final boolean doOutputAndRedirect;
|
@ -0,0 +1,41 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.web.resources;
|
||||
|
||||
/**
|
||||
* The snapshot name parameter for createSnapshot and deleteSnapshot operation.
|
||||
* Also used to indicate the new snapshot name for renameSnapshot operation.
|
||||
*/
|
||||
public class SnapshotNameParam extends StringParam {
|
||||
/** Parameter name. */
|
||||
public static final String NAME = "snapshotname";
|
||||
|
||||
/** Default parameter value. */
|
||||
public static final String DEFAULT = "";
|
||||
|
||||
private static final Domain DOMAIN = new Domain(NAME, null);
|
||||
|
||||
public SnapshotNameParam(final String str) {
|
||||
super(DOMAIN, str != null && !str.equals(DEFAULT) ? str : null);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return NAME;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,56 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.web.resources;
|
||||
|
||||
import org.apache.hadoop.fs.XAttrCodec;
|
||||
|
||||
public class XAttrEncodingParam extends EnumParam<XAttrCodec> {
|
||||
/** Parameter name. */
|
||||
public static final String NAME = "encoding";
|
||||
/** Default parameter value. */
|
||||
public static final String DEFAULT = "";
|
||||
|
||||
private static final Domain<XAttrCodec> DOMAIN =
|
||||
new Domain<XAttrCodec>(NAME, XAttrCodec.class);
|
||||
|
||||
public XAttrEncodingParam(final XAttrCodec encoding) {
|
||||
super(DOMAIN, encoding);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor.
|
||||
* @param str a string representation of the parameter value.
|
||||
*/
|
||||
public XAttrEncodingParam(final String str) {
|
||||
super(DOMAIN, str != null && !str.isEmpty() ? DOMAIN.parse(str) : null);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return NAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getValueString() {
|
||||
return value.toString();
|
||||
}
|
||||
|
||||
public XAttrCodec getEncoding() {
|
||||
return getValue();
|
||||
}
|
||||
}
|
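The encoding parameter resolves to an org.apache.hadoop.fs.XAttrCodec value. As a rough illustration of the three encodings documented later in this change (text, hex, base64), the sketch below round-trips a value through the codec; XAttrCodec.decodeValue(String) is visible in XAttrValueParam further down, while the encodeValue(byte[], XAttrCodec) signature and the TEXT/HEX/BASE64 constant names are assumptions:

import java.io.IOException;

import org.apache.hadoop.fs.XAttrCodec;

public class XAttrCodecExample {
  public static void main(String[] args) throws IOException {
    byte[] raw = {0x31, 0x32, 0x33};   // the bytes of the string "123"

    // Assumed encodeValue(byte[], XAttrCodec) helper and enum constant names.
    System.out.println(XAttrCodec.encodeValue(raw, XAttrCodec.TEXT));   // quoted text form
    System.out.println(XAttrCodec.encodeValue(raw, XAttrCodec.HEX));    // 0x-prefixed hex form
    System.out.println(XAttrCodec.encodeValue(raw, XAttrCodec.BASE64)); // 0s-prefixed base64 form

    // decodeValue recognizes the 0x / 0s prefixes described in the docs.
    byte[] roundTrip = XAttrCodec.decodeValue("0x313233");
    System.out.println(roundTrip.length);   // 3
  }
}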
|
@ -0,0 +1,44 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.web.resources;
|
||||
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
public class XAttrNameParam extends StringParam {
|
||||
/** Parameter name. **/
|
||||
public static final String NAME = "xattr.name";
|
||||
/** Default parameter value. **/
|
||||
public static final String DEFAULT = "";
|
||||
|
||||
private static Domain DOMAIN = new Domain(NAME,
|
||||
Pattern.compile("^(user\\.|trusted\\.|system\\.|security\\.).+"));
|
||||
|
||||
public XAttrNameParam(final String str) {
|
||||
super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return NAME;
|
||||
}
|
||||
|
||||
public String getXAttrName() {
|
||||
final String v = getValue();
|
||||
return v;
|
||||
}
|
||||
}
|
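The Domain regex above is what restricts WebHDFS xattr names to the four recognized namespace prefixes. A tiny standalone check against the same pattern (illustrative only, not part of the patch):

import java.util.regex.Pattern;

public class XAttrNamePatternCheck {
  // Same pattern as the Domain in XAttrNameParam above.
  private static final Pattern P =
      Pattern.compile("^(user\\.|trusted\\.|system\\.|security\\.).+");

  public static void main(String[] args) {
    System.out.println(P.matcher("user.myXattr").matches());   // true
    System.out.println(P.matcher("trusted.cache").matches());  // true
    System.out.println(P.matcher("myXattr").matches());        // false: no namespace prefix
    System.out.println(P.matcher("user.").matches());          // false: empty name after prefix
  }
}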
|
@ -0,0 +1,53 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.web.resources;
|
||||
|
||||
import java.util.EnumSet;
|
||||
|
||||
import org.apache.hadoop.fs.XAttrSetFlag;
|
||||
|
||||
public class XAttrSetFlagParam extends EnumSetParam<XAttrSetFlag> {
|
||||
/** Parameter name. */
|
||||
public static final String NAME = "flag";
|
||||
/** Default parameter value. */
|
||||
public static final String DEFAULT = "";
|
||||
|
||||
private static final Domain<XAttrSetFlag> DOMAIN = new Domain<XAttrSetFlag>(
|
||||
NAME, XAttrSetFlag.class);
|
||||
|
||||
public XAttrSetFlagParam(final EnumSet<XAttrSetFlag> flag) {
|
||||
super(DOMAIN, flag);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor.
|
||||
* @param str a string representation of the parameter value.
|
||||
*/
|
||||
public XAttrSetFlagParam(final String str) {
|
||||
super(DOMAIN, DOMAIN.parse(str));
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return NAME;
|
||||
}
|
||||
|
||||
public EnumSet<XAttrSetFlag> getFlag() {
|
||||
return getValue();
|
||||
}
|
||||
}
|
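This parameter carries the create/replace flags that decide whether setting an xattr may create a new entry, overwrite an existing one, or both. A minimal sketch of how a client passes the flags through FileSystem.setXAttr; the EnumSet overload is part of the xattr FileSystem API referenced in this merge, and the path used here is a placeholder:

import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;

public class XAttrSetFlagExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/tmp/xattr-demo");   // placeholder path
    fs.mkdirs(p);

    // CREATE: fails if the xattr already exists.
    fs.setXAttr(p, "user.a1", new byte[]{0x31}, EnumSet.of(XAttrSetFlag.CREATE));

    // REPLACE: fails if the xattr does not exist yet.
    fs.setXAttr(p, "user.a1", new byte[]{0x32}, EnumSet.of(XAttrSetFlag.REPLACE));

    // CREATE|REPLACE: unconditional set.
    fs.setXAttr(p, "user.a2", new byte[]{0x33},
        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
    fs.close();
  }
}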
|
@ -0,0 +1,45 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.web.resources;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.hadoop.fs.XAttrCodec;
|
||||
|
||||
public class XAttrValueParam extends StringParam {
|
||||
/** Parameter name. **/
|
||||
public static final String NAME = "xattr.value";
|
||||
/** Default parameter value. **/
|
||||
public static final String DEFAULT = "";
|
||||
|
||||
private static Domain DOMAIN = new Domain(NAME, null);
|
||||
|
||||
public XAttrValueParam(final String str) {
|
||||
super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return NAME;
|
||||
}
|
||||
|
||||
public byte[] getXAttrValue() throws IOException {
|
||||
final String v = getValue();
|
||||
return XAttrCodec.decodeValue(v);
|
||||
}
|
||||
}
|
|
@ -16,111 +16,228 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
|
||||
#include <hdfs.h>
|
||||
#include <inttypes.h>
|
||||
#include <stdarg.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <strings.h>
|
||||
|
||||
#include "fuse_context_handle.h"
|
||||
#include "fuse_dfs.h"
|
||||
#include "fuse_trash.h"
|
||||
#include "fuse_context_handle.h"
|
||||
|
||||
|
||||
const char *const TrashPrefixDir = "/user/root/.Trash";
|
||||
const char *const TrashDir = "/user/root/.Trash/Current";
|
||||
#include "fuse_users.h"
|
||||
|
||||
#define TRASH_RENAME_TRIES 100
|
||||
#define ALREADY_IN_TRASH_ERR 9000
|
||||
|
||||
/**
|
||||
* Split a path into a parent directory and a base path component.
|
||||
*
|
||||
* @param abs_path The absolute path.
|
||||
* @param pcomp (out param) Will be set to the last path component.
|
||||
* Malloced.
|
||||
* @param parent_dir (out param) Will be set to the parent directory.
|
||||
* Malloced.
|
||||
*
|
||||
* @return 0 on success.
|
||||
* On success, both *pcomp and *parent_dir will contain
|
||||
* malloc'ed strings.
|
||||
* EINVAL if the path wasn't absolute.
|
||||
* EINVAL if there is no parent directory (i.e. abs_path=/)
|
||||
* ENOMEM if we ran out of memory.
|
||||
*/
|
||||
static int get_parent_dir(const char *abs_path, char **pcomp,
|
||||
char **parent_dir)
|
||||
{
|
||||
int ret;
|
||||
char *pdir = NULL, *pc = NULL, *last_slash;
|
||||
|
||||
pdir = strdup(abs_path);
|
||||
if (!pdir) {
|
||||
ret = ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
last_slash = rindex(pdir, '/');
|
||||
if (!last_slash) {
|
||||
ERROR("get_parent_dir(%s): expected absolute path.\n", abs_path);
|
||||
ret = EINVAL;
|
||||
goto done;
|
||||
}
|
||||
if (last_slash[1] == '\0') {
|
||||
*last_slash = '\0';
|
||||
last_slash = rindex(pdir, '/');
|
||||
if (!last_slash) {
|
||||
ERROR("get_parent_dir(%s): there is no parent dir.\n", abs_path);
|
||||
ret = EINVAL;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
pc = strdup(last_slash + 1);
|
||||
if (!pc) {
|
||||
ret = ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
*last_slash = '\0';
|
||||
ret = 0;
|
||||
done:
|
||||
if (ret) {
|
||||
free(pdir);
|
||||
free(pc);
|
||||
return ret;
|
||||
}
|
||||
*pcomp = pc;
|
||||
*parent_dir = pdir;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the base path to the trash. This will depend on the user ID.
|
||||
* For example, a user whose ID maps to 'foo' will get back the path
|
||||
* "/user/foo/.Trash/Current".
|
||||
*
|
||||
* @param trash_base (out param) the base path to the trash.
|
||||
* Malloced.
|
||||
*
|
||||
* @return 0 on success; error code otherwise.
|
||||
*/
|
||||
static int get_trash_base(char **trash_base)
|
||||
{
|
||||
const char * const PREFIX = "/user/";
|
||||
const char * const SUFFIX = "/.Trash/Current";
|
||||
char *user_name = NULL, *base = NULL;
|
||||
uid_t uid = fuse_get_context()->uid;
|
||||
int ret;
|
||||
|
||||
user_name = getUsername(uid);
|
||||
if (!user_name) {
|
||||
ERROR("get_trash_base(): failed to get username for uid %"PRId64"\n",
|
||||
(uint64_t)uid);
|
||||
ret = EIO;
|
||||
goto done;
|
||||
}
|
||||
if (asprintf(&base, "%s%s%s", PREFIX, user_name, SUFFIX) < 0) {
|
||||
base = NULL;
|
||||
ret = ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
ret = 0;
|
||||
done:
|
||||
free(user_name);
|
||||
if (ret) {
|
||||
free(base);
|
||||
return ret;
|
||||
}
|
||||
*trash_base = base;
|
||||
return 0;
|
||||
}
|
||||
|
||||
//
|
||||
// NOTE: this function is a c implementation of org.apache.hadoop.fs.Trash.moveToTrash(Path path).
|
||||
//
|
||||
int move_to_trash(const char *abs_path, hdfsFS userFS)
|
||||
{
|
||||
int ret;
|
||||
char *pcomp = NULL, *parent_dir = NULL, *trash_base = NULL;
|
||||
char *target_dir = NULL, *target = NULL;
|
||||
|
||||
int move_to_trash(const char *item, hdfsFS userFS) {
|
||||
|
||||
// retrieve dfs specific data
|
||||
dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
|
||||
|
||||
// check params and the context var
|
||||
assert(item);
|
||||
assert(dfs);
|
||||
assert('/' == *item);
|
||||
assert(rindex(item,'/') >= 0);
|
||||
|
||||
|
||||
char fname[4096]; // or last element of the directory path
|
||||
char parent_dir[4096]; // the directory the fname resides in
|
||||
|
||||
if (strlen(item) > sizeof(fname) - strlen(TrashDir)) {
|
||||
ERROR("Buffer too small to accomodate path of len %d", (int)strlen(item));
|
||||
return -EIO;
|
||||
ret = get_parent_dir(abs_path, &pcomp, &parent_dir);
|
||||
if (ret) {
|
||||
goto done;
|
||||
}
|
||||
|
||||
// separate the file name and the parent directory of the item to be deleted
|
||||
{
|
||||
int length_of_parent_dir = rindex(item, '/') - item ;
|
||||
int length_of_fname = strlen(item) - length_of_parent_dir - 1; // the '/'
|
||||
|
||||
// note - the below strncpys should be safe from overflow because of the check on item's string length above.
|
||||
strncpy(parent_dir, item, length_of_parent_dir);
|
||||
parent_dir[length_of_parent_dir ] = 0;
|
||||
strncpy(fname, item + length_of_parent_dir + 1, strlen(item));
|
||||
fname[length_of_fname + 1] = 0;
|
||||
ret = get_trash_base(&trash_base);
|
||||
if (ret) {
|
||||
goto done;
|
||||
}
|
||||
|
||||
// create the target trash directory
|
||||
char trash_dir[4096];
|
||||
if (snprintf(trash_dir, sizeof(trash_dir), "%s%s", TrashDir, parent_dir)
|
||||
>= sizeof trash_dir) {
|
||||
ERROR("Move to trash error target not big enough for %s", item);
|
||||
return -EIO;
|
||||
if (!strncmp(trash_base, abs_path, strlen(trash_base))) {
|
||||
INFO("move_to_trash(%s): file is already in the trash; deleting.",
|
||||
abs_path);
|
||||
ret = ALREADY_IN_TRASH_ERR;
|
||||
goto done;
|
||||
}
|
||||
fprintf(stderr, "trash_base='%s'\n", trash_base);
|
||||
if (asprintf(&target_dir, "%s%s", trash_base, parent_dir) < 0) {
|
||||
ret = ENOMEM;
|
||||
target_dir = NULL;
|
||||
goto done;
|
||||
}
|
||||
if (asprintf(&target, "%s/%s", target_dir, pcomp) < 0) {
|
||||
ret = ENOMEM;
|
||||
target = NULL;
|
||||
goto done;
|
||||
}
|
||||
|
||||
// create the target trash directory in trash (if needed)
|
||||
if ( hdfsExists(userFS, trash_dir)) {
|
||||
if (hdfsExists(userFS, target_dir) != 0) {
|
||||
// make the directory to put it in in the Trash - NOTE
|
||||
// hdfsCreateDirectory also creates parents, so Current will be created if it does not exist.
|
||||
if (hdfsCreateDirectory(userFS, trash_dir)) {
|
||||
return -EIO;
|
||||
if (hdfsCreateDirectory(userFS, target_dir)) {
|
||||
ret = errno;
|
||||
ERROR("move_to_trash(%s) error: hdfsCreateDirectory(%s) failed with error %d",
|
||||
abs_path, target_dir, ret);
|
||||
goto done;
|
||||
}
|
||||
} else if (hdfsExists(userFS, target) == 0) {
|
||||
// If there is already a file in the trash with this path, append a number.
|
||||
int idx;
|
||||
for (idx = 1; idx < TRASH_RENAME_TRIES; idx++) {
|
||||
free(target);
|
||||
if (asprintf(&target, "%s%s.%d", target_dir, pcomp, idx) < 0) {
|
||||
target = NULL;
|
||||
ret = ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
if (hdfsExists(userFS, target) != 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (idx == TRASH_RENAME_TRIES) {
|
||||
ERROR("move_to_trash(%s) error: there are already %d files in the trash "
|
||||
"with this name.\n", abs_path, TRASH_RENAME_TRIES);
|
||||
ret = EINVAL;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// if the target path in Trash already exists, then append with
|
||||
// a number. Start from 1.
|
||||
//
|
||||
char target[4096];
|
||||
int j ;
|
||||
if ( snprintf(target, sizeof target,"%s/%s",trash_dir, fname) >= sizeof target) {
|
||||
ERROR("Move to trash error target not big enough for %s", item);
|
||||
return -EIO;
|
||||
if (hdfsRename(userFS, abs_path, target)) {
|
||||
ret = errno;
|
||||
ERROR("move_to_trash(%s): failed to rename the file to %s: error %d",
|
||||
abs_path, target, ret);
|
||||
goto done;
|
||||
}
|
||||
|
||||
// NOTE: this loop differs from the java version by capping the #of tries
|
||||
for (j = 1; ! hdfsExists(userFS, target) && j < TRASH_RENAME_TRIES ; j++) {
|
||||
if (snprintf(target, sizeof target,"%s/%s.%d",trash_dir, fname, j) >= sizeof target) {
|
||||
ERROR("Move to trash error target not big enough for %s", item);
|
||||
return -EIO;
|
||||
ret = 0;
|
||||
done:
|
||||
if ((ret != 0) && (ret != ALREADY_IN_TRASH_ERR)) {
|
||||
ERROR("move_to_trash(%s) failed with error %d", abs_path, ret);
|
||||
}
|
||||
free(pcomp);
|
||||
free(parent_dir);
|
||||
free(trash_base);
|
||||
free(target_dir);
|
||||
free(target);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int hdfsDeleteWithTrash(hdfsFS userFS, const char *path, int useTrash)
|
||||
{
|
||||
int tried_to_move_to_trash = 0;
|
||||
if (useTrash) {
|
||||
tried_to_move_to_trash = 1;
|
||||
if (move_to_trash(path, userFS) == 0) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
if (hdfsRename(userFS, item, target)) {
|
||||
ERROR("Trying to rename %s to %s", item, target);
|
||||
return -EIO;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int hdfsDeleteWithTrash(hdfsFS userFS, const char *path, int useTrash) {
|
||||
|
||||
// move the file to the trash if this is enabled and its not actually in the trash.
|
||||
if (useTrash && strncmp(path, TrashPrefixDir, strlen(TrashPrefixDir)) != 0) {
|
||||
int ret= move_to_trash(path, userFS);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (hdfsDelete(userFS, path, 1)) {
|
||||
ERROR("Trying to delete the file %s", path);
|
||||
return -EIO;
|
||||
int err = errno;
|
||||
if (err < 0) {
|
||||
err = -err;
|
||||
}
|
||||
ERROR("hdfsDeleteWithTrash(%s): hdfsDelete failed: error %d.",
|
||||
path, err);
|
||||
return -err;
|
||||
}
|
||||
if (tried_to_move_to_trash) {
|
||||
ERROR("hdfsDeleteWithTrash(%s): deleted the file instead.\n", path);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
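For readers following the rewritten move_to_trash(), the path scheme it implements is: a per-user trash base of /user/&lt;name&gt;/.Trash/Current, the original parent directory preserved underneath it, and a numeric suffix appended on name collisions, capped at TRASH_RENAME_TRIES. A minimal Java sketch of that naming scheme only (separator handling is normalized here and the code is not taken from the patch):

public class TrashTargetSketch {
  static final int TRASH_RENAME_TRIES = 100;

  /** Mirror of get_trash_base(): "/user/<name>/.Trash/Current". */
  static String trashBase(String userName) {
    return "/user/" + userName + "/.Trash/Current";
  }

  /**
   * Mirror of the target selection in move_to_trash(): keep the original
   * parent directory under the trash base and append ".1", ".2", ... when
   * a file with the same name already sits in the trash.
   */
  static String target(String userName, String parentDir, String name, int attempt) {
    String base = trashBase(userName) + parentDir + "/" + name;
    return attempt == 0 ? base : base + "." + attempt;
  }

  public static void main(String[] args) {
    System.out.println(target("root", "/data/logs", "old.txt", 0));
    // /user/root/.Trash/Current/data/logs/old.txt
    System.out.println(target("root", "/data/logs", "old.txt", 3));
    // /user/root/.Trash/Current/data/logs/old.txt.3
  }
}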
|
@ -18,7 +18,6 @@
|
|||
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <malloc.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
@ -48,7 +47,6 @@ static double timespec_to_double(const struct timespec *ts)
|
|||
struct stopwatch {
|
||||
struct timespec start;
|
||||
struct timespec stop;
|
||||
struct rusage rusage;
|
||||
};
|
||||
|
||||
static struct stopwatch *stopwatch_create(void)
|
||||
|
@ -66,12 +64,6 @@ static struct stopwatch *stopwatch_create(void)
|
|||
"error %d (%s)\n", err, strerror(err));
|
||||
goto error;
|
||||
}
|
||||
if (getrusage(RUSAGE_THREAD, &watch->rusage) < 0) {
|
||||
int err = errno;
|
||||
fprintf(stderr, "getrusage failed: error %d (%s)\n",
|
||||
err, strerror(err));
|
||||
goto error;
|
||||
}
|
||||
return watch;
|
||||
|
||||
error:
|
||||
|
|
|
@@ -31,6 +31,7 @@ package hadoop.hdfs;
import "Security.proto";
import "hdfs.proto";
import "acl.proto";
import "xattr.proto";

/**
 * The ClientNamenodeProtocol Service defines the interface between a client

@@ -759,4 +760,10 @@ service ClientNamenodeProtocol {
      returns(SetAclResponseProto);
  rpc getAclStatus(GetAclStatusRequestProto)
      returns(GetAclStatusResponseProto);
  rpc setXAttr(SetXAttrRequestProto)
      returns(SetXAttrResponseProto);
  rpc getXAttrs(GetXAttrsRequestProto)
      returns(GetXAttrsResponseProto);
  rpc removeXAttr(RemoveXAttrRequestProto)
      returns(RemoveXAttrResponseProto);
}
|
|
@@ -23,6 +23,7 @@ package hadoop.hdfs.fsimage;

import "hdfs.proto";
import "acl.proto";
import "xattr.proto";

/**
 * This file defines the on-disk layout of the file system image. The

@@ -106,7 +107,23 @@ message INodeSection {
     */
    repeated fixed32 entries = 2 [packed = true];
  }

  message XAttrCompactProto {
    /**
     *
     * [0:2) -- the namespace of XAttr (XAttrNamespaceProto)
     * [2:26) -- the name of the entry, which is an ID that points to a
     *           string in the StringTableSection.
     * [26:32) -- reserved for future uses.
     */
    required fixed32 name = 1;
    optional bytes value = 2;
  }

  message XAttrFeatureProto {
    repeated XAttrCompactProto xAttrs = 1;
  }

  message INodeFile {
    optional uint32 replication = 1;
    optional uint64 modificationTime = 2;

@@ -116,6 +133,7 @@ message INodeSection {
    repeated BlockProto blocks = 6;
    optional FileUnderConstructionFeature fileUC = 7;
    optional AclFeatureProto acl = 8;
    optional XAttrFeatureProto xAttrs = 9;
  }

  message INodeDirectory {

@@ -126,6 +144,7 @@ message INodeSection {
    optional uint64 dsQuota = 3;
    optional fixed64 permission = 4;
    optional AclFeatureProto acl = 5;
    optional XAttrFeatureProto xAttrs = 6;
  }

  message INodeSymlink {
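XAttrCompactProto stores the namespace and the string-table id of the name in a single fixed32, using the bit layout spelled out in its comment: 2 bits of namespace, 24 bits of name id, 6 reserved bits. A hedged sketch of packing and unpacking that field; the offsets below read [0:2) from the most significant bit, which is an assumption, and the real FSImage loader/saver is not part of this hunk:

public class XAttrCompactBits {
  // Layout per the XAttrCompactProto comment; treating [0:2) as the two most
  // significant bits is an assumption, not something shown in the diff.
  private static final int NS_OFFSET = 30;
  private static final int NS_MASK = 0x3;
  private static final int NAME_OFFSET = 6;
  private static final int NAME_MASK = (1 << 24) - 1;

  static int pack(int namespaceOrdinal, int nameId) {
    return ((namespaceOrdinal & NS_MASK) << NS_OFFSET)
        | ((nameId & NAME_MASK) << NAME_OFFSET);
  }

  static int namespaceOf(int packed) {
    return (packed >>> NS_OFFSET) & NS_MASK;
  }

  static int nameIdOf(int packed) {
    return (packed >>> NAME_OFFSET) & NAME_MASK;
  }

  public static void main(String[] args) {
    int packed = pack(1 /* TRUSTED per XAttrNamespaceProto */, 42);
    System.out.println(namespaceOf(packed)); // 1
    System.out.println(nameIdOf(packed));    // 42
  }
}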
|
|
@ -0,0 +1,71 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
option java_package = "org.apache.hadoop.hdfs.protocol.proto";
|
||||
option java_outer_classname = "XAttrProtos";
|
||||
option java_generate_equals_and_hash = true;
|
||||
package hadoop.hdfs;
|
||||
|
||||
message XAttrProto {
|
||||
enum XAttrNamespaceProto {
|
||||
USER = 0;
|
||||
TRUSTED = 1;
|
||||
SECURITY = 2;
|
||||
SYSTEM = 3;
|
||||
}
|
||||
|
||||
required XAttrNamespaceProto namespace = 1;
|
||||
required string name = 2;
|
||||
optional bytes value = 3;
|
||||
}
|
||||
|
||||
message XAttrEditLogProto {
|
||||
required string src = 1;
|
||||
optional XAttrProto xAttr = 2;
|
||||
}
|
||||
|
||||
enum XAttrSetFlagProto {
|
||||
XATTR_CREATE = 0x01;
|
||||
XATTR_REPLACE = 0x02;
|
||||
}
|
||||
|
||||
message SetXAttrRequestProto {
|
||||
required string src = 1;
|
||||
optional XAttrProto xAttr = 2;
|
||||
optional uint32 flag = 3; //bits set using XAttrSetFlagProto
|
||||
}
|
||||
|
||||
message SetXAttrResponseProto {
|
||||
}
|
||||
|
||||
message GetXAttrsRequestProto {
|
||||
required string src = 1;
|
||||
repeated XAttrProto xAttrs = 2;
|
||||
}
|
||||
|
||||
message GetXAttrsResponseProto {
|
||||
repeated XAttrProto xAttrs = 1;
|
||||
}
|
||||
|
||||
message RemoveXAttrRequestProto {
|
||||
required string src = 1;
|
||||
optional XAttrProto xAttr = 2;
|
||||
}
|
||||
|
||||
message RemoveXAttrResponseProto {
|
||||
}
|
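SetXAttrRequestProto carries the create/replace request as a plain bit mask ("bits set using XAttrSetFlagProto"). A small self-contained sketch of that packing, using only the constant values defined in the message above; the actual protobuf helper code is not shown in this diff, so treat this as illustrative:

public class XAttrFlagBits {
  // Values from XAttrSetFlagProto above.
  static final int XATTR_CREATE = 0x01;
  static final int XATTR_REPLACE = 0x02;

  /** Pack the requested flags into the uint32 carried by SetXAttrRequestProto. */
  static int toProtoFlag(boolean create, boolean replace) {
    int flag = 0;
    if (create)  flag |= XATTR_CREATE;
    if (replace) flag |= XATTR_REPLACE;
    return flag;
  }

  static boolean hasCreate(int flag)  { return (flag & XATTR_CREATE) != 0; }
  static boolean hasReplace(int flag) { return (flag & XATTR_REPLACE) != 0; }

  public static void main(String[] args) {
    int flag = toProtoFlag(true, true);
    System.out.println(flag);              // 3
    System.out.println(hasCreate(flag));   // true
    System.out.println(hasReplace(flag));  // true
  }
}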
|
@ -1317,6 +1317,17 @@
|
|||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.nfs.allow.insecure.ports</name>
|
||||
<value>true</value>
|
||||
<description>
|
||||
When set to false, client connections originating from unprivileged ports
|
||||
(those above 1023) will be rejected. This is to ensure that clients
|
||||
connecting to this NFS Gateway must have had root privilege on the machine
|
||||
where they're connecting from.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.webhdfs.enabled</name>
|
||||
<value>true</value>
|
||||
|
@@ -1895,4 +1906,28 @@
  </description>
</property>

</configuration>
<property>
  <name>dfs.namenode.xattrs.enabled</name>
  <value>true</value>
  <description>
    Whether support for extended attributes is enabled on the NameNode.
  </description>
</property>

<property>
  <name>dfs.namenode.fs-limits.max-xattrs-per-inode</name>
  <value>32</value>
  <description>
    Maximum number of extended attributes per inode.
  </description>
</property>

<property>
  <name>dfs.namenode.fs-limits.max-xattr-size</name>
  <value>16384</value>
  <description>
    The maximum combined size of the name and value of an extended attribute in bytes.
  </description>
</property>

</configuration>
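The same three keys can also be set programmatically on a Configuration, which is how the tests later in this merge enable the feature (TestXAttrCLI uses DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY). A minimal sketch; the two fs-limits keys are written out literally because matching DFSConfigKeys constants are not visible in this diff:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class XAttrConfigExample {
  public static Configuration build() {
    Configuration conf = new Configuration();
    // Switch defined in hdfs-default.xml above; constant is used by TestXAttrCLI below.
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
    // Literal key names from hdfs-default.xml above.
    conf.setInt("dfs.namenode.fs-limits.max-xattrs-per-inode", 32);
    conf.setInt("dfs.namenode.fs-limits.max-xattr-size", 16384);
    return conf;
  }
}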
|
|
@ -0,0 +1,98 @@
|
|||
~~ Licensed under the Apache License, Version 2.0 (the "License");
|
||||
~~ you may not use this file except in compliance with the License.
|
||||
~~ You may obtain a copy of the License at
|
||||
~~
|
||||
~~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~~
|
||||
~~ Unless required by applicable law or agreed to in writing, software
|
||||
~~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~~ See the License for the specific language governing permissions and
|
||||
~~ limitations under the License. See accompanying LICENSE file.
|
||||
|
||||
---
|
||||
Hadoop Distributed File System-${project.version} - Extended Attributes
|
||||
---
|
||||
---
|
||||
${maven.build.timestamp}
|
||||
|
||||
Extended Attributes in HDFS
|
||||
|
||||
\[ {{{../../index.html}Go Back}} \]
|
||||
|
||||
%{toc|section=1|fromDepth=2|toDepth=4}
|
||||
|
||||
* {Overview}
|
||||
|
||||
<Extended attributes> (abbreviated as <xattrs>) are a filesystem feature that allow user applications to associate additional metadata with a file or directory. Unlike system-level inode metadata such as file permissions or modification time, extended attributes are not interpreted by the system and are instead used by applications to store additional information about an inode. Extended attributes could be used, for instance, to specify the character encoding of a plain-text document.
|
||||
|
||||
** {HDFS extended attributes}
|
||||
|
||||
Extended attributes in HDFS are modeled after extended attributes in Linux (see the Linux manpage for {{{http://www.bestbits.at/acl/man/man5/attr.txt}attr(5)}} and {{{http://www.bestbits.at/acl/}related documentation}}). An extended attribute is a <name-value pair>, with a string name and binary value. Xattr names must also be prefixed with a <namespace>. For example, an xattr named <myXattr> in the <user> namespace would be specified as <<user.myXattr>>. Multiple xattrs can be associated with a single inode.
|
||||
|
||||
** {Namespaces and Permissions}
|
||||
|
||||
In HDFS, as in Linux, there are four valid namespaces: <<<user>>>, <<<trusted>>>, <<<system>>>, and <<<security>>>. Each of these namespaces has different access restrictions.
|
||||
|
||||
The <<<user>>> namespace is the namespace that will commonly be used by client applications. Access to extended attributes in the user namespace is controlled by the corresponding file permissions.
|
||||
|
||||
The <<<trusted>>> namespace is available only to HDFS superusers.
|
||||
|
||||
The <<<system>>> namespace is reserved for internal HDFS use. This namespace is not accessible through userspace methods, and is reserved for implementing internal HDFS features.
|
||||
|
||||
The <<<security>>> namespace is reserved for internal HDFS use. This namespace is not accessible through userspace methods. It is currently unused.
|
||||
|
||||
* {Interacting with extended attributes}
|
||||
|
||||
The Hadoop shell has support for interacting with extended attributes via <<<hadoop fs -getfattr>>> and <<<hadoop fs -setfattr>>>. These commands are styled after the Linux {{{http://www.bestbits.at/acl/man/man1/getfattr.txt}getfattr(1)}} and {{{http://www.bestbits.at/acl/man/man1/setfattr.txt}setfattr(1)}} commands.
|
||||
|
||||
** {getfattr}
|
||||
|
||||
<<<hadoop fs -getfattr [-R] {-n name | -d} [-e en] <path>>>>
|
||||
|
||||
Displays the extended attribute names and values (if any) for a file or directory.
|
||||
|
||||
*--+--+
|
||||
-R | Recursively list the attributes for all files and directories.
|
||||
*--+--+
|
||||
-n name | Dump the named extended attribute value.
|
||||
*--+--+
|
||||
-d | Dump all extended attribute values associated with pathname.
|
||||
*--+--+
|
||||
-e \<encoding\> | Encode values after retrieving them. Valid encodings are "text", "hex", and "base64". Values encoded as text strings are enclosed in double quotes ("), and values encoded as hexadecimal and base64 are prefixed with 0x and 0s, respectively.
|
||||
*--+--+
|
||||
\<path\> | The file or directory.
|
||||
*--+--+
|
||||
|
||||
** {setfattr}
|
||||
|
||||
<<<hadoop fs -setfattr {-n name [-v value] | -x name} <path>>>>
|
||||
|
||||
Sets an extended attribute name and value for a file or directory.
|
||||
|
||||
*--+--+
|
||||
-n name | The extended attribute name.
|
||||
*--+--+
|
||||
-v value | The extended attribute value. There are three different encoding methods for the value. If the argument is enclosed in double quotes, then the value is the string inside the quotes. If the argument is prefixed with 0x or 0X, then it is taken as a hexadecimal number. If the argument begins with 0s or 0S, then it is taken as a base64 encoding.
|
||||
*--+--+
|
||||
-x name | Remove the extended attribute.
|
||||
*--+--+
|
||||
\<path\> | The file or directory.
|
||||
*--+--+
|
||||
|
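The same commands can be driven from Java through FsShell and ToolRunner, which is exactly how the TestDFSShell cases later in this change exercise them. A stripped-down sketch against a hypothetical /mydir path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class XAttrShellExample {
  public static void main(String[] args) throws Exception {
    FsShell shell = new FsShell(new Configuration());

    // hadoop fs -setfattr -n user.a1 -v 1234 /mydir
    int rc = ToolRunner.run(shell, new String[]{
        "-setfattr", "-n", "user.a1", "-v", "1234", "/mydir"});
    System.out.println("setfattr rc=" + rc);

    // hadoop fs -getfattr -d /mydir   (dump all xattrs of /mydir)
    rc = ToolRunner.run(shell, new String[]{"-getfattr", "-d", "/mydir"});

    // hadoop fs -setfattr -x user.a1 /mydir   (remove the xattr)
    rc = ToolRunner.run(shell, new String[]{"-setfattr", "-x", "user.a1", "/mydir"});
  }
}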
||||
* {Configuration options}
|
||||
|
||||
|
||||
HDFS supports extended attributes out of the box, without additional configuration. Administrators could potentially be interested in the options limiting the number of xattrs per inode and the size of xattrs, since xattrs increase the on-disk and in-memory space consumption of an inode.
|
||||
|
||||
* <<<dfs.namenode.xattrs.enabled>>>
|
||||
|
||||
Whether support for extended attributes is enabled on the NameNode. By default, extended attributes are enabled.
|
||||
|
||||
* <<<dfs.namenode.fs-limits.max-xattrs-per-inode>>>
|
||||
|
||||
The maximum number of extended attributes per inode. By default, this limit is 32.
|
||||
|
||||
* <<<dfs.namenode.fs-limits.max-xattr-size>>>
|
||||
|
||||
The maximum combined size of the name and value of an extended attribute in bytes. By default, this limit is 16384 bytes.
|
|
@ -102,6 +102,12 @@ WebHDFS REST API
|
|||
* {{{Cancel Delegation Token}<<<CANCELDELEGATIONTOKEN>>>}}
|
||||
(see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.cancelDelegationToken)
|
||||
|
||||
* {{{Create Snapshot}<<<CREATESNAPSHOT>>>}}
|
||||
(see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.createSnapshot)
|
||||
|
||||
* {{{Rename Snapshot}<<<RENAMESNAPSHOT>>>}}
|
||||
(see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.renameSnapshot)
|
||||
|
||||
* HTTP POST
|
||||
|
||||
* {{{Append to a File}<<<APPEND>>>}}
|
||||
|
@ -114,6 +120,9 @@ WebHDFS REST API
|
|||
|
||||
* {{{Delete a File/Directory}<<<DELETE>>>}}
|
||||
(see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.delete)
|
||||
|
||||
* {{{Delete Snapshot}<<<DELETESNAPSHOT>>>}}
|
||||
(see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.deleteSnapshot)
|
||||
|
||||
** {FileSystem URIs vs HTTP URLs}
|
||||
|
||||
|
@ -900,6 +909,75 @@ Transfer-Encoding: chunked
|
|||
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getAclStatus
|
||||
|
||||
|
||||
* {Snapshot Operations}
|
||||
|
||||
** {Create Snapshot}
|
||||
|
||||
* Submit an HTTP PUT request.
|
||||
|
||||
+---------------------------------
|
||||
curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CREATESNAPSHOT[&snapshotname=<SNAPSHOTNAME>]"
|
||||
+---------------------------------
|
||||
|
||||
The client receives a response with a {{{Path JSON Schema}<<<Path>>> JSON object}}:
|
||||
|
||||
+---------------------------------
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
Transfer-Encoding: chunked
|
||||
|
||||
{"Path": "/user/szetszwo/.snapshot/s1"}
|
||||
+---------------------------------
|
||||
|
||||
[]
|
||||
|
||||
See also:
|
||||
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.createSnapshot
|
||||
|
||||
|
||||
** {Delete Snapshot}
|
||||
|
||||
* Submit an HTTP DELETE request.
|
||||
|
||||
+---------------------------------
|
||||
curl -i -X DELETE "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=DELETESNAPSHOT&snapshotname=<SNAPSHOTNAME>"
|
||||
+---------------------------------
|
||||
|
||||
The client receives a response with zero content length:
|
||||
|
||||
+---------------------------------
|
||||
HTTP/1.1 200 OK
|
||||
Content-Length: 0
|
||||
+---------------------------------
|
||||
|
||||
[]
|
||||
|
||||
See also:
|
||||
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.deleteSnapshot
|
||||
|
||||
|
||||
** {Rename Snapshot}
|
||||
|
||||
* Submit an HTTP PUT request.
|
||||
|
||||
+---------------------------------
|
||||
curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=RENAMESNAPSHOT
|
||||
&oldsnapshotname=<SNAPSHOTNAME>&snapshotname=<SNAPSHOTNAME>"
|
||||
+---------------------------------
|
||||
|
||||
The client receives a response with zero content length:
|
||||
|
||||
+---------------------------------
|
||||
HTTP/1.1 200 OK
|
||||
Content-Length: 0
|
||||
+---------------------------------
|
||||
|
||||
[]
|
||||
|
||||
See also:
|
||||
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.renameSnapshot
|
||||
|
||||
|
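For clients that hit the REST endpoint directly instead of going through WebHdfsFileSystem, the rename above is a single PUT carrying both snapshot name parameters. A bare-bones sketch with java.net.HttpURLConnection; the host, port, path and the simple-auth user.name value are placeholders, not taken from this patch:

import java.net.HttpURLConnection;
import java.net.URL;

public class RenameSnapshotRest {
  public static void main(String[] args) throws Exception {
    // Mirrors the curl example above, with placeholder endpoint and names.
    URL url = new URL("http://namenode:50070/webhdfs/v1/tmp/snapdir"
        + "?op=RENAMESNAPSHOT&oldsnapshotname=s1&snapshotname=s2&user.name=hdfs");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("PUT");
    conn.setDoOutput(true);                      // PUT with an empty body
    conn.getOutputStream().close();
    System.out.println(conn.getResponseCode());  // 200 with zero content length on success
    conn.disconnect();
  }
}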
||||
* {Delegation Token Operations}
|
||||
|
||||
** {Get Delegation Token}
|
||||
|
@ -1839,6 +1917,26 @@ var tokenProperties =
|
|||
{{{Open and Read a File}<<<OPEN>>>}}
|
||||
|
||||
|
||||
** {Old Snapshot Name}
|
||||
|
||||
*----------------+-------------------------------------------------------------------+
|
||||
|| Name | <<<oldsnapshotname>>> |
|
||||
*----------------+-------------------------------------------------------------------+
|
||||
|| Description | The old name of the snapshot to be renamed. |
|
||||
*----------------+-------------------------------------------------------------------+
|
||||
|| Type | String |
|
||||
*----------------+-------------------------------------------------------------------+
|
||||
|| Default Value | null |
|
||||
*----------------+-------------------------------------------------------------------+
|
||||
|| Valid Values | An existing snapshot name. |
|
||||
*----------------+-------------------------------------------------------------------+
|
||||
|| Syntax | Any string. |
|
||||
*----------------+-------------------------------------------------------------------+
|
||||
|
||||
See also:
|
||||
{{{Rename Snapshot}<<<RENAMESNAPSHOT>>>}}
|
||||
|
||||
|
||||
** {Op}
|
||||
|
||||
*----------------+-------------------------------------------------------------------+
|
||||
|
@ -1983,6 +2081,29 @@ var tokenProperties =
|
|||
{{{Set Replication Factor}<<<SETREPLICATION>>>}}
|
||||
|
||||
|
||||
** {Snapshot Name}
|
||||
|
||||
*----------------+-------------------------------------------------------------------+
|
||||
|| Name | <<<snapshotname>>> |
|
||||
*----------------+-------------------------------------------------------------------+
|
||||
|| Description | The name of the snapshot to be created/deleted. |
|
||||
|| | Or the new name for snapshot rename. |
|
||||
*----------------+-------------------------------------------------------------------+
|
||||
|| Type | String |
|
||||
*----------------+-------------------------------------------------------------------+
|
||||
|| Default Value | null |
|
||||
*----------------+-------------------------------------------------------------------+
|
||||
|| Valid Values | Any valid snapshot name. |
|
||||
*----------------+-------------------------------------------------------------------+
|
||||
|| Syntax | Any string. |
|
||||
*----------------+-------------------------------------------------------------------+
|
||||
|
||||
See also:
|
||||
{{{Create Snapshot}<<<CREATESNAPSHOT>>>}},
|
||||
{{{Delete Snapshot}<<<DELETESNAPSHOT>>>}},
|
||||
{{{Rename Snapshot}<<<RENAMESNAPSHOT>>>}}
|
||||
|
||||
|
||||
** {Sources}
|
||||
|
||||
*----------------+-------------------------------------------------------------------+
|
||||
|
@ -2042,4 +2163,3 @@ var tokenProperties =
|
|||
|
||||
See also:
|
||||
{{Authentication}}
|
||||
|
||||
|
|
|
@ -0,0 +1,99 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.cli;
|
||||
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
import org.apache.hadoop.cli.util.CLICommand;
|
||||
import org.apache.hadoop.cli.util.CommandExecutor.Result;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.DistributedFileSystem;
|
||||
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.security.authorize.PolicyProvider;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
public class TestXAttrCLI extends CLITestHelperDFS {
|
||||
protected MiniDFSCluster dfsCluster = null;
|
||||
protected FileSystem fs = null;
|
||||
protected String namenode = null;
|
||||
|
||||
@Before
|
||||
@Override
|
||||
public void setUp() throws Exception {
|
||||
super.setUp();
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
|
||||
conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
|
||||
HDFSPolicyProvider.class, PolicyProvider.class);
|
||||
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
|
||||
|
||||
dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
|
||||
dfsCluster.waitClusterUp();
|
||||
namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
|
||||
|
||||
username = System.getProperty("user.name");
|
||||
|
||||
fs = dfsCluster.getFileSystem();
|
||||
assertTrue("Not a HDFS: "+fs.getUri(),
|
||||
fs instanceof DistributedFileSystem);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String getTestFile() {
|
||||
return "testXAttrConf.xml";
|
||||
}
|
||||
|
||||
@After
|
||||
@Override
|
||||
public void tearDown() throws Exception {
|
||||
if (fs != null) {
|
||||
fs.close();
|
||||
}
|
||||
if (dfsCluster != null) {
|
||||
dfsCluster.shutdown();
|
||||
}
|
||||
Thread.sleep(2000);
|
||||
super.tearDown();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String expandCommand(final String cmd) {
|
||||
String expCmd = cmd;
|
||||
expCmd = expCmd.replaceAll("NAMENODE", namenode);
|
||||
expCmd = expCmd.replaceAll("#LF#",
|
||||
System.getProperty("line.separator"));
|
||||
expCmd = super.expandCommand(expCmd);
|
||||
return expCmd;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Result execute(CLICommand cmd) throws Exception {
|
||||
return cmd.getExecutor(namenode).executeCommand(cmd.getCmd());
|
||||
}
|
||||
|
||||
@Test
|
||||
@Override
|
||||
public void testAll () {
|
||||
super.testAll();
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,85 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.fs;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertNotSame;
|
||||
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
|
||||
/**
|
||||
* Tests for <code>XAttr</code> objects.
|
||||
*/
|
||||
public class TestXAttr {
|
||||
private static XAttr XATTR, XATTR1, XATTR2, XATTR3, XATTR4;
|
||||
|
||||
@BeforeClass
|
||||
public static void setUp() throws Exception {
|
||||
byte[] value = {0x31, 0x32, 0x33};
|
||||
XATTR = new XAttr.Builder()
|
||||
.setName("name")
|
||||
.setValue(value)
|
||||
.build();
|
||||
XATTR1 = new XAttr.Builder()
|
||||
.setNameSpace(XAttr.NameSpace.USER)
|
||||
.setName("name")
|
||||
.setValue(value)
|
||||
.build();
|
||||
XATTR2 = new XAttr.Builder()
|
||||
.setNameSpace(XAttr.NameSpace.TRUSTED)
|
||||
.setName("name")
|
||||
.setValue(value)
|
||||
.build();
|
||||
XATTR3 = new XAttr.Builder()
|
||||
.setNameSpace(XAttr.NameSpace.SYSTEM)
|
||||
.setName("name")
|
||||
.setValue(value)
|
||||
.build();
|
||||
XATTR4 = new XAttr.Builder()
|
||||
.setNameSpace(XAttr.NameSpace.SECURITY)
|
||||
.setName("name")
|
||||
.setValue(value)
|
||||
.build();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testXAttrEquals() {
|
||||
assertNotSame(XATTR1, XATTR2);
|
||||
assertNotSame(XATTR2, XATTR3);
|
||||
assertNotSame(XATTR3, XATTR4);
|
||||
assertEquals(XATTR, XATTR1);
|
||||
assertEquals(XATTR1, XATTR1);
|
||||
assertEquals(XATTR2, XATTR2);
|
||||
assertEquals(XATTR3, XATTR3);
|
||||
assertEquals(XATTR4, XATTR4);
|
||||
assertFalse(XATTR1.equals(XATTR2));
|
||||
assertFalse(XATTR2.equals(XATTR3));
|
||||
assertFalse(XATTR3.equals(XATTR4));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testXAttrHashCode() {
|
||||
assertEquals(XATTR.hashCode(), XATTR1.hashCode());
|
||||
assertFalse(XATTR1.hashCode() == XATTR2.hashCode());
|
||||
assertFalse(XATTR2.hashCode() == XATTR3.hashCode());
|
||||
assertFalse(XATTR3.hashCode() == XATTR4.hashCode());
|
||||
}
|
||||
}
|
|
@@ -1181,6 +1181,13 @@ public class DFSTestUtil {
        .setType(AclEntryType.OTHER)
        .build());
    filesystem.setAcl(pathConcatTarget, aclEntryList);
    // OP_SET_XATTR
    filesystem.setXAttr(pathConcatTarget, "user.a1",
        new byte[]{0x31, 0x32, 0x33});
    filesystem.setXAttr(pathConcatTarget, "user.a2",
        new byte[]{0x37, 0x38, 0x39});
    // OP_REMOVE_XATTR
    filesystem.removeXAttr(pathConcatTarget, "user.a2");
  }

  public static void abortStream(DFSOutputStream out) throws IOException {
|
|
@ -155,7 +155,7 @@ public class TestDFSShell {
|
|||
}
|
||||
|
||||
@Test (timeout = 30000)
|
||||
public void testRecrusiveRm() throws IOException {
|
||||
public void testRecursiveRm() throws IOException {
|
||||
Configuration conf = new HdfsConfiguration();
|
||||
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
|
||||
FileSystem fs = cluster.getFileSystem();
|
||||
|
@ -1583,6 +1583,7 @@ public class TestDFSShell {
|
|||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
private static String runLsr(final FsShell shell, String root, int returnvalue
|
||||
) throws Exception {
|
||||
System.out.println("root=" + root + ", returnvalue=" + returnvalue);
|
||||
|
@ -1874,6 +1875,333 @@ public class TestDFSShell {
|
|||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
@Test (timeout = 30000)
|
||||
public void testSetXAttrPermission() throws Exception {
|
||||
UserGroupInformation user = UserGroupInformation.
|
||||
createUserForTesting("user", new String[] {"mygroup"});
|
||||
MiniDFSCluster cluster = null;
|
||||
PrintStream bak = null;
|
||||
try {
|
||||
final Configuration conf = new HdfsConfiguration();
|
||||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
|
||||
cluster.waitActive();
|
||||
|
||||
FileSystem fs = cluster.getFileSystem();
|
||||
Path p = new Path("/foo");
|
||||
fs.mkdirs(p);
|
||||
bak = System.err;
|
||||
|
||||
final FsShell fshell = new FsShell(conf);
|
||||
final ByteArrayOutputStream out = new ByteArrayOutputStream();
|
||||
System.setErr(new PrintStream(out));
|
||||
|
||||
// No permission to write xattr
|
||||
fs.setPermission(p, new FsPermission((short) 0700));
|
||||
user.doAs(new PrivilegedExceptionAction<Object>() {
|
||||
@Override
|
||||
public Object run() throws Exception {
|
||||
int ret = ToolRunner.run(fshell, new String[]{
|
||||
"-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
|
||||
assertEquals("Returned should be 1", 1, ret);
|
||||
String str = out.toString();
|
||||
assertTrue("Permission denied printed",
|
||||
str.indexOf("Permission denied") != -1);
|
||||
out.reset();
|
||||
return null;
|
||||
}
|
||||
});
|
||||
|
||||
int ret = ToolRunner.run(fshell, new String[]{
|
||||
"-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
|
||||
assertEquals("Returned should be 0", 0, ret);
|
||||
out.reset();
|
||||
|
||||
// No permission to read and remove
|
||||
fs.setPermission(p, new FsPermission((short) 0750));
|
||||
user.doAs(new PrivilegedExceptionAction<Object>() {
|
||||
@Override
|
||||
public Object run() throws Exception {
|
||||
// Read
|
||||
int ret = ToolRunner.run(fshell, new String[]{
|
||||
"-getfattr", "-n", "user.a1", "/foo"});
|
||||
assertEquals("Returned should be 1", 1, ret);
|
||||
String str = out.toString();
|
||||
assertTrue("Permission denied printed",
|
||||
str.indexOf("Permission denied") != -1);
|
||||
out.reset();
|
||||
// Remove
|
||||
ret = ToolRunner.run(fshell, new String[]{
|
||||
"-setfattr", "-x", "user.a1", "/foo"});
|
||||
assertEquals("Returned should be 1", 1, ret);
|
||||
str = out.toString();
|
||||
assertTrue("Permission denied printed",
|
||||
str.indexOf("Permission denied") != -1);
|
||||
out.reset();
|
||||
return null;
|
||||
}
|
||||
});
|
||||
} finally {
|
||||
if (bak != null) {
|
||||
System.setErr(bak);
|
||||
}
|
||||
if (cluster != null) {
|
||||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* HDFS-6413 xattr names erroneously handled as case-insensitive */
|
||||
@Test (timeout = 30000)
|
||||
public void testSetXAttrCaseSensitivity() throws Exception {
|
||||
UserGroupInformation user = UserGroupInformation.
|
||||
createUserForTesting("user", new String[] {"mygroup"});
|
||||
MiniDFSCluster cluster = null;
|
||||
PrintStream bak = null;
|
||||
try {
|
||||
final Configuration conf = new HdfsConfiguration();
|
||||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
|
||||
cluster.waitActive();
|
||||
|
||||
FileSystem fs = cluster.getFileSystem();
|
||||
Path p = new Path("/mydir");
|
||||
fs.mkdirs(p);
|
||||
bak = System.err;
|
||||
|
||||
final FsShell fshell = new FsShell(conf);
|
||||
final ByteArrayOutputStream out = new ByteArrayOutputStream();
|
||||
System.setOut(new PrintStream(out));
|
||||
|
||||
doSetXattr(out, fshell,
|
||||
new String[] {"-setfattr", "-n", "User.Foo", "/mydir"},
|
||||
new String[] {"-getfattr", "-d", "/mydir"},
|
||||
new String[] {"user.Foo"},
|
||||
new String[] {});
|
||||
|
||||
doSetXattr(out, fshell,
|
||||
new String[] {"-setfattr", "-n", "user.FOO", "/mydir"},
|
||||
new String[] {"-getfattr", "-d", "/mydir"},
|
||||
new String[] {"user.Foo", "user.FOO"},
|
||||
new String[] {});
|
||||
|
||||
doSetXattr(out, fshell,
|
||||
new String[] {"-setfattr", "-n", "USER.foo", "/mydir"},
|
||||
new String[] {"-getfattr", "-d", "/mydir"},
|
||||
new String[] {"user.Foo", "user.FOO", "user.foo"},
|
||||
new String[] {});
|
||||
|
||||
doSetXattr(out, fshell,
|
||||
new String[] {"-setfattr", "-n", "USER.fOo", "-v", "myval", "/mydir"},
|
||||
new String[] {"-getfattr", "-d", "/mydir"},
|
||||
new String[] {"user.Foo", "user.FOO", "user.foo", "user.fOo=\"myval\""},
|
||||
new String[] {"user.Foo=", "user.FOO=", "user.foo="});
|
||||
|
||||
doSetXattr(out, fshell,
|
||||
new String[] {"-setfattr", "-x", "useR.foo", "/mydir"},
|
||||
new String[] {"-getfattr", "-d", "/mydir"},
|
||||
new String[] {"user.Foo", "user.FOO"},
|
||||
new String[] {"foo"});
|
||||
|
||||
doSetXattr(out, fshell,
|
||||
new String[] {"-setfattr", "-x", "USER.FOO", "/mydir"},
|
||||
new String[] {"-getfattr", "-d", "/mydir"},
|
||||
new String[] {"user.Foo"},
|
||||
new String[] {"FOO"});
|
||||
|
||||
doSetXattr(out, fshell,
|
||||
new String[] {"-setfattr", "-x", "useR.Foo", "/mydir"},
|
||||
new String[] {"-getfattr", "-n", "User.Foo", "/mydir"},
|
||||
new String[] {},
|
||||
new String[] {"Foo"});
|
||||
|
||||
} finally {
|
||||
if (bak != null) {
|
||||
System.setOut(bak);
|
||||
}
|
||||
if (cluster != null) {
|
||||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void doSetXattr(ByteArrayOutputStream out, FsShell fshell,
|
||||
String[] setOp, String[] getOp, String[] expectArr,
|
||||
String[] dontExpectArr) throws Exception {
|
||||
int ret = ToolRunner.run(fshell, setOp);
|
||||
out.reset();
|
||||
ret = ToolRunner.run(fshell, getOp);
|
||||
final String str = out.toString();
|
||||
for (int i = 0; i < expectArr.length; i++) {
|
||||
final String expect = expectArr[i];
|
||||
final StringBuilder sb = new StringBuilder
|
||||
("Incorrect results from getfattr. Expected: ");
|
||||
sb.append(expect).append(" Full Result: ");
|
||||
sb.append(str);
|
||||
assertTrue(sb.toString(),
|
||||
str.indexOf(expect) != -1);
|
||||
}
|
||||
|
||||
for (int i = 0; i < dontExpectArr.length; i++) {
|
||||
String dontExpect = dontExpectArr[i];
|
||||
final StringBuilder sb = new StringBuilder
|
||||
("Incorrect results from getfattr. Didn't Expect: ");
|
||||
      sb.append(dontExpect).append(" Full Result: ");
      sb.append(str);
      assertTrue(sb.toString(),
          str.indexOf(dontExpect) == -1);
    }
    out.reset();
  }

  /**
   * HDFS-6374 setXAttr should require the user to be the owner of the file
   * or directory.
   *
   * Test to make sure that only the owner of a file or directory can set
   * or remove the xattrs.
   *
   * As user1:
   * Create a directory (/foo) as user1, chown it to user1 (and user1's group),
   * grant rwx to "other".
   *
   * As user2:
   * Set an xattr (should fail).
   *
   * As user1:
   * Set an xattr (should pass).
   *
   * As user2:
   * Read the xattr (should pass).
   * Remove the xattr (should fail).
   *
   * As user1:
   * Read the xattr (should pass).
   * Remove the xattr (should pass).
   */
  @Test (timeout = 30000)
  public void testSetXAttrPermissionAsDifferentOwner() throws Exception {
    final String USER1 = "user1";
    final String GROUP1 = "mygroup1";
    final UserGroupInformation user1 = UserGroupInformation.
        createUserForTesting(USER1, new String[] {GROUP1});
    final UserGroupInformation user2 = UserGroupInformation.
        createUserForTesting("user2", new String[] {"mygroup2"});
    MiniDFSCluster cluster = null;
    PrintStream bak = null;
    try {
      final Configuration conf = new HdfsConfiguration();
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();

      final FileSystem fs = cluster.getFileSystem();
      fs.setOwner(new Path("/"), USER1, GROUP1);
      bak = System.err;

      final FsShell fshell = new FsShell(conf);
      final ByteArrayOutputStream out = new ByteArrayOutputStream();
      System.setErr(new PrintStream(out));

      // mkdir foo as user1
      user1.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          final int ret = ToolRunner.run(fshell, new String[]{
              "-mkdir", "/foo"});
          assertEquals("Return should be 0", 0, ret);
          out.reset();
          return null;
        }
      });

      user1.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          // Give access to "other"
          final int ret = ToolRunner.run(fshell, new String[]{
              "-chmod", "707", "/foo"});
          assertEquals("Return should be 0", 0, ret);
          out.reset();
          return null;
        }
      });

      // No permission to write xattr for non-owning user (user2).
      user2.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          final int ret = ToolRunner.run(fshell, new String[]{
              "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
          assertEquals("Returned should be 1", 1, ret);
          final String str = out.toString();
          assertTrue("Permission denied printed",
              str.indexOf("Permission denied") != -1);
          out.reset();
          return null;
        }
      });

      // But there should be permission to write xattr for
      // the owning user.
      user1.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          final int ret = ToolRunner.run(fshell, new String[]{
              "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
          assertEquals("Returned should be 0", 0, ret);
          out.reset();
          return null;
        }
      });

      // There should be permission to read, but not to remove, for the
      // non-owning user (user2).
      user2.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          // Read
          int ret = ToolRunner.run(fshell, new String[]{
              "-getfattr", "-n", "user.a1", "/foo"});
          assertEquals("Returned should be 0", 0, ret);
          out.reset();
          // Remove
          ret = ToolRunner.run(fshell, new String[]{
              "-setfattr", "-x", "user.a1", "/foo"});
          assertEquals("Returned should be 1", 1, ret);
          final String str = out.toString();
          assertTrue("Permission denied printed",
              str.indexOf("Permission denied") != -1);
          out.reset();
          return null;
        }
      });

      // But there should be permission to read/remove for
      // the owning user.
      user1.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          // Read
          int ret = ToolRunner.run(fshell, new String[]{
              "-getfattr", "-n", "user.a1", "/foo"});
          assertEquals("Returned should be 0", 0, ret);
          out.reset();
          // Remove
          ret = ToolRunner.run(fshell, new String[]{
              "-setfattr", "-x", "user.a1", "/foo"});
          assertEquals("Returned should be 0", 0, ret);
          out.reset();
          return null;
        }
      });
    } finally {
      if (bak != null) {
        System.setErr(bak);
      }
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }

  /**
   * Test that the server trash configuration is respected when
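The FsShell xattr test above drives the shell programmatically through ToolRunner. For reference, a minimal standalone sketch of the same command flow is shown here; it is not part of the patch, and the class name XAttrShellSketch and the path /tmp/xattr-demo are illustrative assumptions rather than anything introduced by this change.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class XAttrShellSketch {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at a running HDFS with xattrs enabled.
    Configuration conf = new Configuration();
    FsShell shell = new FsShell(conf);
    // Set, read back, and remove a user-namespace xattr; each run() returns 0 on success.
    ToolRunner.run(shell, new String[]{"-setfattr", "-n", "user.a1", "-v", "1234", "/tmp/xattr-demo"});
    ToolRunner.run(shell, new String[]{"-getfattr", "-n", "user.a1", "/tmp/xattr-demo"});
    ToolRunner.run(shell, new String[]{"-setfattr", "-x", "user.a1", "/tmp/xattr-demo"});
  }
}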
@@ -28,6 +28,7 @@ import static org.mockito.Mockito.spy;

import java.io.IOException;
import java.io.OutputStream;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;

import org.apache.commons.logging.Log;

@@ -37,11 +38,15 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

@@ -327,4 +332,70 @@ public class TestFileAppend4 {
      cluster.shutdown();
    }
  }

  /**
   * Test that an append with no locations fails with an exception
   * showing insufficient locations.
   */
  @Test(timeout = 60000)
  public void testAppendInsufficientLocations() throws Exception {
    Configuration conf = new Configuration();

    // lower heartbeat interval for fast recognition of DN
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
        1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4)
        .build();
    DistributedFileSystem fileSystem = null;
    try {
      // create a file with replication 2
      fileSystem = cluster.getFileSystem();
      Path f = new Path("/testAppend");
      FSDataOutputStream create = fileSystem.create(f, (short) 2);
      create.write("/testAppend".getBytes());
      create.close();

      // Check for replications
      DFSTestUtil.waitReplication(fileSystem, f, (short) 2);

      // Shut down all DNs that have the last block location for the file
      LocatedBlocks lbs = fileSystem.dfs.getNamenode().
          getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
      List<DataNode> dnsOfCluster = cluster.getDataNodes();
      DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().
          getLocations();
      for (DataNode dn : dnsOfCluster) {
        for (DatanodeInfo loc : dnsWithLocations) {
          if (dn.getDatanodeId().equals(loc)) {
            dn.shutdown();
            DFSTestUtil.waitForDatanodeDeath(dn);
          }
        }
      }

      // Wait till 0 replication is recognized
      DFSTestUtil.waitReplication(fileSystem, f, (short) 0);

      // Append to the file. At this point there are 2 live DNs but none of
      // them has the block.
      try {
        fileSystem.append(f);
        fail("Append should fail because insufficient locations");
      } catch (IOException e) {
        LOG.info("Expected exception: ", e);
      }
      FSDirectory dir = cluster.getNamesystem().getFSDirectory();
      final INodeFile inode = INodeFile.
          valueOf(dir.getINode("/testAppend"), "/testAppend");
      assertTrue("File should remain closed", !inode.isUnderConstruction());
    } finally {
      if (null != fileSystem) {
        fileSystem.close();
      }
      cluster.shutdown();
    }
  }
}
@@ -73,6 +73,7 @@ public class TestSafeMode {
    conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();

@@ -381,7 +382,19 @@ public class TestSafeMode {
      public void run(FileSystem fs) throws IOException {
        fs.setAcl(file1, Lists.<AclEntry>newArrayList());
      }});

    runFsFun("setXAttr while in SM", new FSRun() {
      @Override
      public void run(FileSystem fs) throws IOException {
        fs.setXAttr(file1, "user.a1", null);
      }});

    runFsFun("removeXAttr while in SM", new FSRun() {
      @Override
      public void run(FileSystem fs) throws IOException {
        fs.removeXAttr(file1, "user.a1");
      }});

    try {
      DFSTestUtil.readFile(fs, file1);
    } catch (IOException ioe) {
@@ -0,0 +1,475 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

import com.google.common.collect.Lists;

/**
 * Tests NameNode interaction for all XAttr APIs.
 * This test suite covers restarting the NN, saving a new checkpoint.
 */
public class FSXAttrBaseTest {

  private static final int MAX_SIZE = 16;

  protected static MiniDFSCluster dfsCluster;
  protected static Configuration conf;
  private static int pathCount = 0;
  private static Path path;

  // XAttrs
  protected static final String name1 = "user.a1";
  protected static final byte[] value1 = {0x31, 0x32, 0x33};
  protected static final byte[] newValue1 = {0x31, 0x31, 0x31};
  protected static final String name2 = "user.a2";
  protected static final byte[] value2 = {0x37, 0x38, 0x39};
  protected static final String name3 = "user.a3";
  protected static final String name4 = "user.a4";

  protected FileSystem fs;

  @BeforeClass
  public static void init() throws Exception {
    conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 3);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, MAX_SIZE);
    initCluster(true);
  }

  @AfterClass
  public static void shutdown() {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
  }

  @Before
  public void setUp() throws Exception {
    pathCount += 1;
    path = new Path("/p" + pathCount);
    initFileSystem();
  }

  @After
  public void destroyFileSystems() {
    IOUtils.cleanup(null, fs);
    fs = null;
  }

  /**
   * Tests for creating xattr
   * 1. Create an xattr using XAttrSetFlag.CREATE.
   * 2. Create an xattr which already exists and expect an exception.
   * 3. Create multiple xattrs.
   * 4. Restart NN and save checkpoint scenarios.
   */
  @Test(timeout = 120000)
  public void testCreateXAttr() throws Exception {
    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));

    Map<String, byte[]> xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 1);
    Assert.assertArrayEquals(value1, xattrs.get(name1));

    fs.removeXAttr(path, name1);

    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 0);

    // Create xattr which already exists.
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    try {
      fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
      Assert.fail("Creating xattr which already exists should fail.");
    } catch (IOException e) {
    }
    fs.removeXAttr(path, name1);

    // Create two xattrs
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(path, name2, null, EnumSet.of(XAttrSetFlag.CREATE));
    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(new byte[0], xattrs.get(name2));

    restart(false);
    initFileSystem();
    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(new byte[0], xattrs.get(name2));

    restart(true);
    initFileSystem();
    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(new byte[0], xattrs.get(name2));

    fs.removeXAttr(path, name1);
    fs.removeXAttr(path, name2);
  }

  /**
   * Tests for replacing xattr
   * 1. Replace an xattr using XAttrSetFlag.REPLACE.
   * 2. Replace an xattr which doesn't exist and expect an exception.
   * 3. Create multiple xattrs and replace some.
   * 4. Restart NN and save checkpoint scenarios.
   */
  @Test(timeout = 120000)
  public void testReplaceXAttr() throws Exception {
    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.REPLACE));

    Map<String, byte[]> xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 1);
    Assert.assertArrayEquals(newValue1, xattrs.get(name1));

    fs.removeXAttr(path, name1);

    // Replace xattr which does not exist.
    try {
      fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.REPLACE));
      Assert.fail("Replacing xattr which does not exist should fail.");
    } catch (IOException e) {
    }

    // Create two xattrs, then replace one
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(path, name2, null, EnumSet.of(XAttrSetFlag.REPLACE));
    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(new byte[0], xattrs.get(name2));

    restart(false);
    initFileSystem();
    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(new byte[0], xattrs.get(name2));

    restart(true);
    initFileSystem();
    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(new byte[0], xattrs.get(name2));

    fs.removeXAttr(path, name1);
    fs.removeXAttr(path, name2);
  }

  /**
   * Tests for setting xattr
   * 1. Set xattr with XAttrSetFlag.CREATE|XAttrSetFlag.REPLACE flag.
   * 2. Set xattr with illegal name.
   * 3. Set xattr without XAttrSetFlag.
   * 4. Set xattr and total number exceeds max limit.
   * 5. Set xattr and name is too long.
   * 6. Set xattr and value is too long.
   */
  @Test(timeout = 120000)
  public void testSetXAttr() throws Exception {
    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE,
        XAttrSetFlag.REPLACE));

    Map<String, byte[]> xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 1);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    fs.removeXAttr(path, name1);

    // Set xattr with null name
    try {
      fs.setXAttr(path, null, value1, EnumSet.of(XAttrSetFlag.CREATE,
          XAttrSetFlag.REPLACE));
      Assert.fail("Setting xattr with null name should fail.");
    } catch (NullPointerException e) {
      GenericTestUtils.assertExceptionContains("XAttr name cannot be null", e);
    } catch (RemoteException e) {
      GenericTestUtils.assertExceptionContains("XAttr name cannot be null", e);
    }

    // Set xattr with empty name: "user."
    try {
      fs.setXAttr(path, "user.", value1, EnumSet.of(XAttrSetFlag.CREATE,
          XAttrSetFlag.REPLACE));
      Assert.fail("Setting xattr with empty name should fail.");
    } catch (HadoopIllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e);
    } catch (IllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains("Invalid value: \"user.\" does " +
          "not belong to the domain ^(user\\.|trusted\\.|system\\.|security\\.).+", e);
    }

    // Set xattr with invalid name: "a1"
    try {
      fs.setXAttr(path, "a1", value1, EnumSet.of(XAttrSetFlag.CREATE,
          XAttrSetFlag.REPLACE));
      Assert.fail("Setting xattr with invalid name prefix or without " +
          "name prefix should fail.");
    } catch (HadoopIllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e);
    } catch (IllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains("Invalid value: \"a1\" does " +
          "not belong to the domain ^(user\\.|trusted\\.|system\\.|security\\.).+", e);
    }

    // Set xattr without XAttrSetFlag
    fs.setXAttr(path, name1, value1);
    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 1);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    fs.removeXAttr(path, name1);

    // XAttr exists, and replace it using CREATE|REPLACE flag.
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.CREATE,
        XAttrSetFlag.REPLACE));

    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 1);
    Assert.assertArrayEquals(newValue1, xattrs.get(name1));

    fs.removeXAttr(path, name1);

    // Total number exceeds max limit
    fs.setXAttr(path, name1, value1);
    fs.setXAttr(path, name2, value2);
    fs.setXAttr(path, name3, null);
    try {
      fs.setXAttr(path, name4, null);
      Assert.fail("Setting xattr should fail if total number of xattrs " +
          "for inode exceeds max limit.");
    } catch (IOException e) {
      GenericTestUtils.assertExceptionContains("Cannot add additional XAttr", e);
    }
    fs.removeXAttr(path, name1);
    fs.removeXAttr(path, name2);
    fs.removeXAttr(path, name3);

    // Name length exceeds max limit
    String longName = "user.0123456789abcdefX";
    try {
      fs.setXAttr(path, longName, null);
      Assert.fail("Setting xattr should fail if name is too long.");
    } catch (IOException e) {
      GenericTestUtils.assertExceptionContains("XAttr is too big", e);
      GenericTestUtils.assertExceptionContains("total size is 17", e);
    }

    // Value length exceeds max limit
    byte[] longValue = new byte[MAX_SIZE];
    try {
      fs.setXAttr(path, "user.a", longValue);
      Assert.fail("Setting xattr should fail if value is too long.");
    } catch (IOException e) {
      GenericTestUtils.assertExceptionContains("XAttr is too big", e);
      GenericTestUtils.assertExceptionContains("total size is 17", e);
    }

    // Name + value exactly equal the limit
    String name = "user.111";
    byte[] value = new byte[MAX_SIZE-3];
    fs.setXAttr(path, name, value);
  }

  /**
   * Tests for getting xattr
   * 1. To get xattr which does not exist.
   * 2. To get multiple xattrs.
   */
  @Test(timeout = 120000)
  public void testGetXAttrs() throws Exception {
    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));

    // XAttr does not exist.
    byte[] value = fs.getXAttr(path, name3);
    Assert.assertEquals(value, null);

    List<String> names = Lists.newArrayList();
    names.add(name1);
    names.add(name2);
    names.add(name3);
    Map<String, byte[]> xattrs = fs.getXAttrs(path, names);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(value2, xattrs.get(name2));

    fs.removeXAttr(path, name1);
    fs.removeXAttr(path, name2);
  }

  /**
   * Tests for removing xattr
   * 1. Remove xattr.
   * 2. Restart NN and save checkpoint scenarios.
   */
  @Test(timeout = 120000)
  public void testRemoveXAttr() throws Exception {
    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(path, name3, null, EnumSet.of(XAttrSetFlag.CREATE));

    fs.removeXAttr(path, name1);
    fs.removeXAttr(path, name2);

    Map<String, byte[]> xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 1);
    Assert.assertArrayEquals(new byte[0], xattrs.get(name3));

    restart(false);
    initFileSystem();
    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 1);
    Assert.assertArrayEquals(new byte[0], xattrs.get(name3));

    restart(true);
    initFileSystem();
    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 1);
    Assert.assertArrayEquals(new byte[0], xattrs.get(name3));

    fs.removeXAttr(path, name3);
  }

  /**
   * Steps:
   * 1) Set xattrs on a file.
   * 2) Remove xattrs from that file.
   * 3) Save a checkpoint and restart NN.
   * 4) Set xattrs again on the same file.
   * 5) Remove xattrs from that file.
   * 6) Restart NN without saving a checkpoint.
   * 7) Set xattrs again on the same file.
   */
  @Test(timeout = 120000)
  public void testCleanupXAttrs() throws Exception {
    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
    fs.removeXAttr(path, name1);
    fs.removeXAttr(path, name2);

    restart(true);
    initFileSystem();

    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
    fs.removeXAttr(path, name1);
    fs.removeXAttr(path, name2);

    restart(false);
    initFileSystem();

    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
    fs.removeXAttr(path, name1);
    fs.removeXAttr(path, name2);

    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));

    Map<String, byte[]> xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(value2, xattrs.get(name2));
  }

  /**
   * Creates a FileSystem for the super-user.
   *
   * @return FileSystem for super-user
   * @throws Exception if creation fails
   */
  protected FileSystem createFileSystem() throws Exception {
    return dfsCluster.getFileSystem();
  }

  /**
   * Initializes all FileSystem instances used in the tests.
   *
   * @throws Exception if initialization fails
   */
  private void initFileSystem() throws Exception {
    fs = createFileSystem();
  }

  /**
   * Initialize the cluster, wait for it to become active, and get FileSystem
   * instances for our test users.
   *
   * @param format if true, format the NameNode and DataNodes before starting up
   * @throws Exception if any step fails
   */
  protected static void initCluster(boolean format) throws Exception {
    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format)
        .build();
    dfsCluster.waitActive();
  }

  /**
   * Restart the cluster, optionally saving a new checkpoint.
   *
   * @param checkpoint boolean true to save a new checkpoint
   * @throws Exception if restart fails
   */
  protected static void restart(boolean checkpoint) throws Exception {
    NameNode nameNode = dfsCluster.getNameNode();
    if (checkpoint) {
      NameNodeAdapter.enterSafeMode(nameNode, false);
      NameNodeAdapter.saveNamespace(nameNode);
    }
    shutdown();
    initCluster(false);
  }
}
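FSXAttrBaseTest above exercises the FileSystem xattr API end to end. As a quick reference, a minimal sketch of the create/replace/get/remove flow those tests cover is shown here; the class and method names are illustrative assumptions, and it presumes a FileSystem with xattrs enabled and an existing path.

import java.io.IOException;
import java.util.EnumSet;
import java.util.Map;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;

public class XAttrApiSketch {
  /** Create, replace, read, and remove one user-namespace xattr on an existing path. */
  static void roundTrip(FileSystem fs, Path p) throws IOException {
    fs.setXAttr(p, "user.demo", new byte[]{0x31}, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(p, "user.demo", new byte[]{0x32}, EnumSet.of(XAttrSetFlag.REPLACE));
    Map<String, byte[]> xattrs = fs.getXAttrs(p);  // now maps "user.demo" to {0x32}
    fs.removeXAttr(p, "user.demo");
  }
}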
@@ -0,0 +1,139 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode;

import static org.junit.Assert.assertEquals;

import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class TestDiskspaceQuotaUpdate {
  private static final int BLOCKSIZE = 1024;
  private static final short REPLICATION = 1;

  private Configuration conf;
  private MiniDFSCluster cluster;
  private FSDirectory fsdir;
  private DistributedFileSystem dfs;

  @Before
  public void setUp() throws Exception {
    conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
        .build();
    cluster.waitActive();

    fsdir = cluster.getNamesystem().getFSDirectory();
    dfs = cluster.getFileSystem();
  }

  @After
  public void tearDown() throws Exception {
    if (cluster != null) {
      cluster.shutdown();
    }
  }

  /**
   * Test if the quota can be correctly updated for append
   */
  @Test
  public void testUpdateQuotaForAppend() throws Exception {
    final Path foo = new Path("/foo");
    final Path bar = new Path(foo, "bar");
    DFSTestUtil.createFile(dfs, bar, BLOCKSIZE, REPLICATION, 0L);
    dfs.setQuota(foo, Long.MAX_VALUE-1, Long.MAX_VALUE-1);

    // append half of the block data
    DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE / 2);

    INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
    Quota.Counts quota = fooNode.getDirectoryWithQuotaFeature()
        .getSpaceConsumed();
    long ns = quota.get(Quota.NAMESPACE);
    long ds = quota.get(Quota.DISKSPACE);
    assertEquals(2, ns); // foo and bar
    assertEquals((BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, ds);

    // append another block
    DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE);

    quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
    ns = quota.get(Quota.NAMESPACE);
    ds = quota.get(Quota.DISKSPACE);
    assertEquals(2, ns); // foo and bar
    assertEquals((BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION, ds);
  }

  /**
   * Test if the quota can be correctly updated when file length is updated
   * through fsync
   */
  @Test
  public void testUpdateQuotaForFSync() throws Exception {
    final Path foo = new Path("/foo");
    final Path bar = new Path(foo, "bar");
    DFSTestUtil.createFile(dfs, bar, BLOCKSIZE, REPLICATION, 0L);
    dfs.setQuota(foo, Long.MAX_VALUE-1, Long.MAX_VALUE-1);

    FSDataOutputStream out = dfs.append(bar);
    out.write(new byte[BLOCKSIZE / 4]);
    ((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet
        .of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));

    INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
    Quota.Counts quota = fooNode.getDirectoryWithQuotaFeature()
        .getSpaceConsumed();
    long ns = quota.get(Quota.NAMESPACE);
    long ds = quota.get(Quota.DISKSPACE);
    assertEquals(2, ns); // foo and bar
    assertEquals(BLOCKSIZE * 2 * REPLICATION, ds); // file is under construction

    out.write(new byte[BLOCKSIZE / 4]);
    out.close();

    fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
    quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
    ns = quota.get(Quota.NAMESPACE);
    ds = quota.get(Quota.DISKSPACE);
    assertEquals(2, ns);
    assertEquals((BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, ds);

    // append another block
    DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE);

    quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
    ns = quota.get(Quota.NAMESPACE);
    ds = quota.get(Quota.DISKSPACE);
    assertEquals(2, ns); // foo and bar
    assertEquals((BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION, ds);
  }
}
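The diskspace numbers asserted in TestDiskspaceQuotaUpdate follow from bytes consumed times the replication factor. A small sketch of that arithmetic, using the same constants as the test (BLOCKSIZE = 1024, REPLICATION = 1), is shown here; it is illustrative only and not part of the patch.

public class QuotaArithmeticSketch {
  public static void main(String[] args) {
    final int blockSize = 1024;    // BLOCKSIZE in the test
    final short replication = 1;   // REPLICATION in the test
    // Diskspace quota usage is the number of bytes written times the replication factor.
    long afterHalfBlockAppend = (blockSize + blockSize / 2) * replication;   // 1536
    long afterOneMoreBlock = (blockSize * 2 + blockSize / 2) * replication;  // 2560
    System.out.println(afterHalfBlockAppend + " " + afterOneMoreBlock);
  }
}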
@@ -0,0 +1,127 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;
import java.util.EnumSet;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

/**
 * 1) save xattrs, restart NN, assert xattrs reloaded from edit log,
 * 2) save xattrs, create new checkpoint, restart NN, assert xattrs
 * reloaded from fsimage
 */
public class TestFSImageWithXAttr {
  private static Configuration conf;
  private static MiniDFSCluster cluster;

  //xattrs
  private static final String name1 = "user.a1";
  private static final byte[] value1 = {0x31, 0x32, 0x33};
  private static final byte[] newValue1 = {0x31, 0x31, 0x31};
  private static final String name2 = "user.a2";
  private static final byte[] value2 = {0x37, 0x38, 0x39};

  @BeforeClass
  public static void setUp() throws IOException {
    conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
  }

  @AfterClass
  public static void tearDown() {
    cluster.shutdown();
  }

  private void testXAttr(boolean persistNamespace) throws IOException {
    Path path = new Path("/p");
    DistributedFileSystem fs = cluster.getFileSystem();
    fs.create(path).close();

    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));

    restart(fs, persistNamespace);

    Map<String, byte[]> xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(value2, xattrs.get(name2));

    fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.REPLACE));

    restart(fs, persistNamespace);

    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(newValue1, xattrs.get(name1));
    Assert.assertArrayEquals(value2, xattrs.get(name2));

    fs.removeXAttr(path, name1);
    fs.removeXAttr(path, name2);

    restart(fs, persistNamespace);
    xattrs = fs.getXAttrs(path);
    Assert.assertEquals(xattrs.size(), 0);
  }

  @Test
  public void testPersistXAttr() throws IOException {
    testXAttr(true);
  }

  @Test
  public void testXAttrEditLog() throws IOException {
    testXAttr(false);
  }

  /**
   * Restart the NameNode, optionally saving a new checkpoint.
   *
   * @param fs DistributedFileSystem used for saving namespace
   * @param persistNamespace boolean true to save a new checkpoint
   * @throws IOException if restart fails
   */
  private void restart(DistributedFileSystem fs, boolean persistNamespace)
      throws IOException {
    if (persistNamespace) {
      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      fs.saveNamespace();
      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    }

    cluster.restartNameNode();
    cluster.waitActive();
  }

}
@@ -0,0 +1,96 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;
import java.net.URI;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.junit.BeforeClass;

/**
 * Tests of XAttr operations using FileContext APIs.
 */
public class TestFileContextXAttr extends FSXAttrBaseTest {

  @Override
  protected FileSystem createFileSystem() throws Exception {
    FileContextFS fcFs = new FileContextFS();
    fcFs.initialize(FileSystem.getDefaultUri(conf), conf);
    return fcFs;
  }

  /**
   * This reuses FSXAttrBaseTest's testcases by creating a filesystem
   * implementation which uses FileContext by only overriding the xattr related
   * methods. Other operations will use the normal filesystem.
   */
  public static class FileContextFS extends DistributedFileSystem {

    private FileContext fc;

    @Override
    public void initialize(URI uri, Configuration conf) throws IOException {
      super.initialize(uri, conf);
      fc = FileContext.getFileContext(conf);
    }

    @Override
    public void setXAttr(Path path, final String name, final byte[] value)
        throws IOException {
      fc.setXAttr(path, name, value);
    }

    @Override
    public void setXAttr(Path path, final String name, final byte[] value,
        final EnumSet<XAttrSetFlag> flag) throws IOException {
      fc.setXAttr(path, name, value, flag);
    }

    @Override
    public byte[] getXAttr(Path path, final String name) throws IOException {
      return fc.getXAttr(path, name);
    }

    @Override
    public Map<String, byte[]> getXAttrs(Path path) throws IOException {
      return fc.getXAttrs(path);
    }

    @Override
    public Map<String, byte[]> getXAttrs(Path path, final List<String> names)
        throws IOException {
      return fc.getXAttrs(path, names);
    }

    @Override
    public void removeXAttr(Path path, final String name) throws IOException {
      fc.removeXAttr(path, name);
    }
  }
}
@@ -129,4 +129,44 @@ public class TestHostsFiles {
      cluster.shutdown();
    }
  }

  @Test
  public void testHostsIncludeForDeadCount() throws Exception {
    Configuration conf = getConf();

    // Configure include and exclude hosts files
    FileSystem localFileSys = FileSystem.getLocal(conf);
    Path workingDir = localFileSys.getWorkingDirectory();
    Path dir = new Path(workingDir, "build/test/data/temp/decommission");
    Path excludeFile = new Path(dir, "exclude");
    Path includeFile = new Path(dir, "include");
    assertTrue(localFileSys.mkdirs(dir));
    StringBuilder includeHosts = new StringBuilder();
    includeHosts.append("localhost:52").append("\n").append("127.0.0.1:7777")
        .append("\n");
    DFSTestUtil.writeFile(localFileSys, excludeFile, "");
    DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
    conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
    conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());

    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
      final FSNamesystem ns = cluster.getNameNode().getNamesystem();
      assertTrue(ns.getNumDeadDataNodes() == 2);
      assertTrue(ns.getNumLiveDataNodes() == 0);

      // Testing using MBeans
      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
      ObjectName mxbeanName = new ObjectName(
          "Hadoop:service=NameNode,name=FSNamesystemState");
      String nodes = mbs.getAttribute(mxbeanName, "NumDeadDataNodes") + "";
      assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumDeadDataNodes") == 2);
      assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumLiveDataNodes") == 0);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
}
@@ -46,6 +46,7 @@ import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSClient;

@@ -67,6 +68,8 @@ import org.apache.hadoop.util.Time;
import org.junit.Test;
import org.mockito.Mockito;

import com.google.common.collect.ImmutableList;

public class TestINodeFile {
  public static final Log LOG = LogFactory.getLog(TestINodeFile.class);

@@ -1077,4 +1080,22 @@ public class TestINodeFile {
    file.toCompleteFile(Time.now());
    assertFalse(file.isUnderConstruction());
  }

  @Test
  public void testXAttrFeature() {
    replication = 3;
    preferredBlockSize = 128*1024*1024;
    INodeFile inf = createINodeFile(replication, preferredBlockSize);
    ImmutableList.Builder<XAttr> builder = new ImmutableList.Builder<XAttr>();
    XAttr xAttr = new XAttr.Builder().setNameSpace(XAttr.NameSpace.USER).
        setName("a1").setValue(new byte[]{0x31, 0x32, 0x33}).build();
    builder.add(xAttr);
    XAttrFeature f = new XAttrFeature(builder.build());
    inf.addXAttrFeature(f);
    XAttrFeature f1 = inf.getXAttrFeature();
    assertEquals(xAttr, f1.getXAttrs().get(0));
    inf.removeXAttrFeature();
    f1 = inf.getXAttrFeature();
    assertEquals(f1, null);
  }
}
Some files were not shown because too many files have changed in this diff.