Merge from trunk to branch
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1614553 13f79535-47bb-0310-9956-ffa450edef68
commit c3fa0b5884
@@ -460,6 +460,9 @@ Release 2.6.0 - UNRELEASED

HADOOP-10882. Move DirectBufferPool into common util. (todd)

HADOOP-8069. Enable TCP_NODELAY by default for IPC. (Todd Lipcon via
Arpit Agarwal)

OPTIMIZATIONS

BUG FIXES

@@ -491,6 +494,9 @@ Release 2.6.0 - UNRELEASED
HADOOP-10830. Missing lock in JavaKeyStoreProvider.createCredentialEntry.
(Benoy Antony via umamahesh)

HADOOP-10876. The constructor of Path should not take an empty URL as a
parameter. (Zhihai Xu via wang)

Release 2.5.0 - UNRELEASED

INCOMPATIBLE CHANGES

@@ -1040,21 +1040,10 @@ public abstract class AbstractFileSystem {

/**
* Set an xattr of a file or directory.
* The name must be prefixed with user/trusted/security/system and
* followed by ".". For example, "user.attr".
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* A regular user can only set an xattr for the "user" namespace.
* The super user can set an xattr of either the "user" or "trusted" namespaces.
* The xattrs of the "security" and "system" namespaces are only used/exposed
* internally by/to the FS impl.
* <p/>
* The access permissions of an xattr in the "user" namespace are
* defined by the file and directory permission bits.
* An xattr can only be set when the logged-in user has the correct permissions.
* If the xattr exists, it will be replaced.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to modify
* @param name xattr name.

@@ -1069,21 +1058,10 @@ public abstract class AbstractFileSystem {

/**
* Set an xattr of a file or directory.
* The name must be prefixed with user/trusted/security/system and
* followed by ".". For example, "user.attr".
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* A regular user can only set an xattr for the "user" namespace.
* The super user can set an xattr of either the "user" or "trusted" namespaces.
* The xattrs of the "security" and "system" namespaces are only used/exposed
* internally by/to the FS impl.
* <p/>
* The access permissions of an xattr in the "user" namespace are
* defined by the file and directory permission bits.
* An xattr can only be set when the logged-in user has the correct permissions.
* If the xattr exists, it will be replaced.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to modify
* @param name xattr name.

@@ -1099,18 +1077,10 @@ public abstract class AbstractFileSystem {

/**
* Get an xattr for a file or directory.
* The name must be prefixed with user/trusted/security/system and
* followed by ".". For example, "user.attr".
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* A regular user can only get an xattr for the "user" namespace.
* The super user can get an xattr of either the "user" or "trusted" namespaces.
* The xattrs of the "security" and "system" namespaces are only used/exposed
* internally by/to the FS impl.
* <p/>
* An xattr will only be returned when the logged-in user has the correct permissions.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attribute
* @param name xattr name.

@@ -1127,13 +1097,7 @@ public abstract class AbstractFileSystem {
* Only those xattrs for which the logged-in user has permissions to view
* are returned.
* <p/>
* A regular user can only get xattrs for the "user" namespace.
* The super user can only get xattrs for "user" and "trusted" namespaces.
* The xattr of "security" and "system" namespaces are only used/exposed
* internally by/to the FS impl.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
* @return Map<String, byte[]> describing the XAttrs of the file or directory

@@ -1149,13 +1113,7 @@ public abstract class AbstractFileSystem {
* Only those xattrs for which the logged-in user has permissions to view
* are returned.
* <p/>
* A regular user can only get xattrs for the "user" namespace.
* The super user can only get xattrs for "user" and "trusted" namespaces.
* The xattr of "security" and "system" namespaces are only used/exposed
* internally by/to the FS impl.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
* @param names XAttr names.

@@ -1173,14 +1131,7 @@ public abstract class AbstractFileSystem {
* Only the xattr names for which the logged-in user has permissions to view
* are returned.
* <p/>
* A regular user can only get xattr names for the "user" namespace.
* The super user can only get xattr names for the "user" and "trusted"
* namespaces.
* The xattr names in the "security" and "system" namespaces are only
* used/exposed internally by/to the FS impl.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
* @return Map<String, byte[]> describing the XAttrs of the file or directory

@@ -1194,21 +1145,10 @@ public abstract class AbstractFileSystem {

/**
* Remove an xattr of a file or directory.
* The name must be prefixed with user/trusted/security/system and
* followed by ".". For example, "user.attr".
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* A regular user can only remove an xattr for the "user" namespace.
* The super user can remove an xattr of either the "user" or "trusted" namespaces.
* The xattrs of the "security" and "system" namespaces are only used/exposed
* internally by/to the FS impl.
* <p/>
* The access permissions of an xattr in the "user" namespace are
* defined by the file and directory permission bits.
* An xattr can only be set when the logged-in user has the correct permissions.
* If the xattr exists, it will be replaced.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to remove extended attribute
* @param name xattr name

@@ -207,7 +207,7 @@ public class CommonConfigurationKeysPublic {
public static final String IPC_CLIENT_TCPNODELAY_KEY =
"ipc.client.tcpnodelay";
/** Defalt value for IPC_CLIENT_TCPNODELAY_KEY */
public static final boolean IPC_CLIENT_TCPNODELAY_DEFAULT = false;
public static final boolean IPC_CLIENT_TCPNODELAY_DEFAULT = true;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String IPC_SERVER_LISTEN_QUEUE_SIZE_KEY =
"ipc.server.listen.queue.size";

@@ -226,7 +226,7 @@ public class CommonConfigurationKeysPublic {
public static final String IPC_SERVER_TCPNODELAY_KEY =
"ipc.server.tcpnodelay";
/** Default value for IPC_SERVER_TCPNODELAY_KEY */
public static final boolean IPC_SERVER_TCPNODELAY_DEFAULT = false;
public static final boolean IPC_SERVER_TCPNODELAY_DEFAULT = true;

/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY =

@@ -2297,21 +2297,10 @@ public final class FileContext {

/**
* Set an xattr of a file or directory.
* The name must be prefixed with user/trusted/security/system and
* followed by ".". For example, "user.attr".
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* A regular user can only set an xattr for the "user" namespace.
* The super user can set an xattr of either the "user" or "trusted" namespaces.
* The xattrs of the "security" and "system" namespaces are only used/exposed
* internally by/to the FS impl.
* <p/>
* The access permissions of an xattr in the "user" namespace are
* defined by the file and directory permission bits.
* An xattr can only be set when the logged-in user has the correct permissions.
* If the xattr exists, it will be replaced.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to modify
* @param name xattr name.

@@ -2326,21 +2315,10 @@ public final class FileContext {

/**
* Set an xattr of a file or directory.
* The name must be prefixed with user/trusted/security/system and
* followed by ".". For example, "user.attr".
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* A regular user can only set an xattr for the "user" namespace.
* The super user can set an xattr of either the "user" or "trusted" namespaces.
* The xattrs of the "security" and "system" namespaces are only used/exposed
* internally by/to the FS impl.
* <p/>
* The access permissions of an xattr in the "user" namespace are
* defined by the file and directory permission bits.
* An xattr can only be set when the logged-in user has the correct permissions.
* If the xattr exists, it will be replaced.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to modify
* @param name xattr name.

@@ -2363,19 +2341,10 @@ public final class FileContext {

/**
* Get an xattr for a file or directory.
* The name must be prefixed with user/trusted/security/system and
* followed by ".". For example, "user.attr".
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
*
* A regular user can only get an xattr for the "user" namespace.
* The super user can get an xattr of either the "user" or "trusted" namespaces.
* The xattrs of the "security" and "system" namespaces are only used/exposed
* internally by/to the FS impl.
* <p/>
* An xattr will only be returned when the logged-in user has the correct permissions.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attribute
* @param name xattr name.

@@ -2398,13 +2367,7 @@ public final class FileContext {
* Only those xattrs for which the logged-in user has permissions to view
* are returned.
* <p/>
* A regular user can only get xattrs for the "user" namespace.
* The super user can only get xattrs for "user" and "trusted" namespaces.
* The xattr of "security" and "system" namespaces are only used/exposed
* internally by/to the FS impl.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
* @return Map<String, byte[]> describing the XAttrs of the file or directory

@@ -2426,13 +2389,7 @@ public final class FileContext {
* Only those xattrs for which the logged-in user has permissions to view
* are returned.
* <p/>
* A regular user can only get xattrs for the "user" namespace.
* The super user can only get xattrs for "user" and "trusted" namespaces.
* The xattr of "security" and "system" namespaces are only used/exposed
* internally by/to the FS impl.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
* @param names XAttr names.

@@ -2453,21 +2410,10 @@ public final class FileContext {

/**
* Remove an xattr of a file or directory.
* The name must be prefixed with user/trusted/security/system and
* followed by ".". For example, "user.attr".
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* A regular user can only remove an xattr for the "user" namespace.
* The super user can remove an xattr of either the "user" or "trusted" namespaces.
* The xattrs of the "security" and "system" namespaces are only used/exposed
* internally by/to the FS impl.
* <p/>
* The access permissions of an xattr in the "user" namespace are
* defined by the file and directory permission bits.
* An xattr can only be set when the logged-in user has the correct permissions.
* If the xattr exists, it will be replaced.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to remove extended attribute
* @param name xattr name

@@ -2490,14 +2436,7 @@ public final class FileContext {
* Only those xattr names which the logged-in user has permissions to view
* are returned.
* <p/>
* A regular user can only get xattr names for the "user" namespace.
* The super user can only get xattr names for "user" and "trusted"
* namespaces.
* The xattrs of the "security" and "system" namespaces are only
* used/exposed internally by/to the FS impl.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
* @return List<String> of the XAttr names of the file or directory

@@ -2364,21 +2364,10 @@ public abstract class FileSystem extends Configured implements Closeable {

/**
* Set an xattr of a file or directory.
* The name must be prefixed with user/trusted/security/system and
* followed by ".". For example, "user.attr".
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* A regular user can only set an xattr for the "user" namespace.
* The super user can set an xattr of either the "user" or "trusted" namespaces.
* The xattrs of the "security" and "system" namespaces are only used/exposed
* internally by/to the FS impl.
* <p/>
* The access permissions of an xattr in the "user" namespace are
* defined by the file and directory permission bits.
* An xattr can only be set when the logged-in user has the correct permissions.
* If the xattr exists, it will be replaced.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to modify
* @param name xattr name.

@@ -2393,21 +2382,10 @@ public abstract class FileSystem extends Configured implements Closeable {

/**
* Set an xattr of a file or directory.
* The name must be prefixed with user/trusted/security/system and
* followed by ".". For example, "user.attr".
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* A regular user can only set an xattr for the "user" namespace.
* The super user can set an xattr of either the "user" or "trusted" namespaces.
* The xattrs of the "security" and "system" namespaces are only used/exposed
* internally by/to the FS impl.
* <p/>
* The access permissions of an xattr in the "user" namespace are
* defined by the file and directory permission bits.
* An xattr can only be set if the logged-in user has the correct permissions.
* If the xattr exists, it is replaced.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to modify
* @param name xattr name.

@@ -2423,20 +2401,10 @@ public abstract class FileSystem extends Configured implements Closeable {

/**
* Get an xattr name and value for a file or directory.
* The name must be prefixed with user/trusted/security/system and
* followed by ".". For example, "user.attr".
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
*
* A regular user can only get an xattr for the "user" namespace.
* The super user can get an xattr of either the "user" or "trusted" namespaces.
* The xattrs of the "security" and "system" namespaces are only used/exposed
* internally by/to the FS impl.
* <p/>
* An xattr will only be returned if the logged-in user has the
* correct permissions.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attribute
* @param name xattr name.

@@ -2453,13 +2421,7 @@ public abstract class FileSystem extends Configured implements Closeable {
* Only those xattrs which the logged-in user has permissions to view
* are returned.
* <p/>
* A regular user can only get xattrs for the "user" namespace.
* The super user can only get xattrs for "user" and "trusted" namespaces.
* The xattrs of the "security" and "system" namespaces are only used/exposed
* internally by/to the FS impl.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
* @return Map<String, byte[]> describing the XAttrs of the file or directory

@@ -2475,13 +2437,7 @@ public abstract class FileSystem extends Configured implements Closeable {
* Only those xattrs which the logged-in user has permissions to view
* are returned.
* <p/>
* A regular user can only get xattrs for the "user" namespace.
* The super user can only get xattrs for "user" and "trusted" namespaces.
* The xattrs of the "security" and "system" namespaces are only used/exposed
* internally by/to the FS impl.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
* @param names XAttr names.

@@ -2499,14 +2455,7 @@ public abstract class FileSystem extends Configured implements Closeable {
* Only those xattr names which the logged-in user has permissions to view
* are returned.
* <p/>
* A regular user can only get xattr names for the "user" namespace.
* The super user can only get xattr names for "user" and "trusted"
* namespaces.
* The xattrs of the "security" and "system" namespaces are only
* used/exposed internally by/to the FS impl.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
* @return List<String> of the XAttr names of the file or directory

@@ -2519,21 +2468,10 @@ public abstract class FileSystem extends Configured implements Closeable {

/**
* Remove an xattr of a file or directory.
* The name must be prefixed with user/trusted/security/system and
* followed by ".". For example, "user.attr".
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* A regular user can only remove an xattr for the "user" namespace.
* The super user can remove an xattr of either the "user" or "trusted" namespaces.
* The xattrs of the "security" and "system" namespaces are only used/exposed
* internally by/to the FS impl.
* <p/>
* The access permissions of an xattr in the "user" namespace are
* defined by the file and directory permission bits.
* An xattr can only be set when the logged-in user has the correct permissions.
* If the xattr exists, it will be replaced.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to remove extended attribute
* @param name xattr name

@@ -128,7 +128,20 @@ public class Path implements Comparable {
"Can not create a Path from an empty string");
}
}


/** check URI parameter of Path constructor. */
private void checkPathArg(URI aUri) throws IllegalArgumentException {
// disallow construction of a Path from an empty URI
if (aUri == null) {
throw new IllegalArgumentException(
"Can not create a Path from a null URI");
}
if (aUri.toString().isEmpty()) {
throw new IllegalArgumentException(
"Can not create a Path from an empty URI");
}
}

/** Construct a path from a String. Path strings are URIs, but with
* unescaped elements and some additional normalization. */
public Path(String pathString) throws IllegalArgumentException {

@@ -176,6 +189,7 @@ public class Path implements Comparable {
* Construct a path from a URI
*/
public Path(URI aUri) {
checkPathArg(aUri);
uri = aUri.normalize();
}

@@ -807,25 +807,6 @@ for ldap providers in the same way as above does.
</description>
</property>

<property>
<name>ipc.server.tcpnodelay</name>
<value>false</value>
<description>Turn on/off Nagle's algorithm for the TCP socket connection on
the server. Setting to true disables the algorithm and may decrease latency
with a cost of more/smaller packets.
</description>
</property>

<property>
<name>ipc.client.tcpnodelay</name>
<value>false</value>
<description>Turn on/off Nagle's algorithm for the TCP socket connection on
the client. Setting to true disables the algorithm and may decrease latency
with a cost of more/smaller packets.
</description>
</property>


<!-- Proxy Configuration -->

<property>
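
The core-default.xml hunk above drops the old ipc.server.tcpnodelay and ipc.client.tcpnodelay blocks as part of HADOOP-8069, which makes TCP_NODELAY the default for IPC. A minimal sketch, using only the stock org.apache.hadoop.conf.Configuration API, of how a deployment could restore the previous behaviour; the class name below is illustrative and not part of the commit:

import org.apache.hadoop.conf.Configuration;

public class RestoreNagleDefaults {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // With TCP_NODELAY now on by default, setting these keys back to false
    // re-enables Nagle's algorithm for IPC client and server sockets.
    conf.setBoolean("ipc.client.tcpnodelay", false);
    conf.setBoolean("ipc.server.tcpnodelay", false);
    System.out.println(conf.getBoolean("ipc.client.tcpnodelay", true));
  }
}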

@@ -26,11 +26,13 @@ import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.AvroTestUtil;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Shell;

import com.google.common.base.Joiner;

import junit.framework.TestCase;
import static org.junit.Assert.fail;

public class TestPath extends TestCase {
/**

@@ -305,6 +307,28 @@ public class TestPath extends TestCase {
// if the child uri is absolute path
assertEquals("foo://bar/fud#boo", new Path(new Path(new URI(
"foo://bar/baz#bud")), new Path(new URI("/fud#boo"))).toString());

// empty URI
URI uri3 = new URI("");
assertEquals("", uri3.toString());
try {
path = new Path(uri3);
fail("Expected exception for empty URI");
} catch (IllegalArgumentException e) {
// expect to receive an IllegalArgumentException
GenericTestUtils.assertExceptionContains("Can not create a Path"
+ " from an empty URI", e);
}
// null URI
uri3 = null;
try {
path = new Path(uri3);
fail("Expected exception for null URI");
} catch (IllegalArgumentException e) {
// expect to receive an IllegalArgumentException
GenericTestUtils.assertExceptionContains("Can not create a Path"
+ " from a null URI", e);
}
}

/** Test URIs created from Path objects */

@@ -324,6 +324,14 @@ Release 2.6.0 - UNRELEASED
HDFS-6750. The DataNode should use its shared memory segment to mark
short-circuit replicas that have been unlinked as stale (cmccabe)

HDFS-6739. Add getDatanodeStorageReport to ClientProtocol. (szetszwo)

HDFS-6665. Add tests for XAttrs in combination with viewfs.
(Stephen Chu via wang)

HDFS-6778. The extended attributes javadoc should simply refer to the
user docs. (clamb via wang)

OPTIMIZATIONS

HDFS-6690. Deduplicate xattr names in memory. (wang)

@@ -938,6 +946,9 @@ Release 2.5.0 - UNRELEASED
HDFS-6723. New NN webUI no longer displays decommissioned state for dead node.
(Ming Ma via wheat9)

HDFS-6717. JIRA HDFS-5804 breaks default nfs-gateway behavior for unsecured config
(brandonli)

BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS

HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)

@@ -35,19 +35,21 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;

@@ -63,8 +65,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT;

import java.io.BufferedOutputStream;
import java.io.DataInputStream;

@@ -95,7 +95,6 @@ import java.util.concurrent.atomic.AtomicInteger;

import javax.net.SocketFactory;

import com.google.common.collect.Lists;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;

@@ -122,22 +121,22 @@ import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.VolumeId;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.net.TcpPeerServer;
import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator;

@@ -169,8 +168,8 @@ import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;

@@ -186,6 +185,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.IOUtils;

@@ -211,6 +211,7 @@ import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.net.InetAddresses;

/********************************************************

@@ -2285,6 +2286,11 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
return namenode.getDatanodeReport(type);
}

public DatanodeStorageReport[] getDatanodeStorageReport(
DatanodeReportType type) throws IOException {
return namenode.getDatanodeStorageReport(type);
}

/**
* Enter, leave or get safe mode.
*
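
The last DFSClient hunk above wires in the new HDFS-6739 getDatanodeStorageReport call. A minimal usage sketch follows; the class name and the way the client is constructed here are illustrative assumptions, not part of the commit:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;

public class StorageReportDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    DFSClient client = new DFSClient(NameNode.getAddress(conf), conf);
    try {
      // One DatanodeStorageReport per live datanode, each carrying a
      // StorageReport for every storage attached to that node.
      DatanodeStorageReport[] reports =
          client.getDatanodeStorageReport(DatanodeReportType.LIVE);
      for (DatanodeStorageReport r : reports) {
        System.out.println(r.getDatanodeInfo().getHostName()
            + ": " + r.getStorageReports().length + " storage(s)");
      }
    } finally {
      client.close();
    }
  }
}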

@@ -25,6 +25,7 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;

@@ -32,11 +33,10 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;

@@ -48,6 +48,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.retry.AtMostOnce;

@@ -656,6 +657,13 @@ public interface ClientProtocol {
public DatanodeInfo[] getDatanodeReport(HdfsConstants.DatanodeReportType type)
throws IOException;

/**
* Get a report on the current datanode storages.
*/
@Idempotent
public DatanodeStorageReport[] getDatanodeStorageReport(
HdfsConstants.DatanodeReportType type) throws IOException;

/**
* Get the block size for the given file.
* @param filename The name of the file

@@ -1280,17 +1288,11 @@ public interface ClientProtocol {

/**
* Set xattr of a file or directory.
* A regular user only can set xattr of "user" namespace.
* A super user can set xattr of "user" and "trusted" namespace.
* XAttr of "security" and "system" namespace is only used/exposed
* internally to the FS impl.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* For xattr of "user" namespace, its access permissions are
* defined by the file or directory permission bits.
* XAttr will be set only when login user has correct permissions.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param src file or directory
* @param xAttr <code>XAttr</code> to set
* @param flag set flag

@@ -1301,18 +1303,13 @@ public interface ClientProtocol {
throws IOException;

/**
* Get xattrs of file or directory. Values in xAttrs parameter are ignored.
* If xattrs is null or empty, equals getting all xattrs of the file or
* directory.
* Only xattrs which login user has correct permissions will be returned.
* Get xattrs of a file or directory. Values in xAttrs parameter are ignored.
* If xAttrs is null or empty, this is the same as getting all xattrs of the
* file or directory. Only those xattrs for which the logged-in user has
* permissions to view are returned.
* <p/>
* A regular user only can get xattr of "user" namespace.
* A super user can get xattr of "user" and "trusted" namespace.
* XAttr of "security" and "system" namespace is only used/exposed
* internally to the FS impl.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param src file or directory
* @param xAttrs xAttrs to get
* @return List<XAttr> <code>XAttr</code> list

@@ -1327,13 +1324,8 @@ public interface ClientProtocol {
* Only the xattr names for which the logged in user has the permissions to
* access will be returned.
* <p/>
* A regular user only can get xattr names from the "user" namespace.
* A super user can get xattr names of the "user" and "trusted" namespace.
* XAttr names of the "security" and "system" namespaces are only used/exposed
* internally by the file system impl.
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param src file or directory
* @return List<XAttr> <code>XAttr</code> list
* @throws IOException

@@ -1344,8 +1336,14 @@ public interface ClientProtocol {

/**
* Remove xattr of a file or directory.Value in xAttr parameter is ignored.
<<<<<<< .working
* Name must be prefixed with user/trusted/security/system/raw.
=======
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
>>>>>>> .merge-right.r1614550
* <p/>
<<<<<<< .working
* A regular user only can remove xattr of "user" namespace.
* A super user can remove xattr of "user" and "trusted" namespace.
* XAttr of "security" and "system" namespace is only used/exposed

@@ -1356,6 +1354,10 @@ public interface ClientProtocol {
* <p/>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
=======
* Refer to the HDFS extended attributes user documentation for details.
*
>>>>>>> .merge-right.r1614550
* @param src file or directory
* @param xAttr <code>XAttr</code> to remove
* @throws IOException

@@ -72,6 +72,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Create
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto;

@@ -93,6 +94,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDat
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;

@@ -659,6 +662,21 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
}
}

@Override
public GetDatanodeStorageReportResponseProto getDatanodeStorageReport(
RpcController controller, GetDatanodeStorageReportRequestProto req)
throws ServiceException {
try {
List<DatanodeStorageReportProto> reports = PBHelper.convertDatanodeStorageReports(
server.getDatanodeStorageReport(PBHelper.convert(req.getType())));
return GetDatanodeStorageReportResponseProto.newBuilder()
.addAllDatanodeStorageReports(reports)
.build();
} catch (IOException e) {
throw new ServiceException(e);
}
}

@Override
public GetPreferredBlockSizeResponseProto getPreferredBlockSize(
RpcController controller, GetPreferredBlockSizeRequestProto req)

@@ -96,6 +96,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCon
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;

@@ -155,6 +156,7 @@ import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtobufHelper;

@@ -588,6 +590,20 @@ public class ClientNamenodeProtocolTranslatorPB implements
}
}

@Override
public DatanodeStorageReport[] getDatanodeStorageReport(DatanodeReportType type)
throws IOException {
final GetDatanodeStorageReportRequestProto req
= GetDatanodeStorageReportRequestProto.newBuilder()
.setType(PBHelper.convert(type)).build();
try {
return PBHelper.convertDatanodeStorageReports(
rpcProxy.getDatanodeStorageReport(null, req).getDatanodeStorageReportsList());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}

@Override
public long getPreferredBlockSize(String filename) throws IOException,
UnresolvedLinkException {

@@ -21,18 +21,13 @@ package org.apache.hadoop.hdfs.protocolPB;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto;

@@ -51,7 +46,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlo
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;

@@ -61,14 +55,10 @@ import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;

@@ -137,9 +127,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements
.setRegistration(PBHelper.convert(registration))
.setXmitsInProgress(xmitsInProgress).setXceiverCount(xceiverCount)
.setFailedVolumes(failedVolumes);
for (StorageReport r : reports) {
builder.addReports(PBHelper.convert(r));
}
builder.addAllReports(PBHelper.convertStorageReports(reports));
if (cacheCapacity != 0) {
builder.setCacheCapacity(cacheCapacity);
}

@@ -93,6 +93,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheP
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto;

@@ -105,14 +106,11 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdComma
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.StorageState;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.EncryptionZoneProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;

@@ -130,6 +128,8 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;

@@ -154,6 +154,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;

@@ -187,6 +188,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;

@@ -625,6 +627,41 @@ public class PBHelper {
return builder.build();
}

public static DatanodeStorageReportProto convertDatanodeStorageReport(
DatanodeStorageReport report) {
return DatanodeStorageReportProto.newBuilder()
.setDatanodeInfo(convert(report.getDatanodeInfo()))
.addAllStorageReports(convertStorageReports(report.getStorageReports()))
.build();
}

public static List<DatanodeStorageReportProto> convertDatanodeStorageReports(
DatanodeStorageReport[] reports) {
final List<DatanodeStorageReportProto> protos
= new ArrayList<DatanodeStorageReportProto>(reports.length);
for(int i = 0; i < reports.length; i++) {
protos.add(convertDatanodeStorageReport(reports[i]));
}
return protos;
}

public static DatanodeStorageReport convertDatanodeStorageReport(
DatanodeStorageReportProto proto) {
return new DatanodeStorageReport(
convert(proto.getDatanodeInfo()),
convertStorageReports(proto.getStorageReportsList()));
}

public static DatanodeStorageReport[] convertDatanodeStorageReports(
List<DatanodeStorageReportProto> protos) {
final DatanodeStorageReport[] reports
= new DatanodeStorageReport[protos.size()];
for(int i = 0; i < reports.length; i++) {
reports[i] = convertDatanodeStorageReport(protos.get(i));
}
return reports;
}

public static AdminStates convert(AdminState adminState) {
switch(adminState) {
case DECOMMISSION_INPROGRESS:

@@ -1728,6 +1765,15 @@ public class PBHelper {
return report;
}

public static List<StorageReportProto> convertStorageReports(StorageReport[] storages) {
final List<StorageReportProto> protos = new ArrayList<StorageReportProto>(
storages.length);
for(int i = 0; i < storages.length; i++) {
protos.add(convert(storages[i]));
}
return protos;
}

public static JournalInfo convert(JournalInfoProto info) {
int lv = info.hasLayoutVersion() ? info.getLayoutVersion() : 0;
int nsID = info.hasNamespaceID() ? info.getNamespaceID() : 0;
|
@ -259,6 +259,15 @@ public class DatanodeDescriptor extends DatanodeInfo {
|
|||
}
|
||||
}
|
||||
|
||||
public StorageReport[] getStorageReports() {
|
||||
final StorageReport[] reports = new StorageReport[storageMap.size()];
|
||||
final DatanodeStorageInfo[] infos = getStorageInfos();
|
||||
for(int i = 0; i < infos.length; i++) {
|
||||
reports[i] = infos[i].toStorageReport();
|
||||
}
|
||||
return reports;
|
||||
}
|
||||
|
||||
boolean hasStaleStorages() {
|
||||
synchronized (storageMap) {
|
||||
for (DatanodeStorageInfo storage : storageMap.values()) {
|
||||
|
|
|
@ -291,6 +291,12 @@ public class DatanodeStorageInfo {
|
|||
public String toString() {
|
||||
return "[" + storageType + "]" + storageID + ":" + state;
|
||||
}
|
||||
|
||||
StorageReport toStorageReport() {
|
||||
return new StorageReport(
|
||||
new DatanodeStorage(storageID, state, storageType),
|
||||
false, capacity, dfsUsed, remaining, blockPoolUsed);
|
||||
}
|
||||
|
||||
/** @return the first {@link DatanodeStorageInfo} corresponding to
|
||||
* the given datanode
|
||||
|
|
|
@ -65,6 +65,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CAC
|
|||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_DEFAULT;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK_DEFAULT;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY;
|
||||
|
@ -86,9 +88,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROU
|
|||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK_DEFAULT;
|
||||
|
||||
import static org.apache.hadoop.util.Time.now;
|
||||
|
||||
import java.io.BufferedWriter;
|
||||
|
@ -244,6 +243,7 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
|
|||
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
|
||||
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
|
||||
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
|
||||
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
|
||||
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
|
||||
import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
|
||||
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
|
||||
|
@ -5146,6 +5146,28 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
}
|
||||
}
|
||||
|
||||
DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type
|
||||
) throws AccessControlException, StandbyException {
|
||||
checkSuperuserPrivilege();
|
||||
checkOperation(OperationCategory.UNCHECKED);
|
||||
readLock();
|
||||
try {
|
||||
checkOperation(OperationCategory.UNCHECKED);
|
||||
final DatanodeManager dm = getBlockManager().getDatanodeManager();
|
||||
final List<DatanodeDescriptor> datanodes = dm.getDatanodeListForReport(type);
|
||||
|
||||
DatanodeStorageReport[] reports = new DatanodeStorageReport[datanodes.size()];
|
||||
for (int i = 0; i < reports.length; i++) {
|
||||
final DatanodeDescriptor d = datanodes.get(i);
|
||||
reports[i] = new DatanodeStorageReport(new DatanodeInfo(d),
|
||||
d.getStorageReports());
|
||||
}
|
||||
return reports;
|
||||
} finally {
|
||||
readUnlock();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Save namespace image.
|
||||
* This will save current namespace into fsimage file and empty edits file.
|
||||
|
|
|
@ -117,6 +117,7 @@ import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
|
|||
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
|
||||
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
|
||||
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
|
||||
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
|
||||
import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
|
||||
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
|
||||
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
|
||||
|
@ -833,11 +834,23 @@ class NameNodeRpcServer implements NamenodeProtocols {
|
|||
throws IOException {
|
||||
DatanodeInfo results[] = namesystem.datanodeReport(type);
|
||||
if (results == null ) {
|
||||
throw new IOException("Cannot find datanode report");
|
||||
throw new IOException("Failed to get datanode report for " + type
|
||||
+ " datanodes.");
|
||||
}
|
||||
return results;
|
||||
}
|
||||
|
||||
@Override // ClientProtocol
|
||||
public DatanodeStorageReport[] getDatanodeStorageReport(
|
||||
DatanodeReportType type) throws IOException {
|
||||
final DatanodeStorageReport[] reports = namesystem.getDatanodeStorageReport(type);
|
||||
if (reports == null ) {
|
||||
throw new IOException("Failed to get datanode storage report for " + type
|
||||
+ " datanodes.");
|
||||
}
|
||||
return reports;
|
||||
}
|
||||
|
||||
@Override // ClientProtocol
|
||||
public boolean setSafeMode(SafeModeAction action, boolean isChecked)
|
||||
throws IOException {
|
||||
|
|
|
@ -0,0 +1,42 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.server.protocol;
|
||||
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
|
||||
/**
|
||||
* Class that captures information about a datanode and its storages.
|
||||
*/
|
||||
public class DatanodeStorageReport {
|
||||
final DatanodeInfo datanodeInfo;
|
||||
final StorageReport[] storageReports;
|
||||
|
||||
public DatanodeStorageReport(DatanodeInfo datanodeInfo,
|
||||
StorageReport[] storageReports) {
|
||||
this.datanodeInfo = datanodeInfo;
|
||||
this.storageReports = storageReports;
|
||||
}
|
||||
|
||||
public DatanodeInfo getDatanodeInfo() {
|
||||
return datanodeInfo;
|
||||
}
|
||||
|
||||
public StorageReport[] getStorageReports() {
|
||||
return storageReports;
|
||||
}
|
||||
}
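
A rough, illustrative client-side sketch of how the new per-storage report RPC could be used once this change is in place (it is not part of this patch). The DistributedFileSystem.getClient() accessor and the StorageReport getters shown here are assumptions based on the existing client APIs; TestDatanodeReport further down exercises the same call against a MiniDFSCluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;

public class PrintDatanodeStorageReports {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at an HDFS NameNode.
    FileSystem fs = new Path("/").getFileSystem(conf);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // New in this patch: one report per datanode, one entry per attached storage.
    DatanodeStorageReport[] reports =
        dfs.getClient().getDatanodeStorageReport(DatanodeReportType.ALL);
    for (DatanodeStorageReport r : reports) {
      System.out.println(r.getDatanodeInfo());
      for (StorageReport s : r.getStorageReports()) {
        System.out.println("  " + s.getStorage().getStorageID()
            + " capacity=" + s.getCapacity()
            + " remaining=" + s.getRemaining());
      }
    }
  }
}

Unlike the existing getDatanodeReport call, which returns one DatanodeInfo per node, this call adds the per-storage breakdown on top of the same datanode information.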
@ -283,6 +283,19 @@ message GetDatanodeReportResponseProto {
|
|||
repeated DatanodeInfoProto di = 1;
|
||||
}
|
||||
|
||||
message GetDatanodeStorageReportRequestProto {
|
||||
required DatanodeReportTypeProto type = 1;
|
||||
}
|
||||
|
||||
message DatanodeStorageReportProto {
|
||||
required DatanodeInfoProto datanodeInfo = 1;
|
||||
repeated StorageReportProto storageReports = 2;
|
||||
}
|
||||
|
||||
message GetDatanodeStorageReportResponseProto {
|
||||
repeated DatanodeStorageReportProto datanodeStorageReports = 1;
|
||||
}
|
||||
|
||||
message GetPreferredBlockSizeRequestProto {
|
||||
required string filename = 1;
|
||||
}
|
||||
|
@ -674,6 +687,8 @@ service ClientNamenodeProtocol {
|
|||
rpc getFsStats(GetFsStatusRequestProto) returns(GetFsStatsResponseProto);
|
||||
rpc getDatanodeReport(GetDatanodeReportRequestProto)
|
||||
returns(GetDatanodeReportResponseProto);
|
||||
rpc getDatanodeStorageReport(GetDatanodeStorageReportRequestProto)
|
||||
returns(GetDatanodeStorageReportResponseProto);
|
||||
rpc getPreferredBlockSize(GetPreferredBlockSizeRequestProto)
|
||||
returns(GetPreferredBlockSizeResponseProto);
|
||||
rpc setSafeMode(SetSafeModeRequestProto)
|
||||
|
|
|
@ -44,20 +44,6 @@ message DatanodeRegistrationProto {
|
|||
required string softwareVersion = 4; // Software version of the DN, e.g. "2.0.0"
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents a storage available on the datanode
|
||||
*/
|
||||
message DatanodeStorageProto {
|
||||
enum StorageState {
|
||||
NORMAL = 0;
|
||||
READ_ONLY_SHARED = 1;
|
||||
}
|
||||
|
||||
required string storageUuid = 1;
|
||||
optional StorageState state = 2 [default = NORMAL];
|
||||
optional StorageTypeProto storageType = 3 [default = DISK];
|
||||
}
|
||||
|
||||
/**
|
||||
* Commands sent from namenode to the datanodes
|
||||
*/
|
||||
|
@ -196,16 +182,6 @@ message HeartbeatRequestProto {
|
|||
optional uint64 cacheUsed = 7 [default = 0 ];
|
||||
}
|
||||
|
||||
message StorageReportProto {
|
||||
required string storageUuid = 1 [ deprecated = true ];
|
||||
optional bool failed = 2 [ default = false ];
|
||||
optional uint64 capacity = 3 [ default = 0 ];
|
||||
optional uint64 dfsUsed = 4 [ default = 0 ];
|
||||
optional uint64 remaining = 5 [ default = 0 ];
|
||||
optional uint64 blockPoolUsed = 6 [ default = 0 ];
|
||||
optional DatanodeStorageProto storage = 7; // supersedes StorageUuid
|
||||
}
|
||||
|
||||
/**
|
||||
* state - State the NN is in when returning response to the DN
|
||||
* txid - Highest transaction ID this NN has seen
|
||||
|
|
|
@ -99,6 +99,30 @@ message DatanodeInfoProto {
|
|||
optional uint64 cacheUsed = 12 [default = 0];
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents a storage available on the datanode
|
||||
*/
|
||||
message DatanodeStorageProto {
|
||||
enum StorageState {
|
||||
NORMAL = 0;
|
||||
READ_ONLY_SHARED = 1;
|
||||
}
|
||||
|
||||
required string storageUuid = 1;
|
||||
optional StorageState state = 2 [default = NORMAL];
|
||||
optional StorageTypeProto storageType = 3 [default = DISK];
|
||||
}
|
||||
|
||||
message StorageReportProto {
|
||||
required string storageUuid = 1 [ deprecated = true ];
|
||||
optional bool failed = 2 [ default = false ];
|
||||
optional uint64 capacity = 3 [ default = 0 ];
|
||||
optional uint64 dfsUsed = 4 [ default = 0 ];
|
||||
optional uint64 remaining = 5 [ default = 0 ];
|
||||
optional uint64 blockPoolUsed = 6 [ default = 0 ];
|
||||
optional DatanodeStorageProto storage = 7; // supersedes StorageUuid
|
||||
}
|
||||
|
||||
/**
|
||||
* Summary of a file or directory
|
||||
*/
|
||||
|
|
|
@ -44,10 +44,13 @@ HDFS NFS Gateway
|
|||
|
||||
* {Configuration}
|
||||
|
||||
The user running the NFS-gateway must be able to proxy all the users using the NFS mounts.
|
||||
For instance, if user 'nfsserver' is running the gateway, and users belonging to the groups 'nfs-users1'
|
||||
and 'nfs-users2' use the NFS mounts, then in core-site.xml of the namenode, the following must be set
|
||||
(NOTE: replace 'nfsserver' with the user name starting the gateway in your cluster):
|
||||
The NFS-gateway uses a proxy user to proxy all the users accessing the NFS mounts.
In non-secure mode, the user running the gateway is the proxy user, while in secure mode the
user in the Kerberos keytab is the proxy user. Suppose the proxy user is 'nfsserver'
and users belonging to the groups 'nfs-users1'
and 'nfs-users2' use the NFS mounts, then in core-site.xml of the NameNode, the following
two properties must be set and only the NameNode needs a restart after the configuration change
(NOTE: replace the string 'nfsserver' with the proxy user name in your cluster):
|
||||
|
||||
----
|
||||
<property>
|
||||
|
@ -72,7 +75,9 @@ HDFS NFS Gateway
|
|||
----
|
||||
|
||||
The above are the only required configuration for the NFS gateway in non-secure mode. For Kerberized
|
||||
hadoop clusters, the following configurations need to be added to hdfs-site.xml:
|
||||
hadoop clusters, the following configurations need to be added to hdfs-site.xml for the gateway (NOTE: replace
the string "nfsserver" with the proxy user name and ensure the user contained in the keytab is
also the same proxy user):
|
||||
|
||||
----
|
||||
<property>
|
||||
|
@ -87,6 +92,8 @@ HDFS NFS Gateway
|
|||
<value>nfsserver/_HOST@YOUR-REALM.COM</value>
|
||||
</property>
|
||||
----
|
||||
|
||||
The rest of the NFS gateway configurations are optional for both secure and non-secure mode.
|
||||
|
||||
The AIX NFS client has a {{{https://issues.apache.org/jira/browse/HDFS-6549}few known issues}}
|
||||
that prevent it from working correctly by default with the HDFS NFS
|
||||
|
@ -108,7 +115,7 @@ HDFS NFS Gateway
|
|||
have been committed.
|
||||
|
||||
It's strongly recommended for the users to update a few configuration properties based on their use
|
||||
cases. All the related configuration properties can be added or updated in hdfs-site.xml.
|
||||
cases. All the following configuration properties can be added or updated in hdfs-site.xml.
|
||||
|
||||
* If the client mounts the export with access time update allowed, make sure the following
|
||||
property is not disabled in the configuration file. Only NameNode needs to restart after
|
||||
|
@ -145,36 +152,6 @@ HDFS NFS Gateway
|
|||
</property>
|
||||
----
|
||||
|
||||
* For optimal performance, it is recommended that rtmax be updated to
|
||||
1MB. However, note that this 1MB is a per client allocation, and not
|
||||
from a shared memory pool, and therefore a larger value may adversely
|
||||
affect small reads, consuming a lot of memory. The maximum value of
|
||||
this property is 1MB.
|
||||
|
||||
----
|
||||
<property>
|
||||
<name>nfs.rtmax</name>
|
||||
<value>1048576</value>
|
||||
<description>This is the maximum size in bytes of a READ request
|
||||
supported by the NFS gateway. If you change this, make sure you
|
||||
also update the nfs mount's rsize(add rsize= # of bytes to the
|
||||
mount directive).
|
||||
</description>
|
||||
</property>
|
||||
----
|
||||
|
||||
----
|
||||
<property>
|
||||
<name>nfs.wtmax</name>
|
||||
<value>65536</value>
|
||||
<description>This is the maximum size in bytes of a WRITE request
|
||||
supported by the NFS gateway. If you change this, make sure you
|
||||
also update the nfs mount's wsize(add wsize= # of bytes to the
|
||||
mount directive).
|
||||
</description>
|
||||
</property>
|
||||
----
|
||||
|
||||
* By default, the export can be mounted by any client. To better control the access,
|
||||
users can update the following property. The value string contains machine name and
|
||||
access privilege, separated by whitespace
|
||||
|
@ -238,8 +215,10 @@ HDFS NFS Gateway
|
|||
|
||||
[[3]] Start mountd and nfsd.
|
||||
|
||||
No root privileges are required for this command. However, ensure that the user starting
|
||||
the Hadoop cluster and the user starting the NFS gateway are same.
|
||||
No root privileges are required for this command. In non-secure mode, the NFS gateway
should be started by the proxy user mentioned at the beginning of this user guide.
While in secure mode, any user can start the NFS gateway
as long as the user has read access to the Kerberos keytab defined in "nfs.keytab.file".
|
||||
|
||||
-------------------------
|
||||
hadoop nfs3
|
||||
|
@ -339,7 +318,10 @@ HDFS NFS Gateway
|
|||
-------------------------------------------------------------------
|
||||
|
||||
Then the users can access HDFS as part of the local file system except that,
|
||||
hard link and random write are not supported yet.
|
||||
hard link and random write are not supported yet. To optimize the performance
of large file I/O, one can increase the NFS transfer size (rsize and wsize) during mount.
By default, the NFS gateway supports 1MB as the maximum transfer size. For larger data
transfer sizes, one needs to update "nfs.rtmax" and "nfs.wtmax" in hdfs-site.xml.
|
||||
|
||||
* {Allow mounts from unprivileged clients}
|
||||
|
||||
|
|
|
@ -0,0 +1,151 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.fs.viewfs;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.FileSystemTestHelper;
|
||||
import org.apache.hadoop.fs.FsConstants;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
|
||||
import org.junit.After;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.Before;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.junit.Assert.assertArrayEquals;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
/**
|
||||
* Verify XAttrs through ViewFileSystem functionality.
|
||||
*/
|
||||
public class TestViewFileSystemWithXAttrs {
|
||||
|
||||
private static MiniDFSCluster cluster;
|
||||
private static Configuration clusterConf = new Configuration();
|
||||
private static FileSystem fHdfs;
|
||||
private static FileSystem fHdfs2;
|
||||
private FileSystem fsView;
|
||||
private Configuration fsViewConf;
|
||||
private FileSystem fsTarget, fsTarget2;
|
||||
private Path targetTestRoot, targetTestRoot2, mountOnNn1, mountOnNn2;
|
||||
private FileSystemTestHelper fileSystemTestHelper =
|
||||
new FileSystemTestHelper("/tmp/TestViewFileSystemWithXAttrs");
|
||||
|
||||
// XAttrs
|
||||
protected static final String name1 = "user.a1";
|
||||
protected static final byte[] value1 = {0x31, 0x32, 0x33};
|
||||
protected static final String name2 = "user.a2";
|
||||
protected static final byte[] value2 = {0x37, 0x38, 0x39};
|
||||
|
||||
@BeforeClass
|
||||
public static void clusterSetupAtBeginning() throws IOException {
|
||||
cluster = new MiniDFSCluster.Builder(clusterConf)
|
||||
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
|
||||
.numDataNodes(2)
|
||||
.build();
|
||||
cluster.waitClusterUp();
|
||||
|
||||
fHdfs = cluster.getFileSystem(0);
|
||||
fHdfs2 = cluster.getFileSystem(1);
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void ClusterShutdownAtEnd() throws Exception {
|
||||
cluster.shutdown();
|
||||
}
|
||||
|
||||
@Before
|
||||
public void setUp() throws Exception {
|
||||
fsTarget = fHdfs;
|
||||
fsTarget2 = fHdfs2;
|
||||
targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
|
||||
targetTestRoot2 = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget2);
|
||||
|
||||
fsTarget.delete(targetTestRoot, true);
|
||||
fsTarget2.delete(targetTestRoot2, true);
|
||||
fsTarget.mkdirs(targetTestRoot);
|
||||
fsTarget2.mkdirs(targetTestRoot2);
|
||||
|
||||
fsViewConf = ViewFileSystemTestSetup.createConfig();
|
||||
setupMountPoints();
|
||||
fsView = FileSystem.get(FsConstants.VIEWFS_URI, fsViewConf);
|
||||
}
|
||||
|
||||
private void setupMountPoints() {
|
||||
mountOnNn1 = new Path("/mountOnNn1");
|
||||
mountOnNn2 = new Path("/mountOnNn2");
|
||||
ConfigUtil.addLink(fsViewConf, mountOnNn1.toString(),
|
||||
targetTestRoot.toUri());
|
||||
ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(),
|
||||
targetTestRoot2.toUri());
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() throws Exception {
|
||||
fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true);
|
||||
fsTarget2.delete(fileSystemTestHelper.getTestRootPath(fsTarget2), true);
|
||||
}
|
||||
|
||||
/**
|
||||
* Verify a ViewFileSystem wrapped over multiple federated NameNodes will
|
||||
* dispatch the XAttr operations to the correct NameNode.
|
||||
*/
|
||||
@Test
|
||||
public void testXAttrOnMountEntry() throws Exception {
|
||||
// Set XAttrs on the first namespace and verify they are correct
|
||||
fsView.setXAttr(mountOnNn1, name1, value1);
|
||||
fsView.setXAttr(mountOnNn1, name2, value2);
|
||||
assertEquals(2, fsView.getXAttrs(mountOnNn1).size());
|
||||
assertArrayEquals(value1, fsView.getXAttr(mountOnNn1, name1));
|
||||
assertArrayEquals(value2, fsView.getXAttr(mountOnNn1, name2));
|
||||
// Double-check by getting the XAttrs using FileSystem
|
||||
// instead of ViewFileSystem
|
||||
assertArrayEquals(value1, fHdfs.getXAttr(targetTestRoot, name1));
|
||||
assertArrayEquals(value2, fHdfs.getXAttr(targetTestRoot, name2));
|
||||
|
||||
// Paranoid check: verify the other namespace does not
|
||||
// have XAttrs set on the same path.
|
||||
assertEquals(0, fsView.getXAttrs(mountOnNn2).size());
|
||||
assertEquals(0, fHdfs2.getXAttrs(targetTestRoot2).size());
|
||||
|
||||
// Remove the XAttr entries on the first namespace
|
||||
fsView.removeXAttr(mountOnNn1, name1);
|
||||
fsView.removeXAttr(mountOnNn1, name2);
|
||||
assertEquals(0, fsView.getXAttrs(mountOnNn1).size());
|
||||
assertEquals(0, fHdfs.getXAttrs(targetTestRoot).size());
|
||||
|
||||
// Now set XAttrs on the second namespace
|
||||
fsView.setXAttr(mountOnNn2, name1, value1);
|
||||
fsView.setXAttr(mountOnNn2, name2, value2);
|
||||
assertEquals(2, fsView.getXAttrs(mountOnNn2).size());
|
||||
assertArrayEquals(value1, fsView.getXAttr(mountOnNn2, name1));
|
||||
assertArrayEquals(value2, fsView.getXAttr(mountOnNn2, name2));
|
||||
assertArrayEquals(value1, fHdfs2.getXAttr(targetTestRoot2, name1));
|
||||
assertArrayEquals(value2, fHdfs2.getXAttr(targetTestRoot2, name2));
|
||||
|
||||
fsView.removeXAttr(mountOnNn2, name1);
|
||||
fsView.removeXAttr(mountOnNn2, name2);
|
||||
assertEquals(0, fsView.getXAttrs(mountOnNn2).size());
|
||||
assertEquals(0, fHdfs2.getXAttrs(targetTestRoot2).size());
|
||||
}
|
||||
}
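
A usage-oriented sketch of the behaviour this test verifies, written against a hypothetical federated deployment rather than the MiniDFSCluster above (it is not part of this patch; the mount targets, file name, and xattr name/value are made-up placeholders).

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.ConfigUtil;

public class ViewFsXAttrExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Two mount points backed by two different namespaces (placeholder URIs).
    ConfigUtil.addLink(conf, "/nn1data", new URI("hdfs://nn1.example.com:8020/data"));
    ConfigUtil.addLink(conf, "/nn2data", new URI("hdfs://nn2.example.com:8020/data"));

    FileSystem viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
    Path p = new Path("/nn1data/file1");
    viewFs.create(p).close();
    // The xattr call is dispatched to the NameNode that owns the mount target.
    viewFs.setXAttr(p, "user.example", new byte[] {0x31, 0x32, 0x33});
    byte[] value = viewFs.getXAttr(p, "user.example");
    System.out.println("xattr length = " + value.length);
  }
}

Which backing NameNode receives the setXAttr/getXAttr call is decided purely by the mount table entry that prefixes the path, which is what the assertions against fHdfs and fHdfs2 above check.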
@ -0,0 +1,148 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.fs.viewfs;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileContext;
|
||||
import org.apache.hadoop.fs.FileContextTestHelper;
|
||||
import org.apache.hadoop.fs.FsConstants;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
|
||||
import org.junit.After;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.Before;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.junit.Assert.assertArrayEquals;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
/**
|
||||
* Verify XAttrs through ViewFs functionality.
|
||||
*/
|
||||
public class TestViewFsWithXAttrs {
|
||||
|
||||
private static MiniDFSCluster cluster;
|
||||
private static Configuration clusterConf = new Configuration();
|
||||
private static FileContext fc, fc2;
|
||||
private FileContext fcView, fcTarget, fcTarget2;
|
||||
private Configuration fsViewConf;
|
||||
private Path targetTestRoot, targetTestRoot2, mountOnNn1, mountOnNn2;
|
||||
private FileContextTestHelper fileContextTestHelper =
|
||||
new FileContextTestHelper("/tmp/TestViewFsWithXAttrs");
|
||||
|
||||
// XAttrs
|
||||
protected static final String name1 = "user.a1";
|
||||
protected static final byte[] value1 = {0x31, 0x32, 0x33};
|
||||
protected static final String name2 = "user.a2";
|
||||
protected static final byte[] value2 = {0x37, 0x38, 0x39};
|
||||
|
||||
@BeforeClass
|
||||
public static void clusterSetupAtBeginning() throws IOException {
|
||||
cluster = new MiniDFSCluster.Builder(clusterConf)
|
||||
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
|
||||
.numDataNodes(2)
|
||||
.build();
|
||||
cluster.waitClusterUp();
|
||||
|
||||
fc = FileContext.getFileContext(cluster.getURI(0), clusterConf);
|
||||
fc2 = FileContext.getFileContext(cluster.getURI(1), clusterConf);
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void ClusterShutdownAtEnd() throws Exception {
|
||||
cluster.shutdown();
|
||||
}
|
||||
|
||||
@Before
|
||||
public void setUp() throws Exception {
|
||||
fcTarget = fc;
|
||||
fcTarget2 = fc2;
|
||||
targetTestRoot = fileContextTestHelper.getAbsoluteTestRootPath(fc);
|
||||
targetTestRoot2 = fileContextTestHelper.getAbsoluteTestRootPath(fc2);
|
||||
|
||||
fcTarget.delete(targetTestRoot, true);
|
||||
fcTarget2.delete(targetTestRoot2, true);
|
||||
fcTarget.mkdir(targetTestRoot, new FsPermission((short) 0750), true);
|
||||
fcTarget2.mkdir(targetTestRoot2, new FsPermission((short) 0750), true);
|
||||
|
||||
fsViewConf = ViewFileSystemTestSetup.createConfig();
|
||||
setupMountPoints();
|
||||
fcView = FileContext.getFileContext(FsConstants.VIEWFS_URI, fsViewConf);
|
||||
}
|
||||
|
||||
private void setupMountPoints() {
|
||||
mountOnNn1 = new Path("/mountOnNn1");
|
||||
mountOnNn2 = new Path("/mountOnNn2");
|
||||
ConfigUtil.addLink(fsViewConf, mountOnNn1.toString(), targetTestRoot.toUri());
|
||||
ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(), targetTestRoot2.toUri());
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() throws Exception {
|
||||
fcTarget.delete(fileContextTestHelper.getTestRootPath(fcTarget), true);
|
||||
fcTarget2.delete(fileContextTestHelper.getTestRootPath(fcTarget2), true);
|
||||
}
|
||||
|
||||
/**
|
||||
* Verify a ViewFs wrapped over multiple federated NameNodes will
|
||||
* dispatch the XAttr operations to the correct NameNode.
|
||||
*/
|
||||
@Test
|
||||
public void testXAttrOnMountEntry() throws Exception {
|
||||
// Set XAttrs on the first namespace and verify they are correct
|
||||
fcView.setXAttr(mountOnNn1, name1, value1);
|
||||
fcView.setXAttr(mountOnNn1, name2, value2);
|
||||
assertEquals(2, fcView.getXAttrs(mountOnNn1).size());
|
||||
assertArrayEquals(value1, fcView.getXAttr(mountOnNn1, name1));
|
||||
assertArrayEquals(value2, fcView.getXAttr(mountOnNn1, name2));
|
||||
// Double-check by getting the XAttrs using FileContext
// instead of ViewFs
|
||||
assertArrayEquals(value1, fc.getXAttr(targetTestRoot, name1));
|
||||
assertArrayEquals(value2, fc.getXAttr(targetTestRoot, name2));
|
||||
|
||||
// Paranoid check: verify the other namespace does not
|
||||
// have XAttrs set on the same path.
|
||||
assertEquals(0, fcView.getXAttrs(mountOnNn2).size());
|
||||
assertEquals(0, fc2.getXAttrs(targetTestRoot2).size());
|
||||
|
||||
// Remove the XAttr entries on the first namespace
|
||||
fcView.removeXAttr(mountOnNn1, name1);
|
||||
fcView.removeXAttr(mountOnNn1, name2);
|
||||
assertEquals(0, fcView.getXAttrs(mountOnNn1).size());
|
||||
assertEquals(0, fc.getXAttrs(targetTestRoot).size());
|
||||
|
||||
// Now set XAttrs on the second namespace
|
||||
fcView.setXAttr(mountOnNn2, name1, value1);
|
||||
fcView.setXAttr(mountOnNn2, name2, value2);
|
||||
assertEquals(2, fcView.getXAttrs(mountOnNn2).size());
|
||||
assertArrayEquals(value1, fcView.getXAttr(mountOnNn2, name1));
|
||||
assertArrayEquals(value2, fcView.getXAttr(mountOnNn2, name2));
|
||||
assertArrayEquals(value1, fc2.getXAttr(targetTestRoot2, name1));
|
||||
assertArrayEquals(value2, fc2.getXAttr(targetTestRoot2, name2));
|
||||
|
||||
fcView.removeXAttr(mountOnNn2, name1);
|
||||
fcView.removeXAttr(mountOnNn2, name2);
|
||||
assertEquals(0, fcView.getXAttrs(mountOnNn2).size());
|
||||
assertEquals(0, fc2.getXAttrs(targetTestRoot2).size());
|
||||
}
|
||||
}
|
|
@ -21,19 +21,26 @@ import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
|
|||
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
import java.net.InetSocketAddress;
|
||||
import java.util.ArrayList;
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
|
||||
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
||||
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
|
||||
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
|
||||
import org.junit.Test;
|
||||
|
||||
/**
|
||||
* This test ensures that all types of datanode reports work correctly.
|
||||
*/
|
||||
public class TestDatanodeReport {
|
||||
static final Log LOG = LogFactory.getLog(TestDatanodeReport.class);
|
||||
final static private Configuration conf = new HdfsConfiguration();
|
||||
final static private int NUM_OF_DATANODES = 4;
|
||||
|
||||
|
@ -50,20 +57,18 @@ public class TestDatanodeReport {
|
|||
try {
|
||||
//wait until the cluster is up
|
||||
cluster.waitActive();
|
||||
final String bpid = cluster.getNamesystem().getBlockPoolId();
|
||||
final List<DataNode> datanodes = cluster.getDataNodes();
|
||||
final DFSClient client = cluster.getFileSystem().dfs;
|
||||
|
||||
InetSocketAddress addr = new InetSocketAddress("localhost",
|
||||
cluster.getNameNodePort());
|
||||
DFSClient client = new DFSClient(addr, conf);
|
||||
|
||||
assertEquals(client.datanodeReport(DatanodeReportType.ALL).length,
|
||||
NUM_OF_DATANODES);
|
||||
assertEquals(client.datanodeReport(DatanodeReportType.LIVE).length,
|
||||
NUM_OF_DATANODES);
|
||||
assertEquals(client.datanodeReport(DatanodeReportType.DEAD).length, 0);
|
||||
assertReports(NUM_OF_DATANODES, DatanodeReportType.ALL, client, datanodes, bpid);
|
||||
assertReports(NUM_OF_DATANODES, DatanodeReportType.LIVE, client, datanodes, bpid);
|
||||
assertReports(0, DatanodeReportType.DEAD, client, datanodes, bpid);
|
||||
|
||||
// bring down one datanode
|
||||
ArrayList<DataNode> datanodes = cluster.getDataNodes();
|
||||
datanodes.remove(datanodes.size()-1).shutdown();
|
||||
final DataNode last = datanodes.get(datanodes.size() - 1);
|
||||
LOG.info("XXX shutdown datanode " + last.getDatanodeUuid());
|
||||
last.shutdown();
|
||||
|
||||
DatanodeInfo[] nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
|
||||
while (nodeInfo.length != 1) {
|
||||
|
@ -74,22 +79,59 @@ public class TestDatanodeReport {
|
|||
nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
|
||||
}
|
||||
|
||||
assertEquals(client.datanodeReport(DatanodeReportType.LIVE).length,
|
||||
NUM_OF_DATANODES-1);
|
||||
assertEquals(client.datanodeReport(DatanodeReportType.ALL).length,
|
||||
NUM_OF_DATANODES);
|
||||
assertReports(NUM_OF_DATANODES, DatanodeReportType.ALL, client, datanodes, null);
|
||||
assertReports(NUM_OF_DATANODES - 1, DatanodeReportType.LIVE, client, datanodes, null);
|
||||
assertReports(1, DatanodeReportType.DEAD, client, datanodes, null);
|
||||
|
||||
Thread.sleep(5000);
|
||||
assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
|
||||
}finally {
|
||||
} finally {
|
||||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
new TestDatanodeReport().testDatanodeReport();
|
||||
|
||||
final static Comparator<StorageReport> CMP = new Comparator<StorageReport>() {
|
||||
@Override
|
||||
public int compare(StorageReport left, StorageReport right) {
|
||||
return left.getStorage().getStorageID().compareTo(
|
||||
right.getStorage().getStorageID());
|
||||
}
|
||||
};
|
||||
|
||||
static void assertReports(int numDatanodes, DatanodeReportType type,
|
||||
DFSClient client, List<DataNode> datanodes, String bpid) throws IOException {
|
||||
final DatanodeInfo[] infos = client.datanodeReport(type);
|
||||
assertEquals(numDatanodes, infos.length);
|
||||
final DatanodeStorageReport[] reports = client.getDatanodeStorageReport(type);
|
||||
assertEquals(numDatanodes, reports.length);
|
||||
|
||||
for(int i = 0; i < infos.length; i++) {
|
||||
assertEquals(infos[i], reports[i].getDatanodeInfo());
|
||||
|
||||
final DataNode d = findDatanode(infos[i].getDatanodeUuid(), datanodes);
|
||||
if (bpid != null) {
|
||||
//check storage
|
||||
final StorageReport[] computed = reports[i].getStorageReports();
|
||||
Arrays.sort(computed, CMP);
|
||||
final StorageReport[] expected = d.getFSDataset().getStorageReports(bpid);
|
||||
Arrays.sort(expected, CMP);
|
||||
|
||||
assertEquals(expected.length, computed.length);
|
||||
for(int j = 0; j < expected.length; j++) {
|
||||
assertEquals(expected[j].getStorage().getStorageID(),
|
||||
computed[j].getStorage().getStorageID());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
static DataNode findDatanode(String id, List<DataNode> datanodes) {
|
||||
for(DataNode d : datanodes) {
|
||||
if (d.getDatanodeUuid().equals(id)) {
|
||||
return d;
|
||||
}
|
||||
}
|
||||
throw new IllegalStateException("Datnode " + id + " not in datanode list: "
|
||||
+ datanodes);
|
||||
}
|
||||
}
@ -31,25 +31,25 @@ import org.apache.hadoop.fs.permission.AclEntryScope;
|
|||
import org.apache.hadoop.fs.permission.AclEntryType;
|
||||
import org.apache.hadoop.fs.permission.AclStatus;
|
||||
import org.apache.hadoop.fs.permission.FsAction;
|
||||
import org.apache.hadoop.hdfs.StorageType;
|
||||
import org.apache.hadoop.hdfs.DFSTestUtil;
|
||||
import org.apache.hadoop.hdfs.StorageType;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
|
||||
|
@ -67,9 +67,18 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
|
|||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
|
||||
import org.apache.hadoop.hdfs.server.common.StorageInfo;
|
||||
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
|
||||
import org.apache.hadoop.hdfs.server.protocol.*;
|
||||
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
|
||||
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
|
||||
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
|
||||
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
|
||||
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
|
||||
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
|
||||
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
|
||||
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
|
||||
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
|
||||
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
|
||||
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
|
||||
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
|
||||
import org.apache.hadoop.io.Text;
|
||||
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
|
|
|
@ -71,6 +71,9 @@ Release 2.6.0 - UNRELEASED
|
|||
YARN-2211. Persist AMRMToken master key in RMStateStore for RM recovery.
|
||||
(Xuan Gong via jianhe)
|
||||
|
||||
YARN-2328. FairScheduler: Verify update and continuous scheduling threads are
|
||||
stopped when the scheduler is stopped. (kasha)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
BUG FIXES
|
||||
|
@ -108,6 +111,9 @@ Release 2.6.0 - UNRELEASED
|
|||
|
||||
YARN-1796. container-executor shouldn't require o-r permissions (atm)
|
||||
|
||||
YARN-2354. DistributedShell may allocate more containers than client
|
||||
specified after AM restarts. (Li Lu via jianhe)
|
||||
|
||||
Release 2.5.0 - UNRELEASED
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
@ -147,6 +153,9 @@ Release 2.5.0 - UNRELEASED
|
|||
YARN-2233. Implemented ResourceManager web-services to create, renew and
|
||||
cancel delegation tokens. (Varun Vasudev via vinodkv)
|
||||
|
||||
YARN-2247. Made RM web services authenticate users via kerberos and delegation
|
||||
token. (Varun Vasudev via zjshen)
|
||||
|
||||
IMPROVEMENTS
|
||||
|
||||
YARN-1479. Invalid NaN values in Hadoop REST API JSON response (Chen He via
|
||||
|
|
|
@ -263,6 +263,17 @@ public class YarnConfiguration extends Configuration {
|
|||
public static final String RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY =
|
||||
RM_PREFIX + "webapp.spnego-keytab-file";
|
||||
|
||||
/**
|
||||
* Flag to enable override of the default kerberos authentication filter with
|
||||
* the RM authentication filter to allow authentication using delegation
|
||||
* tokens(fallback to kerberos if the tokens are missing). Only applicable
|
||||
* when the http authentication type is kerberos.
|
||||
*/
|
||||
public static final String RM_WEBAPP_DELEGATION_TOKEN_AUTH_FILTER = RM_PREFIX
|
||||
+ "webapp.delegation-token-auth-filter.enabled";
|
||||
public static final boolean DEFAULT_RM_WEBAPP_DELEGATION_TOKEN_AUTH_FILTER =
|
||||
true;
|
||||
|
||||
/** How long to wait until a container is considered dead.*/
|
||||
public static final String RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS =
|
||||
RM_PREFIX + "rm.container-allocation.expiry-interval-ms";
|
||||
|
|
|
@ -208,7 +208,8 @@ public class ApplicationMaster {
|
|||
|
||||
// App Master configuration
|
||||
// No. of containers to run shell command on
|
||||
private int numTotalContainers = 1;
|
||||
@VisibleForTesting
|
||||
protected int numTotalContainers = 1;
|
||||
// Memory to request for the container on which the shell command will run
|
||||
private int containerMemory = 10;
|
||||
// VirtualCores to request for the container on which the shell command will run
|
||||
|
@ -594,8 +595,8 @@ public class ApplicationMaster {
|
|||
|
||||
List<Container> previousAMRunningContainers =
|
||||
response.getContainersFromPreviousAttempts();
|
||||
LOG.info("Received " + previousAMRunningContainers.size()
|
||||
+ " previous AM's running containers on AM registration.");
|
||||
LOG.info(appAttemptID + " received " + previousAMRunningContainers.size()
|
||||
+ " previous attempts' running containers on AM registration.");
|
||||
numAllocatedContainers.addAndGet(previousAMRunningContainers.size());
|
||||
|
||||
int numTotalContainersToRequest =
|
||||
|
@ -610,7 +611,7 @@ public class ApplicationMaster {
|
|||
ContainerRequest containerAsk = setupContainerAskForRM();
|
||||
amRMClient.addContainerRequest(containerAsk);
|
||||
}
|
||||
numRequestedContainers.set(numTotalContainersToRequest);
|
||||
numRequestedContainers.set(numTotalContainers);
|
||||
try {
|
||||
publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(),
|
||||
DSEvent.DS_APP_ATTEMPT_END);
|
||||
|
@ -689,7 +690,7 @@ public class ApplicationMaster {
|
|||
LOG.info("Got response from RM for container ask, completedCnt="
|
||||
+ completedContainers.size());
|
||||
for (ContainerStatus containerStatus : completedContainers) {
|
||||
LOG.info("Got container status for containerID="
|
||||
LOG.info(appAttemptID + " got container status for containerID="
|
||||
+ containerStatus.getContainerId() + ", state="
|
||||
+ containerStatus.getState() + ", exitStatus="
|
||||
+ containerStatus.getExitStatus() + ", diagnostics="
|
||||
|
|
|
@ -36,9 +36,11 @@ public class TestDSFailedAppMaster extends ApplicationMaster {
|
|||
if (appAttemptID.getAttemptId() == 2) {
|
||||
// should reuse the earlier running container, so numAllocatedContainers
|
||||
// should be set to 1. And should ask no more containers, so
|
||||
// numRequestedContainers should be set to 0.
|
||||
// numRequestedContainers should be the same as numTotalContainers.
|
||||
// The only container is the container requested by the AM in the first
|
||||
// attempt.
|
||||
if (numAllocatedContainers.get() != 1
|
||||
|| numRequestedContainers.get() != 0) {
|
||||
|| numRequestedContainers.get() != numTotalContainers) {
|
||||
LOG.info("NumAllocatedContainers is " + numAllocatedContainers.get()
|
||||
+ " and NumRequestedContainers is " + numAllocatedContainers.get()
|
||||
+ ".Application Master failed. exiting");
|
||||
|
|
|
@ -194,6 +194,15 @@
|
|||
<value>/etc/krb5.keytab</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<description>Flag to enable override of the default kerberos authentication
|
||||
filter with the RM authentication filter to allow authentication using
|
||||
delegation tokens(fallback to kerberos if the tokens are missing). Only
|
||||
applicable when the http authentication type is kerberos.</description>
|
||||
<name>yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<description>How long to wait until a node manager is considered dead.</description>
|
||||
<name>yarn.nm.liveness-monitor.expiry-interval-ms</name>
|
||||
|
|
|
@ -0,0 +1,63 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.yarn.server.security.http;
|
||||
|
||||
import java.util.Properties;
|
||||
|
||||
import javax.servlet.FilterConfig;
|
||||
import javax.servlet.ServletException;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience.Private;
|
||||
import org.apache.hadoop.classification.InterfaceStability.Unstable;
|
||||
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
|
||||
|
||||
@Private
|
||||
@Unstable
|
||||
public class RMAuthenticationFilter extends AuthenticationFilter {
|
||||
|
||||
public static final String AUTH_HANDLER_PROPERTY =
|
||||
"yarn.resourcemanager.authentication-handler";
|
||||
|
||||
public RMAuthenticationFilter() {
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Properties getConfiguration(String configPrefix,
|
||||
FilterConfig filterConfig) throws ServletException {
|
||||
|
||||
// In yarn-site.xml, we can simply set type to "kerberos". However, we need
|
||||
// to replace the name here to use the customized Kerberos + DT service
|
||||
// instead of the standard Kerberos handler.
|
||||
|
||||
Properties properties = super.getConfiguration(configPrefix, filterConfig);
|
||||
String yarnAuthHandler = properties.getProperty(AUTH_HANDLER_PROPERTY);
|
||||
if (yarnAuthHandler == null || yarnAuthHandler.isEmpty()) {
|
||||
// if http auth type is simple, the default authentication filter
|
||||
// will handle it, else throw an exception
|
||||
if (!properties.getProperty(AUTH_TYPE).equals("simple")) {
|
||||
throw new ServletException("Authentication handler class is empty");
|
||||
}
|
||||
}
|
||||
if (properties.getProperty(AUTH_TYPE).equalsIgnoreCase("kerberos")) {
|
||||
properties.setProperty(AUTH_TYPE, yarnAuthHandler);
|
||||
}
|
||||
return properties;
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,121 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.yarn.server.security.http;
|
||||
|
||||
import java.io.FileInputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStreamReader;
|
||||
import java.io.Reader;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.commons.io.IOUtils;
|
||||
import org.apache.hadoop.classification.InterfaceStability.Unstable;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.http.FilterContainer;
|
||||
import org.apache.hadoop.http.FilterInitializer;
|
||||
import org.apache.hadoop.http.HttpServer2;
|
||||
import org.apache.hadoop.security.SecurityUtil;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
|
||||
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
|
||||
|
||||
@Unstable
|
||||
public class RMAuthenticationFilterInitializer extends FilterInitializer {
|
||||
|
||||
String configPrefix;
|
||||
String signatureSecretFileProperty;
|
||||
String kerberosPrincipalProperty;
|
||||
String cookiePath;
|
||||
|
||||
public RMAuthenticationFilterInitializer() {
|
||||
this.configPrefix = "hadoop.http.authentication.";
|
||||
this.signatureSecretFileProperty =
|
||||
AuthenticationFilter.SIGNATURE_SECRET + ".file";
|
||||
this.kerberosPrincipalProperty = KerberosAuthenticationHandler.PRINCIPAL;
|
||||
this.cookiePath = "/";
|
||||
}
|
||||
|
||||
protected Map<String, String> createFilterConfig(Configuration conf) {
|
||||
Map<String, String> filterConfig = new HashMap<String, String>();
|
||||
|
||||
// setting the cookie path to root '/' so it is used for all resources.
|
||||
filterConfig.put(AuthenticationFilter.COOKIE_PATH, cookiePath);
|
||||
|
||||
for (Map.Entry<String, String> entry : conf) {
|
||||
String name = entry.getKey();
|
||||
if (name.startsWith(configPrefix)) {
|
||||
String value = conf.get(name);
|
||||
name = name.substring(configPrefix.length());
|
||||
filterConfig.put(name, value);
|
||||
}
|
||||
}
|
||||
|
||||
String signatureSecretFile = filterConfig.get(signatureSecretFileProperty);
|
||||
if (signatureSecretFile != null) {
|
||||
Reader reader = null;
|
||||
try {
|
||||
StringBuilder secret = new StringBuilder();
|
||||
reader =
|
||||
new InputStreamReader(new FileInputStream(signatureSecretFile),
|
||||
"UTF-8");
|
||||
int c = reader.read();
|
||||
while (c > -1) {
|
||||
secret.append((char) c);
|
||||
c = reader.read();
|
||||
}
|
||||
filterConfig.put(AuthenticationFilter.SIGNATURE_SECRET,
|
||||
secret.toString());
|
||||
} catch (IOException ex) {
|
||||
// If running in non-secure mode, this filter only gets added
// because the user has not set up their own filter, so just generate
// a random secret. In secure mode, the user needs to set up security.
|
||||
if (UserGroupInformation.isSecurityEnabled()) {
|
||||
throw new RuntimeException(
|
||||
"Could not read HTTP signature secret file: " + signatureSecretFile);
|
||||
}
|
||||
} finally {
|
||||
IOUtils.closeQuietly(reader);
|
||||
}
|
||||
}
|
||||
|
||||
// Resolve _HOST into bind address
|
||||
String bindAddress = conf.get(HttpServer2.BIND_ADDRESS);
|
||||
String principal = filterConfig.get(kerberosPrincipalProperty);
|
||||
if (principal != null) {
|
||||
try {
|
||||
principal = SecurityUtil.getServerPrincipal(principal, bindAddress);
|
||||
} catch (IOException ex) {
|
||||
throw new RuntimeException(
|
||||
"Could not resolve Kerberos principal name: " + ex.toString(), ex);
|
||||
}
|
||||
filterConfig.put(KerberosAuthenticationHandler.PRINCIPAL, principal);
|
||||
}
|
||||
return filterConfig;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void initFilter(FilterContainer container, Configuration conf) {
|
||||
|
||||
Map<String, String> filterConfig = createFilterConfig(conf);
|
||||
container.addFilter("YARNAuthenticationFilter",
|
||||
RMAuthenticationFilter.class.getName(), filterConfig);
|
||||
}
|
||||
|
||||
}
|
|
@ -32,11 +32,13 @@ import org.apache.hadoop.classification.InterfaceAudience.Private;
|
|||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.ha.HAServiceProtocol;
|
||||
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
|
||||
import org.apache.hadoop.http.lib.StaticUserWebFilter;
|
||||
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
|
||||
import org.apache.hadoop.metrics2.source.JvmMetrics;
|
||||
import org.apache.hadoop.security.Groups;
|
||||
import org.apache.hadoop.security.SecurityUtil;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
|
||||
import org.apache.hadoop.security.authorize.ProxyUsers;
|
||||
import org.apache.hadoop.service.AbstractService;
|
||||
import org.apache.hadoop.service.CompositeService;
|
||||
|
@ -88,8 +90,11 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEv
|
|||
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.security.QueueACLsManager;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.security.RMAuthenticationHandler;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebApp;
|
||||
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
|
||||
import org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilter;
|
||||
import org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilterInitializer;
|
||||
import org.apache.hadoop.yarn.server.webproxy.AppReportFetcher;
|
||||
import org.apache.hadoop.yarn.server.webproxy.ProxyUriUtils;
|
||||
import org.apache.hadoop.yarn.server.webproxy.WebAppProxy;
|
||||
|
@ -789,6 +794,62 @@ public class ResourceManager extends CompositeService implements Recoverable {
|
|||
}
|
||||
|
||||
protected void startWepApp() {
|
||||
|
||||
// Use the customized yarn filter instead of the standard kerberos filter to
|
||||
// allow users to authenticate using delegation tokens
|
||||
// 3 conditions need to be satisfied -
|
||||
// 1. security is enabled
|
||||
// 2. http auth type is set to kerberos
|
||||
// 3. "yarn.resourcemanager.webapp.use-yarn-filter" override is set to true
|
||||
|
||||
Configuration conf = getConfig();
|
||||
boolean useYarnAuthenticationFilter =
|
||||
conf.getBoolean(
|
||||
YarnConfiguration.RM_WEBAPP_DELEGATION_TOKEN_AUTH_FILTER,
|
||||
YarnConfiguration.DEFAULT_RM_WEBAPP_DELEGATION_TOKEN_AUTH_FILTER);
|
||||
String authPrefix = "hadoop.http.authentication.";
|
||||
String authTypeKey = authPrefix + "type";
|
||||
String initializers = conf.get("hadoop.http.filter.initializers");
|
||||
if (UserGroupInformation.isSecurityEnabled()
|
||||
&& useYarnAuthenticationFilter
|
||||
&& conf.get(authTypeKey, "").equalsIgnoreCase(
|
||||
KerberosAuthenticationHandler.TYPE)) {
|
||||
LOG.info("Using RM authentication filter(kerberos/delegation-token)"
|
||||
+ " for RM webapp authentication");
|
||||
RMAuthenticationHandler
|
||||
.setSecretManager(getClientRMService().rmDTSecretManager);
|
||||
String yarnAuthKey =
|
||||
authPrefix + RMAuthenticationFilter.AUTH_HANDLER_PROPERTY;
|
||||
conf.setStrings(yarnAuthKey, RMAuthenticationHandler.class.getName());
|
||||
|
||||
initializers =
|
||||
initializers == null || initializers.isEmpty() ? "" : ","
|
||||
+ initializers;
|
||||
if (!initializers.contains(RMAuthenticationFilterInitializer.class
|
||||
.getName())) {
|
||||
conf.set("hadoop.http.filter.initializers",
|
||||
RMAuthenticationFilterInitializer.class.getName() + initializers);
|
||||
}
|
||||
}
|
||||
|
||||
// if security is not enabled and the default filter initializer has been
|
||||
// set, set the initializer to include the
|
||||
// RMAuthenticationFilterInitializer which in turn will set up the simple
|
||||
// auth filter.
|
||||
|
||||
if (!UserGroupInformation.isSecurityEnabled()) {
|
||||
if (initializers == null || initializers.isEmpty()) {
|
||||
conf.set("hadoop.http.filter.initializers",
|
||||
RMAuthenticationFilterInitializer.class.getName());
|
||||
conf.set(authTypeKey, "simple");
|
||||
} else if (initializers.equals(StaticUserWebFilter.class.getName())) {
|
||||
conf.set("hadoop.http.filter.initializers",
|
||||
RMAuthenticationFilterInitializer.class.getName() + ","
|
||||
+ initializers);
|
||||
conf.set(authTypeKey, "simple");
|
||||
}
|
||||
}
|
||||
|
||||
Builder<ApplicationMasterService> builder =
|
||||
WebApps
|
||||
.$for("cluster", ApplicationMasterService.class, masterService,
|
||||
|
|
|
@@ -139,8 +139,11 @@ public class FairScheduler extends
  private final int UPDATE_DEBUG_FREQUENCY = 5;
  private int updatesToSkipForDebug = UPDATE_DEBUG_FREQUENCY;

  private Thread updateThread;
  private Thread schedulingThread;
  @VisibleForTesting
  Thread updateThread;

  @VisibleForTesting
  Thread schedulingThread;
  // timeout to join when we stop this service
  protected final long THREAD_JOIN_TIMEOUT_MS = 1000;

@@ -243,16 +246,21 @@ public class FairScheduler extends
  }

  /**
   * A runnable which calls {@link FairScheduler#update()} every
   * Thread which calls {@link FairScheduler#update()} every
   * <code>updateInterval</code> milliseconds.
   */
  private class UpdateThread implements Runnable {
  private class UpdateThread extends Thread {

    @Override
    public void run() {
      while (true) {
      while (!Thread.currentThread().isInterrupted()) {
        try {
          Thread.sleep(updateInterval);
          update();
          preemptTasksIfNecessary();
        } catch (InterruptedException ie) {
          LOG.warn("Update thread interrupted. Exiting.");
          return;
        } catch (Exception e) {
          LOG.error("Exception in fair scheduler UpdateThread", e);
        }

@@ -260,6 +268,26 @@ public class FairScheduler extends
    }
  }

  /**
   * Thread which attempts scheduling resources continuously,
   * asynchronous to the node heartbeats.
   */
  private class ContinuousSchedulingThread extends Thread {

    @Override
    public void run() {
      while (!Thread.currentThread().isInterrupted()) {
        try {
          continuousSchedulingAttempt();
          Thread.sleep(getContinuousSchedulingSleepMs());
        } catch (InterruptedException e) {
          LOG.warn("Continuous scheduling thread interrupted. Exiting.", e);
          return;
        }
      }
    }
  }

  /**
   * Recompute the internal variables used by the scheduler - per-job weights,
   * fair shares, deficits, minimum slot allocations, and amount of used and

@@ -970,7 +998,7 @@ public class FairScheduler extends
    }
  }

  void continuousSchedulingAttempt() {
  void continuousSchedulingAttempt() throws InterruptedException {
    List<NodeId> nodeIdList = new ArrayList<NodeId>(nodes.keySet());
    // Sort the nodes by space available on them, so that we offer
    // containers on emptier nodes first, facilitating an even spread. This

@@ -1229,30 +1257,14 @@ public class FairScheduler extends
      throw new IOException("Failed to start FairScheduler", e);
    }

    updateThread = new Thread(new UpdateThread());
    updateThread = new UpdateThread();
    updateThread.setName("FairSchedulerUpdateThread");
    updateThread.setDaemon(true);

    if (continuousSchedulingEnabled) {
      // start continuous scheduling thread
      schedulingThread = new Thread(
        new Runnable() {
          @Override
          public void run() {
            while (!Thread.currentThread().isInterrupted()) {
              try {
                continuousSchedulingAttempt();
                Thread.sleep(getContinuousSchedulingSleepMs());
              } catch (InterruptedException e) {
                LOG.error("Continuous scheduling thread interrupted. Exiting. ",
                  e);
                return;
              }
            }
          }
        }
      );
      schedulingThread.setName("ContinuousScheduling");
      schedulingThread = new ContinuousSchedulingThread();
      schedulingThread.setName("FairSchedulerContinuousScheduling");
      schedulingThread.setDaemon(true);
    }

@@ -0,0 +1,157 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.resourcemanager.security;

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;

public class RMAuthenticationHandler extends KerberosAuthenticationHandler {

  public static final String TYPE = "kerberos-dt";
  public static final String HEADER = "Hadoop-YARN-Auth-Delegation-Token";

  static RMDelegationTokenSecretManager secretManager;
  static boolean secretManagerInitialized = false;

  public RMAuthenticationHandler() {
    super();
  }

  /**
   * Returns authentication type of the handler.
   *
   * @return <code>kerberos-dt</code>
   */
  @Override
  public String getType() {
    return TYPE;
  }

  @Override
  public boolean managementOperation(AuthenticationToken token,
      HttpServletRequest request, HttpServletResponse response) {
    return true;
  }

  /**
   * Authenticates a request looking for the <code>delegation</code> header and
   * verifying it is a valid token. If the header is missing, it delegates the
   * authentication to the {@link KerberosAuthenticationHandler} unless it is
   * disabled.
   *
   * @param request
   *          the HTTP client request.
   * @param response
   *          the HTTP client response.
   *
   * @return the authentication token for the authenticated request.
   * @throws IOException
   *           thrown if an IO error occurred.
   * @throws AuthenticationException
   *           thrown if the authentication failed.
   */
  @Override
  public AuthenticationToken authenticate(HttpServletRequest request,
      HttpServletResponse response) throws IOException, AuthenticationException {

    AuthenticationToken token;
    String delegationParam = this.getEncodedDelegationTokenFromRequest(request);
    if (delegationParam != null) {
      Token<RMDelegationTokenIdentifier> dt =
          new Token<RMDelegationTokenIdentifier>();
      dt.decodeFromUrlString(delegationParam);
      UserGroupInformation ugi = this.verifyToken(dt);
      if (ugi == null) {
        throw new AuthenticationException("Invalid token");
      }
      final String shortName = ugi.getShortUserName();
      token = new AuthenticationToken(shortName, ugi.getUserName(), getType());
    } else {
      token = super.authenticate(request, response);
      if (token != null) {
        // create a token with auth type set correctly
        token =
            new AuthenticationToken(token.getUserName(), token.getName(),
              super.getType());
      }
    }
    return token;
  }

  /**
   * Verifies a delegation token.
   *
   * @param token
   *          delegation token to verify.
   * @return the UGI for the token; null if the verification fails
   * @throws IOException
   *           thrown if the token could not be verified.
   */
  protected UserGroupInformation verifyToken(
      Token<RMDelegationTokenIdentifier> token) throws IOException {
    if (secretManagerInitialized == false) {
      throw new IllegalStateException("Secret manager not initialized");
    }
    ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
    DataInputStream dis = new DataInputStream(buf);
    RMDelegationTokenIdentifier id = secretManager.createIdentifier();
    try {
      id.readFields(dis);
      secretManager.verifyToken(id, token.getPassword());
    } catch (Throwable t) {
      return null;
    } finally {
      dis.close();
    }
    return id.getUser();
  }

  /**
   * Extract encoded delegation token from request
   *
   * @param req
   *          HTTPServletRequest object
   *
   * @return String containing the encoded token; null if encoded token not
   *         found
   *
   */
  protected String getEncodedDelegationTokenFromRequest(HttpServletRequest req) {
    String header = req.getHeader(HEADER);
    return header;
  }

  public static void setSecretManager(RMDelegationTokenSecretManager manager) {
    secretManager = manager;
    secretManagerInitialized = true;
  }

}

@@ -55,6 +55,7 @@ import org.apache.commons.codec.binary.Base64;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;

@@ -680,6 +681,11 @@ public class RMWebServices {
      throw new AuthorizationException(msg);
    }

    if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
      String msg = "The default static user cannot carry out this operation.";
      return Response.status(Status.FORBIDDEN).entity(msg).build();
    }

    String userName = callerUGI.getUserName();
    RMApp app = null;
    try {

@@ -800,6 +806,13 @@ public class RMWebServices {
    return callerUGI;
  }

  private boolean isStaticUser(UserGroupInformation callerUGI) {
    String staticUser =
        conf.get(CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER,
          CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER);
    return staticUser.equals(callerUGI.getUserName());
  }

  /**
   * Generates a new ApplicationId which is then sent to the client
   *

@@ -822,6 +835,10 @@ public class RMWebServices {
      throw new AuthorizationException("Unable to obtain user name, "
          + "user not authenticated");
    }
    if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
      String msg = "The default static user cannot carry out this operation.";
      return Response.status(Status.FORBIDDEN).entity(msg).build();
    }

    NewApplication appId = createNewApplication();
    return Response.status(Status.OK).entity(appId).build();

@@ -859,6 +876,11 @@ public class RMWebServices {
          + "user not authenticated");
    }

    if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
      String msg = "The default static user cannot carry out this operation.";
      return Response.status(Status.FORBIDDEN).entity(msg).build();
    }

    ApplicationSubmissionContext appContext =
        createAppSubmissionContext(newApp);
    final SubmitApplicationRequest req =

@@ -975,7 +997,7 @@ public class RMWebServices {
   *
   * @param newApp
   *          the information provided by the user
   * @return
   * @return created context
   * @throws BadRequestException
   * @throws IOException
   */

@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;

@@ -3341,4 +3342,28 @@ public class TestFairScheduler extends FairSchedulerTestBase {
        scheduler.findLowestCommonAncestorQueue(a1Queue, b1Queue);
    assertEquals(ancestorQueue, queue1);
  }

  @Test
  public void testThreadLifeCycle() throws InterruptedException {
    conf.setBoolean(
        FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED, true);
    scheduler.init(conf);
    scheduler.start();

    Thread updateThread = scheduler.updateThread;
    Thread schedulingThread = scheduler.schedulingThread;

    assertTrue(updateThread.isAlive());
    assertTrue(schedulingThread.isAlive());

    scheduler.stop();

    int numRetries = 100;
    while (numRetries-- > 0 &&
        (updateThread.isAlive() || schedulingThread.isAlive())) {
      Thread.sleep(50);
    }

    assertNotEquals("One of the threads is still alive", 0, numRetries);
  }
}

@@ -0,0 +1,354 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.resourcemanager.webapp;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.StringWriter;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.concurrent.Callable;

import javax.ws.rs.core.MediaType;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;

import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.KerberosTestUtils;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.codehaus.jettison.json.JSONObject;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

import com.sun.jersey.api.client.ClientResponse.Status;

public class TestRMWebServicesDelegationTokenAuthentication {

  private static final File testRootDir = new File("target",
      TestRMWebServicesDelegationTokenAuthentication.class.getName() + "-root");
  private static File httpSpnegoKeytabFile = new File(
      KerberosTestUtils.getKeytabFile());

  private static String httpSpnegoPrincipal = KerberosTestUtils
      .getServerPrincipal();

  private static boolean miniKDCStarted = false;
  private static MiniKdc testMiniKDC;
  private static MockRM rm;

  // use published header name
  final static String DelegationTokenHeader =
      "Hadoop-YARN-Auth-Delegation-Token";

  @BeforeClass
  public static void setUp() {
    try {
      testMiniKDC = new MiniKdc(MiniKdc.createConf(), testRootDir);
      setupKDC();
      setupAndStartRM();
    } catch (Exception e) {
      assertTrue("Couldn't create MiniKDC", false);
    }
  }

  @AfterClass
  public static void tearDown() {
    if (testMiniKDC != null) {
      testMiniKDC.stop();
    }
    if (rm != null) {
      rm.stop();
    }
  }

  public TestRMWebServicesDelegationTokenAuthentication() throws Exception {
    super();
  }

  private static void setupAndStartRM() throws Exception {
    Configuration rmconf = new Configuration();
    rmconf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
        YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
    rmconf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
        ResourceScheduler.class);
    rmconf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
    String httpPrefix = "hadoop.http.authentication.";
    rmconf.setStrings(httpPrefix + "type", "kerberos");
    rmconf.set(httpPrefix + KerberosAuthenticationHandler.PRINCIPAL,
        httpSpnegoPrincipal);
    rmconf.set(httpPrefix + KerberosAuthenticationHandler.KEYTAB,
        httpSpnegoKeytabFile.getAbsolutePath());
    // use any file for signature secret
    rmconf.set(httpPrefix + AuthenticationFilter.SIGNATURE_SECRET + ".file",
        httpSpnegoKeytabFile.getAbsolutePath());
    rmconf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
        "kerberos");
    rmconf.setBoolean(YarnConfiguration.RM_WEBAPP_DELEGATION_TOKEN_AUTH_FILTER,
        true);
    rmconf.set(YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY,
        httpSpnegoPrincipal);
    rmconf.set(YarnConfiguration.RM_KEYTAB,
        httpSpnegoKeytabFile.getAbsolutePath());
    rmconf.set(YarnConfiguration.RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY,
        httpSpnegoKeytabFile.getAbsolutePath());
    rmconf.set(YarnConfiguration.NM_WEBAPP_SPNEGO_USER_NAME_KEY,
        httpSpnegoPrincipal);
    rmconf.set(YarnConfiguration.NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY,
        httpSpnegoKeytabFile.getAbsolutePath());
    rmconf.setBoolean("mockrm.webapp.enabled", true);
    UserGroupInformation.setConfiguration(rmconf);
    rm = new MockRM(rmconf);
    rm.start();

  }

  private static void setupKDC() throws Exception {
    if (miniKDCStarted == false) {
      testMiniKDC.start();
      getKdc().createPrincipal(httpSpnegoKeytabFile, "HTTP/localhost",
          "client", UserGroupInformation.getLoginUser().getShortUserName());
      miniKDCStarted = true;
    }
  }

  private static MiniKdc getKdc() {
    return testMiniKDC;
  }

  // Test that you can authenticate with only delegation tokens
  // 1. Get a delegation token using Kerberos auth(this ends up
  // testing the fallback authenticator)
  // 2. Submit an app without kerberos or delegation-token
  // - we should get an UNAUTHORIZED response
  // 3. Submit same app with delegation-token
  // - we should get OK response
  // - confirm owner of the app is the user whose
  // delegation-token we used

  @Test
  public void testDelegationTokenAuth() throws Exception {
    final String token = getDelegationToken("test");

    ApplicationSubmissionContextInfo app =
        new ApplicationSubmissionContextInfo();
    String appid = "application_123_0";
    app.setApplicationId(appid);
    String requestBody = getMarshalledAppInfo(app);

    URL url = new URL("http://localhost:8088/ws/v1/cluster/apps");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    setupConn(conn, "POST", "application/xml", requestBody);

    // this should fail with unauthorized because only
    // auth is kerberos or delegation token
    try {
      conn.getInputStream();
      fail("we should not be here");
    } catch (IOException e) {
      assertEquals(Status.UNAUTHORIZED.getStatusCode(), conn.getResponseCode());
    }

    conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty(DelegationTokenHeader, token);
    setupConn(conn, "POST", MediaType.APPLICATION_XML, requestBody);

    // this should not fail
    conn.getInputStream();
    boolean appExists =
        rm.getRMContext().getRMApps()
          .containsKey(ConverterUtils.toApplicationId(appid));
    assertTrue(appExists);
    RMApp actualApp =
        rm.getRMContext().getRMApps()
          .get(ConverterUtils.toApplicationId(appid));
    String owner = actualApp.getUser();
    assertEquals("client", owner);

    return;
  }

  // Test to make sure that cancelled delegation tokens
  // are rejected
  @Test
  public void testCancelledDelegationToken() throws Exception {
    String token = getDelegationToken("client");
    cancelDelegationToken(token);
    ApplicationSubmissionContextInfo app =
        new ApplicationSubmissionContextInfo();
    String appid = "application_123_0";
    app.setApplicationId(appid);
    String requestBody = getMarshalledAppInfo(app);

    URL url = new URL("http://localhost:8088/ws/v1/cluster/apps");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty(DelegationTokenHeader, token);
    setupConn(conn, "POST", MediaType.APPLICATION_XML, requestBody);

    // this should fail with unauthorized because only
    // auth is kerberos or delegation token
    try {
      conn.getInputStream();
      fail("Authentication should fail with expired delegation tokens");
    } catch (IOException e) {
      assertEquals(Status.FORBIDDEN.getStatusCode(), conn.getResponseCode());
    }
    return;
  }

  // Test to make sure that we can't do delegation token
  // functions using just delegation token auth
  @Test
  public void testDelegationTokenOps() throws Exception {
    String token = getDelegationToken("client");
    String createRequest = "{\"renewer\":\"test\"}";
    String renewRequest = "{\"token\": \"" + token + "\"}";

    // first test create and renew
    String[] requests = { createRequest, renewRequest };
    for (String requestBody : requests) {
      URL url = new URL("http://localhost:8088/ws/v1/cluster/delegation-token");
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      conn.setRequestProperty(DelegationTokenHeader, token);
      setupConn(conn, "POST", MediaType.APPLICATION_JSON, requestBody);
      try {
        conn.getInputStream();
        fail("Creation/Renewing delegation tokens should not be "
            + "allowed with token auth");
      } catch (IOException e) {
        assertEquals(Status.FORBIDDEN.getStatusCode(), conn.getResponseCode());
      }
    }

    // test cancel
    URL url = new URL("http://localhost:8088/ws/v1/cluster/delegation-token");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty(DelegationTokenHeader, token);
    conn.setRequestProperty(RMWebServices.DELEGATION_TOKEN_HEADER, token);
    setupConn(conn, "DELETE", null, null);
    try {
      conn.getInputStream();
      fail("Cancelling delegation tokens should not be allowed with token auth");
    } catch (IOException e) {
      assertEquals(Status.FORBIDDEN.getStatusCode(), conn.getResponseCode());
    }
    return;
  }

  private String getDelegationToken(final String renewer) throws Exception {
    String token = KerberosTestUtils.doAsClient(new Callable<String>() {
      @Override
      public String call() throws Exception {
        String ret = null;
        String body = "{\"renewer\":\"" + renewer + "\"}";
        URL url =
            new URL("http://localhost:8088/ws/v1/cluster/delegation-token");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        setupConn(conn, "POST", MediaType.APPLICATION_JSON, body);
        InputStream response = conn.getInputStream();
        assertEquals(Status.OK.getStatusCode(), conn.getResponseCode());
        BufferedReader reader = null;
        try {
          reader = new BufferedReader(new InputStreamReader(response, "UTF8"));
          for (String line; (line = reader.readLine()) != null;) {
            JSONObject obj = new JSONObject(line);
            if (obj.has("token")) {
              reader.close();
              response.close();
              ret = obj.getString("token");
              break;
            }
          }
        } finally {
          IOUtils.closeQuietly(reader);
          IOUtils.closeQuietly(response);
        }
        return ret;
      }
    });
    return token;
  }

  private void cancelDelegationToken(final String tokenString) throws Exception {

    KerberosTestUtils.doAsClient(new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        URL url =
            new URL("http://localhost:8088/ws/v1/cluster/delegation-token");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty(RMWebServices.DELEGATION_TOKEN_HEADER,
            tokenString);
        setupConn(conn, "DELETE", null, null);
        InputStream response = conn.getInputStream();
        assertEquals(Status.OK.getStatusCode(), conn.getResponseCode());
        response.close();
        return null;
      }
    });
    return;
  }

  static String getMarshalledAppInfo(ApplicationSubmissionContextInfo appInfo)
      throws Exception {

    StringWriter writer = new StringWriter();
    JAXBContext context =
        JAXBContext.newInstance(ApplicationSubmissionContextInfo.class);
    Marshaller m = context.createMarshaller();
    m.marshal(appInfo, writer);
    return writer.toString();
  }

  static void setupConn(HttpURLConnection conn, String method,
      String contentType, String body) throws Exception {
    conn.setRequestMethod(method);
    conn.setDoOutput(true);
    conn.setRequestProperty("Accept-Charset", "UTF8");
    if (contentType != null && !contentType.isEmpty()) {
      conn.setRequestProperty("Content-Type", contentType + ";charset=UTF8");
      if (body != null && !body.isEmpty()) {
        OutputStream stream = conn.getOutputStream();
        stream.write(body.getBytes("UTF8"));
        stream.close();
      }
    }
  }

}

@@ -0,0 +1,272 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.resourcemanager.webapp;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.File;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Arrays;
import java.util.Collection;

import javax.ws.rs.core.MediaType;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.KerberosTestUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

import com.sun.jersey.api.client.ClientResponse.Status;

/* Just a simple test class to ensure that the RM handles the static web user
 * correctly for secure and un-secure modes
 *
 */
@RunWith(Parameterized.class)
public class TestRMWebappAuthentication {

  private static MockRM rm;
  private static Configuration simpleConf;
  private static Configuration kerberosConf;

  private static final File testRootDir = new File("target",
      TestRMWebServicesDelegationTokenAuthentication.class.getName() + "-root");
  private static File httpSpnegoKeytabFile = new File(
      KerberosTestUtils.getKeytabFile());

  private static boolean miniKDCStarted = false;
  private static MiniKdc testMiniKDC;

  static {
    simpleConf = new Configuration();
    simpleConf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
        YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
    simpleConf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
        ResourceScheduler.class);
    simpleConf.setBoolean("mockrm.webapp.enabled", true);
    kerberosConf = new Configuration();
    kerberosConf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
        YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
    kerberosConf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
        ResourceScheduler.class);
    kerberosConf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
    kerberosConf.set(
        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    kerberosConf.set(YarnConfiguration.RM_KEYTAB,
        httpSpnegoKeytabFile.getAbsolutePath());
    kerberosConf.setBoolean("mockrm.webapp.enabled", true);
  }

  @Parameters
  public static Collection params() {
    return Arrays.asList(new Object[][] { { 1, simpleConf },
        { 2, kerberosConf } });
  }

  public TestRMWebappAuthentication(int run, Configuration conf) {
    super();
    setupAndStartRM(conf);
  }

  @BeforeClass
  public static void setUp() {
    try {
      testMiniKDC = new MiniKdc(MiniKdc.createConf(), testRootDir);
      setupKDC();
    } catch (Exception e) {
      assertTrue("Couldn't create MiniKDC", false);
    }
  }

  @AfterClass
  public static void tearDown() {
    if (testMiniKDC != null) {
      testMiniKDC.stop();
    }
  }

  private static void setupKDC() throws Exception {
    if (!miniKDCStarted) {
      testMiniKDC.start();
      getKdc().createPrincipal(httpSpnegoKeytabFile, "HTTP/localhost",
          "client", UserGroupInformation.getLoginUser().getShortUserName());
      miniKDCStarted = true;
    }
  }

  private static MiniKdc getKdc() {
    return testMiniKDC;
  }

  private static void setupAndStartRM(Configuration conf) {
    UserGroupInformation.setConfiguration(conf);
    rm = new MockRM(conf);
  }

  // ensure that in a non-secure cluster users can access
  // the web pages as earlier and submit apps as anonymous
  // user or by identifying themselves
  @Test
  public void testSimpleAuth() throws Exception {

    rm.start();

    // ensure users can access web pages
    // this should work for secure and non-secure clusters
    URL url = new URL("http://localhost:8088/cluster");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try {
      conn.getInputStream();
      assertEquals(Status.OK.getStatusCode(), conn.getResponseCode());
    } catch (Exception e) {
      fail("Fetching url failed");
    }

    if (UserGroupInformation.isSecurityEnabled()) {
      testAnonymousKerberosUser();
    } else {
      testAnonymousSimpleUser();
    }

    rm.stop();
  }

  private void testAnonymousKerberosUser() throws Exception {

    ApplicationSubmissionContextInfo app =
        new ApplicationSubmissionContextInfo();
    String appid = "application_123_0";
    app.setApplicationId(appid);
    String requestBody =
        TestRMWebServicesDelegationTokenAuthentication
          .getMarshalledAppInfo(app);

    URL url =
        new URL("http://localhost:8088/ws/v1/cluster/apps/new-application");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    TestRMWebServicesDelegationTokenAuthentication.setupConn(conn, "POST",
        "application/xml", requestBody);

    try {
      conn.getInputStream();
      fail("Anonymous users should not be allowed to get new application ids in secure mode.");
    } catch (IOException ie) {
      assertEquals(Status.FORBIDDEN.getStatusCode(), conn.getResponseCode());
    }

    url = new URL("http://localhost:8088/ws/v1/cluster/apps");
    conn = (HttpURLConnection) url.openConnection();
    TestRMWebServicesDelegationTokenAuthentication.setupConn(conn, "POST",
        "application/xml", requestBody);

    try {
      conn.getInputStream();
      fail("Anonymous users should not be allowed to submit apps in secure mode.");
    } catch (IOException ie) {
      assertEquals(Status.FORBIDDEN.getStatusCode(), conn.getResponseCode());
    }

    requestBody = "{ \"state\": \"KILLED\"}";
    url =
        new URL(
          "http://localhost:8088/ws/v1/cluster/apps/application_123_0/state");
    conn = (HttpURLConnection) url.openConnection();
    TestRMWebServicesDelegationTokenAuthentication.setupConn(conn, "PUT",
        "application/json", requestBody);

    try {
      conn.getInputStream();
      fail("Anonymous users should not be allowed to kill apps in secure mode.");
    } catch (IOException ie) {
      assertEquals(Status.FORBIDDEN.getStatusCode(), conn.getResponseCode());
    }
  }

  private void testAnonymousSimpleUser() throws Exception {

    ApplicationSubmissionContextInfo app =
        new ApplicationSubmissionContextInfo();
    String appid = "application_123_0";
    app.setApplicationId(appid);
    String requestBody =
        TestRMWebServicesDelegationTokenAuthentication
          .getMarshalledAppInfo(app);

    URL url = new URL("http://localhost:8088/ws/v1/cluster/apps");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    TestRMWebServicesDelegationTokenAuthentication.setupConn(conn, "POST",
        "application/xml", requestBody);

    conn.getInputStream();
    assertEquals(Status.ACCEPTED.getStatusCode(), conn.getResponseCode());
    boolean appExists =
        rm.getRMContext().getRMApps()
          .containsKey(ConverterUtils.toApplicationId(appid));
    assertTrue(appExists);
    RMApp actualApp =
        rm.getRMContext().getRMApps()
          .get(ConverterUtils.toApplicationId(appid));
    String owner = actualApp.getUser();
    assertEquals(
        rm.getConfig().get(CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER,
          CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER), owner);

    appid = "application_123_1";
    app.setApplicationId(appid);
    requestBody =
        TestRMWebServicesDelegationTokenAuthentication
          .getMarshalledAppInfo(app);
    url = new URL("http://localhost:8088/ws/v1/cluster/apps?user.name=client");
    conn = (HttpURLConnection) url.openConnection();
    TestRMWebServicesDelegationTokenAuthentication.setupConn(conn, "POST",
        MediaType.APPLICATION_XML, requestBody);

    conn.getInputStream();
    appExists =
        rm.getRMContext().getRMApps()
          .containsKey(ConverterUtils.toApplicationId(appid));
    assertTrue(appExists);
    actualApp =
        rm.getRMContext().getRMApps()
          .get(ConverterUtils.toApplicationId(appid));
    owner = actualApp.getUser();
    assertEquals("client", owner);

  }

}

@@ -2912,3 +2912,24 @@ Accept: application/xml
+---+

  No response body.

** Authentication using delegation tokens

  This feature is in alpha and may change in the future.

  You can use delegation tokens to authenticate yourself when using the YARN RM web services. However, this requires the right configuration. The conditions for this are:

  * Hadoop is set up in secure mode with the authentication type set to kerberos.

  * Hadoop HTTP authentication is set up with the authentication type set to kerberos (a minimal configuration sketch follows below).
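
  As a rough illustration only, the two conditions above correspond to properties along these lines in core-site.xml; this is a sketch, and the usual Kerberos principal and keytab settings (omitted here) are also required:

+---+
<!-- core-site.xml (sketch only; principals and keytabs omitted) -->
<property>
  <name>hadoop.security.authentication</name>
  <value>kerberos</value>
</property>
<property>
  <name>hadoop.http.authentication.type</name>
  <value>kerberos</value>
</property>
+---+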

  Once this is set up, delegation tokens can be fetched using the web services listed above and used as shown in the example below:

+---+
PUT http://<rm http address:port>/ws/v1/cluster/apps/application_1399397633663_0003/state
Hadoop-YARN-Auth-Delegation-Token: MgASY2xpZW50QEVYQU1QTEUuQ09NDHRlc3QtcmVuZXdlcgCKAUbjqcHHigFHB7ZFxwQCFKWD3znCkDSy6SQIjRCLDydxbxvgE1JNX0RFTEVHQVRJT05fVE9LRU4A
Content-Type: application/json; charset=UTF8
{
  "state":"KILLED"
}
+---+
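
  For completeness, a token itself can be obtained (while authenticated via Kerberos/SPNEGO) from the delegation-token endpoint exercised by the tests in this change. The following is a sketch of such an exchange; the renewer name is illustrative and the token value is abbreviated:

+---+
POST http://<rm http address:port>/ws/v1/cluster/delegation-token
Content-Type: application/json; charset=UTF8
{
  "renewer":"test-renewer"
}

Response (abbreviated):
{
  "token":"MgASY2xpZW50QEVYQU1QTEUuQ09NDHRlc3QtcmVuZXdlcgCKAUbjqcHH..."
}
+---+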