diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index c2f6f91edaf..85aeaf40f45 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -192,6 +192,11 @@ Trunk (Unreleased) HADOOP-10891. Add EncryptedKeyVersion factory method to KeyProviderCryptoExtension. (wang) + HADOOP-10756. KMS audit log should consolidate successful similar requests. + (asuresh via tucu) + + HADOOP-10793. KeyShell args should use single-dash style. (wang) + BUG FIXES HADOOP-9451. Fault single-layer config if node group topology is enabled. @@ -405,6 +410,12 @@ Trunk (Unreleased) HADOOP-10881. Clarify usage of encryption and encrypted encryption key in KeyProviderCryptoExtension. (wang) + HADOOP-10920. site plugin couldn't parse hadoop-kms index.apt.vm. + (Akira Ajisaka via wang) + + HADOOP-10925. Compilation fails in native link0 function on Windows. + (cnauroth) + OPTIMIZATIONS HADOOP-7761. Improve the performance of raw comparisons. (todd) @@ -463,6 +474,14 @@ Release 2.6.0 - UNRELEASED HADOOP-8069. Enable TCP_NODELAY by default for IPC. (Todd Lipcon via Arpit Agarwal) + HADOOP-10902. Deletion of directories with snapshots will not output + reason for trash move failure. (Stephen Chu via wang) + + HADOOP-10900. CredentialShell args should use single-dash style. (wang) + + HADOOP-10903. Enhance hadoop classpath command to expand wildcards or write + classpath into jar manifest. (cnauroth) + OPTIMIZATIONS BUG FIXES @@ -497,6 +516,15 @@ Release 2.6.0 - UNRELEASED HADOOP-10876. The constructor of Path should not take an empty URL as a parameter. (Zhihai Xu via wang) + HADOOP-10928. Incorrect usage on `hadoop credential list`. + (Josh Elser via wang) + + HADOOP-10927. Fix CredentialShell help behavior and error codes. + (Josh Elser via wang) + + HADOOP-10937. Need to set version name correctly before decrypting EEK. + (Arun Suresh via wang) + Release 2.5.0 - UNRELEASED INCOMPATIBLE CHANGES @@ -637,6 +665,8 @@ Release 2.5.0 - UNRELEASED BUG FIXES + HADOOP-10759. Remove hardcoded JAVA_HEAP_MAX. (Sam Liu via Eric Yang) + HADOOP-10378. Typo in help printed by hdfs dfs -help. (Mit Desai via suresh) @@ -813,6 +843,8 @@ Release 2.5.0 - UNRELEASED HADOOP-10894. Fix dead link in ToolRunner documentation. (Akira Ajisaka via Arpit Agarwal) + HADOOP-10910. Increase findbugs maxHeap size. (wang) + BREAKDOWN OF HADOOP-10514 SUBTASKS AND RELATED JIRAS HADOOP-10520. 
Extended attributes definition and FileSystem APIs for diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop b/hadoop-common-project/hadoop-common/src/main/bin/hadoop index 118534274ae..b1e2018f611 100755 --- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop +++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop @@ -35,6 +35,7 @@ function print_usage(){ echo " distcp copy file or directories recursively" echo " archive -archiveName NAME -p * create a hadoop archive" echo " classpath prints the class path needed to get the" + echo " credential interact with credential providers" echo " Hadoop jar and the required libraries" echo " daemonlog get/set the log level for each daemon" echo " or" @@ -90,11 +91,6 @@ case $COMMAND in fi ;; - classpath) - echo $CLASSPATH - exit - ;; - #core commands *) # the core commands @@ -118,6 +114,14 @@ case $COMMAND in CLASSPATH=${CLASSPATH}:${TOOL_PATH} elif [ "$COMMAND" = "credential" ] ; then CLASS=org.apache.hadoop.security.alias.CredentialShell + elif [ "$COMMAND" = "classpath" ] ; then + if [ "$#" -eq 1 ]; then + # No need to bother starting up a JVM for this simple case. + echo $CLASSPATH + exit + else + CLASS=org.apache.hadoop.util.Classpath + fi elif [[ "$COMMAND" = -* ]] ; then # class and package names cannot begin with a - echo "Error: No command named \`$COMMAND' was found. Perhaps you meant \`hadoop ${COMMAND#-}'" diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh index 6581ab4ada1..a0fb9d0c99d 100644 --- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh +++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh @@ -149,8 +149,6 @@ if [[ -z $JAVA_HOME ]]; then fi JAVA=$JAVA_HOME/bin/java -# some Java parameters -JAVA_HEAP_MAX=-Xmx1000m # check envvars which might override default args if [ "$HADOOP_HEAPSIZE" != "" ]; then diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd b/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd index 54b81e364bf..04a302c0f38 100644 --- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd +++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd @@ -115,11 +115,14 @@ call :updatepath %HADOOP_BIN_PATH% ) if %hadoop-command% == classpath ( - @echo %CLASSPATH% - goto :eof + if not defined hadoop-command-arguments ( + @rem No need to bother starting up a JVM for this simple case. 
+ @echo %CLASSPATH% + exit /b + ) ) - set corecommands=fs version jar checknative distcp daemonlog archive + set corecommands=fs version jar checknative distcp daemonlog archive classpath for %%i in ( %corecommands% ) do ( if %hadoop-command% == %%i set corecommand=true ) @@ -175,6 +178,10 @@ call :updatepath %HADOOP_BIN_PATH% set CLASSPATH=%CLASSPATH%;%TOOL_PATH% goto :eof +:classpath + set CLASS=org.apache.hadoop.util.Classpath + goto :eof + :updatepath set path_to_add=%* set current_path_comparable=%path% diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java index 22ccf63e9bd..31c40f60f30 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java @@ -1843,6 +1843,38 @@ public class Configuration implements Iterable>, return pass; } + /** + * Get the socket address for hostProperty as a + * InetSocketAddress. If hostProperty is + * null, addressProperty will be used. This + * is useful for cases where we want to differentiate between host + * bind address and address clients should use to establish connection. + * + * @param hostProperty bind host property name. + * @param addressProperty address property name. + * @param defaultAddressValue the default value + * @param defaultPort the default port + * @return InetSocketAddress + */ + public InetSocketAddress getSocketAddr( + String hostProperty, + String addressProperty, + String defaultAddressValue, + int defaultPort) { + + InetSocketAddress bindAddr = getSocketAddr( + addressProperty, defaultAddressValue, defaultPort); + + final String host = get(hostProperty); + + if (host == null || host.isEmpty()) { + return bindAddr; + } + + return NetUtils.createSocketAddr( + host, bindAddr.getPort(), hostProperty); + } + /** * Get the socket address for name property as a * InetSocketAddress. @@ -1864,6 +1896,40 @@ public class Configuration implements Iterable>, public void setSocketAddr(String name, InetSocketAddress addr) { set(name, NetUtils.getHostPortString(addr)); } + + /** + * Set the socket address a client can use to connect for the + * name property as a host:port. The wildcard + * address is replaced with the local host's address. If the host and address + * properties are configured the host component of the address will be combined + * with the port component of the addr to generate the address. 
This is to allow + * optional control over which host name is used in multi-home bind-host + * cases where a host can have multiple names + * @param hostProperty the bind-host configuration name + * @param addressProperty the service address configuration name + * @param defaultAddressValue the service default address configuration value + * @param addr InetSocketAddress of the service listener + * @return InetSocketAddress for clients to connect + */ + public InetSocketAddress updateConnectAddr( + String hostProperty, + String addressProperty, + String defaultAddressValue, + InetSocketAddress addr) { + + final String host = get(hostProperty); + final String connectHostPort = getTrimmed(addressProperty, defaultAddressValue); + + if (host == null || host.isEmpty() || connectHostPort == null || connectHostPort.isEmpty()) { + //not our case, fall back to original logic + return updateConnectAddr(addressProperty, addr); + } + + final String connectHost = connectHostPort.split(":")[0]; + // Create connect address using client address hostname and server port. + return updateConnectAddr(addressProperty, NetUtils.createSocketAddrForHost( + connectHost, addr.getPort())); + } /** * Set the socket address a client can use to connect for the diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java index 227e19b4841..e2b25ae8ed6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java @@ -21,11 +21,13 @@ package org.apache.hadoop.crypto.key; import java.io.IOException; import java.security.GeneralSecurityException; import java.security.SecureRandom; + import javax.crypto.Cipher; import javax.crypto.spec.IvParameterSpec; import javax.crypto.spec.SecretKeySpec; import com.google.common.base.Preconditions; + import org.apache.hadoop.classification.InterfaceAudience; /** @@ -97,7 +99,7 @@ public class KeyProviderCryptoExtension extends public static EncryptedKeyVersion createForDecryption(String encryptionKeyVersionName, byte[] encryptedKeyIv, byte[] encryptedKeyMaterial) { - KeyVersion encryptedKeyVersion = new KeyVersion(null, null, + KeyVersion encryptedKeyVersion = new KeyVersion(null, EEK, encryptedKeyMaterial); return new EncryptedKeyVersion(null, encryptionKeyVersionName, encryptedKeyIv, encryptedKeyVersion); @@ -258,6 +260,13 @@ public class KeyProviderCryptoExtension extends keyProvider.getKeyVersion(encryptionKeyVersionName); Preconditions.checkNotNull(encryptionKey, "KeyVersion name '%s' does not exist", encryptionKeyVersionName); + Preconditions.checkArgument( + encryptedKeyVersion.getEncryptedKeyVersion().getVersionName() + .equals(KeyProviderCryptoExtension.EEK), + "encryptedKey version name must be '%s', is '%s'", + KeyProviderCryptoExtension.EEK, + encryptedKeyVersion.getEncryptedKeyVersion().getVersionName() + ); final byte[] encryptionKeyMaterial = encryptionKey.getMaterial(); // Encryption key IV is determined from encrypted key's IV final byte[] encryptionIV = diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java index fb01e5f7c5b..6d50c9168d8 100644 --- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java @@ -38,9 +38,9 @@ import org.apache.hadoop.util.ToolRunner; */ public class KeyShell extends Configured implements Tool { final static private String USAGE_PREFIX = "Usage: hadoop key " + - "[generic options]\n"; + "[generic options]\n"; final static private String COMMANDS = - " [--help]\n" + + " [-help]\n" + " [" + CreateCommand.USAGE + "]\n" + " [" + RollCommand.USAGE + "]\n" + " [" + DeleteCommand.USAGE + "]\n" + @@ -90,11 +90,11 @@ public class KeyShell extends Configured implements Tool { /** * Parse the command line arguments and initialize the data *
-   * % hadoop key create keyName [--size size] [--cipher algorithm]
-   *    [--provider providerPath]
-   * % hadoop key roll keyName [--provider providerPath]
+   * % hadoop key create keyName [-size size] [-cipher algorithm]
+   *    [-provider providerPath]
+   * % hadoop key roll keyName [-provider providerPath]
    * % hadoop key list [-provider providerPath]
-   * % hadoop key delete keyName [--provider providerPath] [-i]
+   * % hadoop key delete keyName [-provider providerPath] [-i]
    * </pre>
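For illustration only (not part of this patch), the reworked single-dash options can be driven programmatically the same way the unit tests exercise the shell; the key name and the JCEKS provider URI below are example values.

    // Sketch: invoking KeyShell through ToolRunner with the new single-dash
    // option style. The provider URI and key name are example values only.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyShell;
    import org.apache.hadoop.util.ToolRunner;

    public class KeyShellUsageSketch {
      public static void main(String[] args) throws Exception {
        String provider = "jceks://file/tmp/keystore.jceks"; // example path
        int rc = ToolRunner.run(new Configuration(), new KeyShell(),
            new String[] {"create", "key1", "-size", "256",
                          "-provider", provider});
        System.exit(rc); // 0 on success, 1 on failure
      }
    }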
* @param args Command line arguments. * @return 0 on success, 1 on failure. @@ -107,47 +107,47 @@ public class KeyShell extends Configured implements Tool { for (int i = 0; i < args.length; i++) { // parse command line boolean moreTokens = (i < args.length - 1); if (args[i].equals("create")) { - String keyName = "--help"; + String keyName = "-help"; if (moreTokens) { keyName = args[++i]; } command = new CreateCommand(keyName, options); - if ("--help".equals(keyName)) { + if ("-help".equals(keyName)) { printKeyShellUsage(); return 1; } } else if (args[i].equals("delete")) { - String keyName = "--help"; + String keyName = "-help"; if (moreTokens) { keyName = args[++i]; } command = new DeleteCommand(keyName); - if ("--help".equals(keyName)) { + if ("-help".equals(keyName)) { printKeyShellUsage(); return 1; } } else if (args[i].equals("roll")) { - String keyName = "--help"; + String keyName = "-help"; if (moreTokens) { keyName = args[++i]; } command = new RollCommand(keyName); - if ("--help".equals(keyName)) { + if ("-help".equals(keyName)) { printKeyShellUsage(); return 1; } } else if ("list".equals(args[i])) { command = new ListCommand(); - } else if ("--size".equals(args[i]) && moreTokens) { + } else if ("-size".equals(args[i]) && moreTokens) { options.setBitLength(Integer.parseInt(args[++i])); - } else if ("--cipher".equals(args[i]) && moreTokens) { + } else if ("-cipher".equals(args[i]) && moreTokens) { options.setCipher(args[++i]); - } else if ("--description".equals(args[i]) && moreTokens) { + } else if ("-description".equals(args[i]) && moreTokens) { options.setDescription(args[++i]); - } else if ("--attr".equals(args[i]) && moreTokens) { + } else if ("-attr".equals(args[i]) && moreTokens) { final String attrval[] = args[++i].split("=", 2); final String attr = attrval[0].trim(); final String val = attrval[1].trim(); @@ -164,14 +164,14 @@ public class KeyShell extends Configured implements Tool { return 1; } attributes.put(attr, val); - } else if ("--provider".equals(args[i]) && moreTokens) { + } else if ("-provider".equals(args[i]) && moreTokens) { userSuppliedProvider = true; getConf().set(KeyProviderFactory.KEY_PROVIDER_PATH, args[++i]); - } else if ("--metadata".equals(args[i])) { + } else if ("-metadata".equals(args[i])) { getConf().setBoolean(LIST_METADATA, true); - } else if ("-i".equals(args[i]) || ("--interactive".equals(args[i]))) { + } else if ("-i".equals(args[i]) || ("-interactive".equals(args[i]))) { interactive = true; - } else if ("--help".equals(args[i])) { + } else if ("-help".equals(args[i])) { printKeyShellUsage(); return 1; } else { @@ -258,11 +258,11 @@ public class KeyShell extends Configured implements Tool { private class ListCommand extends Command { public static final String USAGE = - "list [--provider ] [--metadata] [--help]"; + "list [-provider ] [-metadata] [-help]"; public static final String DESC = "The list subcommand displays the keynames contained within\n" + "a particular provider as configured in core-site.xml or\n" + - "specified with the --provider argument. --metadata displays\n" + + "specified with the -provider argument. -metadata displays\n" + "the metadata."; private boolean metadata = false; @@ -272,9 +272,9 @@ public class KeyShell extends Configured implements Tool { provider = getKeyProvider(); if (provider == null) { out.println("There are no non-transient KeyProviders configured.\n" - + "Use the --provider option to specify a provider. If you\n" + + "Use the -provider option to specify a provider. 
If you\n" + "want to list a transient provider then you must use the\n" - + "--provider argument."); + + "-provider argument."); rc = false; } metadata = getConf().getBoolean(LIST_METADATA, false); @@ -310,10 +310,10 @@ public class KeyShell extends Configured implements Tool { } private class RollCommand extends Command { - public static final String USAGE = "roll [--provider ] [--help]"; + public static final String USAGE = "roll [-provider ] [-help]"; public static final String DESC = "The roll subcommand creates a new version for the specified key\n" + - "within the provider indicated using the --provider argument\n"; + "within the provider indicated using the -provider argument\n"; String keyName = null; @@ -326,13 +326,13 @@ public class KeyShell extends Configured implements Tool { provider = getKeyProvider(); if (provider == null) { out.println("There are no valid KeyProviders configured. The key\n" + - "has not been rolled. Use the --provider option to specify\n" + + "has not been rolled. Use the -provider option to specify\n" + "a provider."); rc = false; } if (keyName == null) { out.println("Please provide a .\n" + - "See the usage description by using --help."); + "See the usage description by using -help."); rc = false; } return rc; @@ -367,11 +367,11 @@ public class KeyShell extends Configured implements Tool { } private class DeleteCommand extends Command { - public static final String USAGE = "delete [--provider ] [--help]"; + public static final String USAGE = "delete [-provider ] [-help]"; public static final String DESC = "The delete subcommand deletes all versions of the key\n" + "specified by the argument from within the\n" + - "provider specified --provider."; + "provider specified -provider."; String keyName = null; boolean cont = true; @@ -385,12 +385,12 @@ public class KeyShell extends Configured implements Tool { provider = getKeyProvider(); if (provider == null) { out.println("There are no valid KeyProviders configured. Nothing\n" - + "was deleted. Use the --provider option to specify a provider."); + + "was deleted. Use the -provider option to specify a provider."); return false; } if (keyName == null) { out.println("There is no keyName specified. Please specify a " + - ". See the usage description with --help."); + ". See the usage description with -help."); return false; } if (interactive) { @@ -436,19 +436,19 @@ public class KeyShell extends Configured implements Tool { private class CreateCommand extends Command { public static final String USAGE = - "create [--cipher ] [--size ]\n" + - " [--description ]\n" + - " [--attr ]\n" + - " [--provider ] [--help]"; + "create [-cipher ] [-size ]\n" + + " [-description ]\n" + + " [-attr ]\n" + + " [-provider ] [-help]"; public static final String DESC = "The create subcommand creates a new key for the name specified\n" + "by the argument within the provider specified by the\n" + - "--provider argument. You may specify a cipher with the --cipher\n" + + "-provider argument. You may specify a cipher with the -cipher\n" + "argument. The default cipher is currently \"AES/CTR/NoPadding\".\n" + "The default keysize is 256. You may specify the requested key\n" + - "length using the --size argument. Arbitrary attribute=value\n" + - "style attributes may be specified using the --attr argument.\n" + - "--attr may be specified multiple times, once per attribute.\n"; + "length using the -size argument. 
Arbitrary attribute=value\n" + + "style attributes may be specified using the -attr argument.\n" + + "-attr may be specified multiple times, once per attribute.\n"; final String keyName; final Options options; @@ -463,13 +463,13 @@ public class KeyShell extends Configured implements Tool { provider = getKeyProvider(); if (provider == null) { out.println("There are no valid KeyProviders configured. No key\n" + - " was created. You can use the --provider option to specify\n" + + " was created. You can use the -provider option to specify\n" + " a provider to use."); rc = false; } if (keyName == null) { out.println("Please provide a . See the usage description" + - " with --help."); + " with -help."); rc = false; } return rc; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java index 06521a43591..c5624ee1c41 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java @@ -653,7 +653,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension { encryptedKeyVersion.getEncryptedKeyVersion().getVersionName() .equals(KeyProviderCryptoExtension.EEK), "encryptedKey version name must be '%s', is '%s'", - KeyProviderCryptoExtension.EK, + KeyProviderCryptoExtension.EEK, encryptedKeyVersion.getEncryptedKeyVersion().getVersionName() ); checkNotNull(encryptedKeyVersion.getEncryptedKeyVersion(), "encryptedKey"); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java index 5b456b1eff7..a9a19cdc29b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java @@ -43,6 +43,7 @@ import org.apache.hadoop.fs.Options.CreateOpts; import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.InvalidPathException; import org.apache.hadoop.security.AccessControlException; @@ -803,6 +804,18 @@ public abstract class AbstractFileSystem { throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException; + /** + * The specification of this method matches that of + * {@link FileContext#access(Path, FsAction)} + * except that an UnresolvedLinkException may be thrown if a symlink is + * encountered in the path. 
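Purely as an illustration (not part of the patch), a caller could use the new access() API as a permission pre-check; the path and requested FsAction below are example inputs, and the check remains subject to the time-of-check/time-of-use caveat described in the javadoc.

    // Sketch: client-side permission pre-check with FileSystem#access.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.security.AccessControlException;

    public class AccessCheckSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path("/user/example/data.txt"); // example path
        try {
          fs.access(p, FsAction.READ);
          System.out.println("read access granted for " + p);
        } catch (AccessControlException ace) {
          System.out.println("read access denied for " + p);
        }
      }
    }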
+ */ + @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"}) + public void access(Path path, FsAction mode) throws AccessControlException, + FileNotFoundException, UnresolvedLinkException, IOException { + FileSystem.checkAccessPermissions(this.getFileStatus(path), mode); + } + /** * The specification of this method matches that of * {@link FileContext#getFileLinkStatus(Path)} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java index 808709859a9..c9c8fa8ffdd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java @@ -44,6 +44,7 @@ import org.apache.hadoop.fs.FileSystem.Statistics; import org.apache.hadoop.fs.Options.CreateOpts; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT; @@ -1108,6 +1109,55 @@ public final class FileContext { }.resolve(this, absF); } + /** + * Checks if the user can access a path. The mode specifies which access + * checks to perform. If the requested permissions are granted, then the + * method returns normally. If access is denied, then the method throws an + * {@link AccessControlException}. + *

+ * The default implementation of this method calls {@link #getFileStatus(Path)} + * and checks the returned permissions against the requested permissions. + * Note that the getFileStatus call will be subject to authorization checks. + * Typically, this requires search (execute) permissions on each directory in + * the path's prefix, but this is implementation-defined. Any file system + * that provides a richer authorization model (such as ACLs) may override the + * default implementation so that it checks against that model instead. + *

+ * In general, applications should avoid using this method, due to the risk of + * time-of-check/time-of-use race conditions. The permissions on a file may + * change immediately after the access call returns. Most applications should + * prefer running specific file system actions as the desired user represented + * by a {@link UserGroupInformation}. + * + * @param path Path to check + * @param mode type of access to check + * @throws AccessControlException if access is denied + * @throws FileNotFoundException if the path does not exist + * @throws UnsupportedFileSystemException if file system for path + * is not supported + * @throws IOException see specific implementation + * + * Exceptions applicable to file systems accessed over RPC: + * @throws RpcClientException If an exception occurred in the RPC client + * @throws RpcServerException If an exception occurred in the RPC server + * @throws UnexpectedServerException If server implementation throws + * undeclared exception to RPC server + */ + @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"}) + public void access(final Path path, final FsAction mode) + throws AccessControlException, FileNotFoundException, + UnsupportedFileSystemException, IOException { + final Path absPath = fixRelativePart(path); + new FSLinkResolver() { + @Override + public Void next(AbstractFileSystem fs, Path p) throws IOException, + UnresolvedLinkException { + fs.access(p, mode); + return null; + } + }.resolve(this, absPath); + } + /** * Return a file status object that represents the path. If the path * refers to a symlink then the FileStatus of the symlink is returned. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index 1eb54d16a9b..1d2270b37ed 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -25,6 +25,7 @@ import java.net.URI; import java.net.URISyntaxException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; +import java.util.Arrays; import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; @@ -50,6 +51,7 @@ import org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.MultipleIOException; import org.apache.hadoop.io.Text; @@ -2072,6 +2074,71 @@ public abstract class FileSystem extends Configured implements Closeable { */ public abstract FileStatus getFileStatus(Path f) throws IOException; + /** + * Checks if the user can access a path. The mode specifies which access + * checks to perform. If the requested permissions are granted, then the + * method returns normally. If access is denied, then the method throws an + * {@link AccessControlException}. + *

+ * The default implementation of this method calls {@link #getFileStatus(Path)} + * and checks the returned permissions against the requested permissions. + * Note that the getFileStatus call will be subject to authorization checks. + * Typically, this requires search (execute) permissions on each directory in + * the path's prefix, but this is implementation-defined. Any file system + * that provides a richer authorization model (such as ACLs) may override the + * default implementation so that it checks against that model instead. + *

+ * In general, applications should avoid using this method, due to the risk of + * time-of-check/time-of-use race conditions. The permissions on a file may + * change immediately after the access call returns. Most applications should + * prefer running specific file system actions as the desired user represented + * by a {@link UserGroupInformation}. + * + * @param path Path to check + * @param mode type of access to check + * @throws AccessControlException if access is denied + * @throws FileNotFoundException if the path does not exist + * @throws IOException see specific implementation + */ + @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"}) + public void access(Path path, FsAction mode) throws AccessControlException, + FileNotFoundException, IOException { + checkAccessPermissions(this.getFileStatus(path), mode); + } + + /** + * This method provides the default implementation of + * {@link #access(Path, FsAction)}. + * + * @param stat FileStatus to check + * @param mode type of access to check + * @throws IOException for any error + */ + @InterfaceAudience.Private + static void checkAccessPermissions(FileStatus stat, FsAction mode) + throws IOException { + FsPermission perm = stat.getPermission(); + UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); + String user = ugi.getShortUserName(); + List groups = Arrays.asList(ugi.getGroupNames()); + if (user.equals(stat.getOwner())) { + if (perm.getUserAction().implies(mode)) { + return; + } + } else if (groups.contains(stat.getGroup())) { + if (perm.getGroupAction().implies(mode)) { + return; + } + } else { + if (perm.getOtherAction().implies(mode)) { + return; + } + } + throw new AccessControlException(String.format( + "Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user, stat.getPath(), + stat.getOwner(), stat.getGroup(), stat.isDirectory() ? 
"d" : "-", perm)); + } + /** * See {@link FileContext#fixRelativePart} */ diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java index 139e1430f8b..52706f4049a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java @@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.security.AccessControlException; @@ -397,6 +398,12 @@ public class FilterFileSystem extends FileSystem { return fs.getFileStatus(f); } + @Override + public void access(Path path, FsAction mode) throws AccessControlException, + FileNotFoundException, IOException { + fs.access(path, mode); + } + public void createSymlink(final Path target, final Path link, final boolean createParent) throws AccessControlException, FileAlreadyExistsException, FileNotFoundException, diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java index 6ffe9214b37..b6e1d96e038 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java @@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileSystem.Statistics; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.security.AccessControlException; @@ -119,6 +120,13 @@ public abstract class FilterFs extends AbstractFileSystem { return myFs.getFileStatus(f); } + @Override + public void access(Path path, FsAction mode) throws AccessControlException, + FileNotFoundException, UnresolvedLinkException, IOException { + checkPath(path); + myFs.access(path, mode); + } + @Override public FileStatus getFileLinkStatus(final Path f) throws IOException, UnresolvedLinkException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java index fcb0690d8d4..6798fbee438 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java @@ -118,7 +118,11 @@ class Delete { } catch(FileNotFoundException fnfe) { throw fnfe; } catch (IOException ioe) { - throw new IOException(ioe.getMessage() + ". Consider using -skipTrash option", ioe); + String msg = ioe.getMessage(); + if (ioe.getCause() != null) { + msg += ": " + ioe.getCause().getMessage(); + } + throw new IOException(msg + ". 
Consider using -skipTrash option", ioe); } } return success; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java index 4480da20f39..9650a374d18 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java @@ -41,7 +41,9 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.util.Progressable; /** @@ -222,6 +224,12 @@ class ChRootedFileSystem extends FilterFileSystem { return super.getFileStatus(fullPath(f)); } + @Override + public void access(Path path, FsAction mode) throws AccessControlException, + FileNotFoundException, IOException { + super.access(fullPath(path), mode); + } + @Override public FsStatus getStatus(Path p) throws IOException { return super.getStatus(fullPath(p)); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java index 5d53eb79d0a..9569e1089bb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java @@ -41,7 +41,9 @@ import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Progressable; @@ -200,6 +202,11 @@ class ChRootedFs extends AbstractFileSystem { return myFs.getFileStatus(fullPath(f)); } + public void access(Path path, FsAction mode) throws AccessControlException, + FileNotFoundException, UnresolvedLinkException, IOException { + myFs.access(fullPath(path), mode); + } + @Override public FileStatus getFileLinkStatus(final Path f) throws IOException, UnresolvedLinkException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index b4ac18eb1af..963289f4373 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -51,6 +51,7 @@ import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.AclUtil; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.viewfs.InodeTree.INode; import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink; @@ -359,7 +360,14 @@ public class 
ViewFileSystem extends FileSystem { return new ViewFsFileStatus(status, this.makeQualified(f)); } - + @Override + public void access(Path path, FsAction mode) throws AccessControlException, + FileNotFoundException, IOException { + InodeTree.ResolveResult res = + fsState.resolve(getUriPath(path), true); + res.targetFileSystem.access(res.remainingPath, mode); + } + @Override public FileStatus[] listStatus(final Path f) throws AccessControlException, FileNotFoundException, IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java index 5cdccd29975..014f4881275 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java @@ -54,6 +54,7 @@ import org.apache.hadoop.fs.local.LocalConfigKeys; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclUtil; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.viewfs.InodeTree.INode; import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink; @@ -352,6 +353,14 @@ public class ViewFs extends AbstractFileSystem { return new ViewFsFileStatus(status, this.makeQualified(f)); } + @Override + public void access(Path path, FsAction mode) throws AccessControlException, + FileNotFoundException, UnresolvedLinkException, IOException { + InodeTree.ResolveResult res = + fsState.resolve(getUriPath(path), true); + res.targetFileSystem.access(res.remainingPath, mode); + } + @Override public FileStatus getFileLinkStatus(final Path f) throws AccessControlException, FileNotFoundException, diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java index 976a93f91b1..fafa29543e0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java @@ -33,6 +33,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.HardLink; import org.apache.hadoop.io.SecureIOUtils.AlreadyExistsException; import org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.util.Shell; @@ -823,6 +824,14 @@ public class NativeIO { } } + public static void link(File src, File dst) throws IOException { + if (!nativeLoaded) { + HardLink.createHardLink(src, dst); + } else { + link0(src.getAbsolutePath(), dst.getAbsolutePath()); + } + } + /** * A version of renameTo that throws a descriptive exception when it fails. 
* @@ -833,4 +842,7 @@ public class NativeIO { */ private static native void renameTo0(String src, String dst) throws NativeIOException; + + private static native void link0(String src, String dst) + throws NativeIOException; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java index b71fbda6301..b5bf26fa084 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java @@ -77,7 +77,8 @@ public class SecurityUtil { * For use only by tests and initialization */ @InterfaceAudience.Private - static void setTokenServiceUseIp(boolean flag) { + @VisibleForTesting + public static void setTokenServiceUseIp(boolean flag) { useIpForTokenService = flag; hostResolver = !useIpForTokenService ? new QualifiedHostResolver() diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java index bb35ce51d48..6d9c6af2631 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java @@ -67,11 +67,11 @@ public class CredentialShell extends Configured implements Tool { if (command.validate()) { command.execute(); } else { - exitCode = -1; + exitCode = 1; } } catch (Exception e) { e.printStackTrace(err); - return -1; + return 1; } return exitCode; } @@ -79,47 +79,54 @@ public class CredentialShell extends Configured implements Tool { /** * Parse the command line arguments and initialize the data *

-   * % hadoop alias create alias [--provider providerPath]
-   * % hadoop alias list [-provider providerPath]
-   * % hadoop alias delete alias [--provider providerPath] [-i]
+   * % hadoop credential create alias [-provider providerPath]
+   * % hadoop credential list [-provider providerPath]
+   * % hadoop credential delete alias [-provider providerPath] [-i]
    * </pre>
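For illustration only (not part of this patch), the renamed credential command can be invoked the same way; the alias and provider URI are example values.

    // Sketch: invoking CredentialShell via ToolRunner with single-dash options.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.alias.CredentialShell;
    import org.apache.hadoop.util.ToolRunner;

    public class CredentialShellUsageSketch {
      public static void main(String[] args) throws Exception {
        String provider = "jceks://file/tmp/credentials.jceks"; // example path
        int rc = ToolRunner.run(new Configuration(), new CredentialShell(),
            new String[] {"list", "-provider", provider});
        System.exit(rc); // 0 if the arguments were recognized, 1 otherwise
      }
    }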
* @param args - * @return + * @return 0 if the argument(s) were recognized, 1 otherwise * @throws IOException */ - private int init(String[] args) throws IOException { + protected int init(String[] args) throws IOException { + // no args should print the help message + if (0 == args.length) { + printCredShellUsage(); + ToolRunner.printGenericCommandUsage(System.err); + return 1; + } + for (int i = 0; i < args.length; i++) { // parse command line if (args[i].equals("create")) { String alias = args[++i]; command = new CreateCommand(alias); - if (alias.equals("--help")) { + if (alias.equals("-help")) { printCredShellUsage(); - return -1; + return 0; } } else if (args[i].equals("delete")) { String alias = args[++i]; command = new DeleteCommand(alias); - if (alias.equals("--help")) { + if (alias.equals("-help")) { printCredShellUsage(); - return -1; + return 0; } } else if (args[i].equals("list")) { command = new ListCommand(); - } else if (args[i].equals("--provider")) { + } else if (args[i].equals("-provider")) { userSuppliedProvider = true; getConf().set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, args[++i]); - } else if (args[i].equals("-i") || (args[i].equals("--interactive"))) { + } else if (args[i].equals("-i") || (args[i].equals("-interactive"))) { interactive = true; - } else if (args[i].equals("-v") || (args[i].equals("--value"))) { + } else if (args[i].equals("-v") || (args[i].equals("-value"))) { value = args[++i]; - } else if (args[i].equals("--help")) { + } else if (args[i].equals("-help")) { printCredShellUsage(); - return -1; + return 0; } else { printCredShellUsage(); ToolRunner.printGenericCommandUsage(System.err); - return -1; + return 1; } } return 0; @@ -188,20 +195,20 @@ public class CredentialShell extends Configured implements Tool { } private class ListCommand extends Command { - public static final String USAGE = "list [--provider] [--help]"; + public static final String USAGE = "list [-provider] [-help]"; public static final String DESC = "The list subcommand displays the aliases contained within \n" + "a particular provider - as configured in core-site.xml or " + - "indicated\nthrough the --provider argument."; + "indicated\nthrough the -provider argument."; public boolean validate() { boolean rc = true; provider = getCredentialProvider(); if (provider == null) { out.println("There are no non-transient CredentialProviders configured.\n" - + "Consider using the --provider option to indicate the provider\n" + + "Consider using the -provider option to indicate the provider\n" + "to use. 
If you want to list a transient provider then you\n" - + "you MUST use the --provider argument."); + + "you MUST use the -provider argument."); rc = false; } return rc; @@ -229,11 +236,11 @@ public class CredentialShell extends Configured implements Tool { } private class DeleteCommand extends Command { - public static final String USAGE = "delete [--provider] [--help]"; + public static final String USAGE = "delete [-provider] [-help]"; public static final String DESC = "The delete subcommand deletes the credenital\n" + "specified as the argument from within the provider\n" + - "indicated through the --provider argument"; + "indicated through the -provider argument"; String alias = null; boolean cont = true; @@ -248,13 +255,13 @@ public class CredentialShell extends Configured implements Tool { if (provider == null) { out.println("There are no valid CredentialProviders configured.\n" + "Nothing will be deleted.\n" - + "Consider using the --provider option to indicate the provider" + + "Consider using the -provider option to indicate the provider" + " to use."); return false; } if (alias == null) { out.println("There is no alias specified. Please provide the" + - "mandatory . See the usage description with --help."); + "mandatory . See the usage description with -help."); return false; } if (interactive) { @@ -299,11 +306,11 @@ public class CredentialShell extends Configured implements Tool { } private class CreateCommand extends Command { - public static final String USAGE = "create [--provider] [--help]"; + public static final String USAGE = "create [-provider] [-help]"; public static final String DESC = "The create subcommand creates a new credential for the name specified\n" + "as the argument within the provider indicated through\n" + - "the --provider argument."; + "the -provider argument."; String alias = null; @@ -317,13 +324,13 @@ public class CredentialShell extends Configured implements Tool { if (provider == null) { out.println("There are no valid CredentialProviders configured." + "\nCredential will not be created.\n" - + "Consider using the --provider option to indicate the provider" + + + "Consider using the -provider option to indicate the provider" + " to use."); rc = false; } if (alias == null) { out.println("There is no alias specified. Please provide the" + - "mandatory . See the usage description with --help."); + "mandatory . See the usage description with -help."); rc = false; } return rc; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Classpath.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Classpath.java new file mode 100644 index 00000000000..0d3df2d0ce0 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Classpath.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.util; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.shell.CommandFormat; +import org.apache.hadoop.fs.shell.CommandFormat.UnknownOptionException; + +/** + * Command-line utility for getting the full classpath needed to launch a Hadoop + * client application. If the hadoop script is called with "classpath" as the + * command, then it simply prints the classpath and exits immediately without + * launching a JVM. The output likely will include wildcards in the classpath. + * If there are arguments passed to the classpath command, then this class gets + * called. With the --glob argument, it prints the full classpath with wildcards + * expanded. This is useful in situations where wildcard syntax isn't usable. + * With the --jar argument, it writes the classpath as a manifest in a jar file. + * This is useful in environments with short limitations on the maximum command + * line length, where it may not be possible to specify the full classpath in a + * command. For example, the maximum command line length on Windows is 8191 + * characters. + */ +@InterfaceAudience.Private +public final class Classpath { + private static final String usage = + "classpath [--glob|--jar |-h|--help] :\n" + + " Prints the classpath needed to get the Hadoop jar and the required\n" + + " libraries.\n" + + " Options:\n" + + "\n" + + " --glob expand wildcards\n" + + " --jar write classpath as manifest in jar named \n" + + " -h, --help print help\n"; + + /** + * Main entry point. + * + * @param args command-line arguments + */ + public static void main(String[] args) { + if (args.length < 1 || args[0].equals("-h") || args[0].equals("--help")) { + System.out.println(usage); + return; + } + + // Copy args, because CommandFormat mutates the list. + List argsList = new ArrayList(Arrays.asList(args)); + CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "-glob", "-jar"); + try { + cf.parse(argsList); + } catch (UnknownOptionException e) { + terminate(1, "unrecognized option"); + return; + } + + String classPath = System.getProperty("java.class.path"); + + if (cf.getOpt("-glob")) { + // The classpath returned from the property has been globbed already. + System.out.println(classPath); + } else if (cf.getOpt("-jar")) { + if (argsList.isEmpty() || argsList.get(0) == null || + argsList.get(0).isEmpty()) { + terminate(1, "-jar option requires path of jar file to write"); + return; + } + + // Write the classpath into the manifest of a temporary jar file. + Path workingDir = new Path(System.getProperty("user.dir")); + final String tmpJarPath; + try { + tmpJarPath = FileUtil.createJarWithClassPath(classPath, workingDir, + System.getenv()); + } catch (IOException e) { + terminate(1, "I/O error creating jar: " + e.getMessage()); + return; + } + + // Rename the temporary file to its final location. + String jarPath = argsList.get(0); + try { + FileUtil.replaceFile(new File(tmpJarPath), new File(jarPath)); + } catch (IOException e) { + terminate(1, "I/O error renaming jar temporary file to path: " + + e.getMessage()); + return; + } + } + } + + /** + * Prints a message to stderr and exits with a status code. 
+ * + * @param status exit code + * @param msg message + */ + private static void terminate(int status, String msg) { + System.err.println(msg); + ExitUtil.terminate(status, msg); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java index 72a4d1b70e9..f2ee446b4ab 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java @@ -78,6 +78,20 @@ public class DiskChecker { (mkdirsWithExistsCheck(new File(parent)) && (canonDir.mkdir() || canonDir.exists())); } + + /** + * Recurse down a directory tree, checking all child directories. + * @param dir + * @throws DiskErrorException + */ + public static void checkDirs(File dir) throws DiskErrorException { + checkDir(dir); + for (File child : dir.listFiles()) { + if (child.isDirectory()) { + checkDirs(child); + } + } + } /** * Create the directory if it doesn't exist and check that dir is readable, diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java index e7f983ac668..4e2783df88d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java @@ -27,6 +27,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Date; +import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashSet; import java.util.List; @@ -377,6 +378,19 @@ public class StringUtils { return str.trim().split("\\s*,\\s*"); } + /** + * Trims all the strings in a Collection and returns a Set. 
+ * @param strings + * @return + */ + public static Set getTrimmedStrings(Collection strings) { + Set trimmedStrings = new HashSet(); + for (String string: strings) { + trimmedStrings.add(string.trim()); + } + return trimmedStrings; + } + final public static String[] emptyStringArray = {}; final public static char COMMA = ','; final public static String COMMA_STR = ","; diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c index 95bb987602f..23513bfd667 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c @@ -1054,6 +1054,43 @@ done: #endif } +JNIEXPORT void JNICALL +Java_org_apache_hadoop_io_nativeio_NativeIO_link0(JNIEnv *env, +jclass clazz, jstring jsrc, jstring jdst) +{ +#ifdef UNIX + const char *src = NULL, *dst = NULL; + + src = (*env)->GetStringUTFChars(env, jsrc, NULL); + if (!src) goto done; // exception was thrown + dst = (*env)->GetStringUTFChars(env, jdst, NULL); + if (!dst) goto done; // exception was thrown + if (link(src, dst)) { + throw_ioe(env, errno); + } + +done: + if (src) (*env)->ReleaseStringUTFChars(env, jsrc, src); + if (dst) (*env)->ReleaseStringUTFChars(env, jdst, dst); +#endif + +#ifdef WINDOWS + LPCTSTR src = NULL, dst = NULL; + + src = (LPCTSTR) (*env)->GetStringChars(env, jsrc, NULL); + if (!src) goto done; // exception was thrown + dst = (LPCTSTR) (*env)->GetStringChars(env, jdst, NULL); + if (!dst) goto done; // exception was thrown + if (!CreateHardLink(dst, src, NULL)) { + throw_ioe(env, GetLastError()); + } + +done: + if (src) (*env)->ReleaseStringChars(env, jsrc, src); + if (dst) (*env)->ReleaseStringChars(env, jdst, dst); +#endif +} + JNIEXPORT jlong JNICALL Java_org_apache_hadoop_io_nativeio_NativeIO_getMemlockLimit0( JNIEnv *env, jclass clazz) diff --git a/hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm b/hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm index 149c2202506..01ed6bcf39f 100644 --- a/hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm +++ b/hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm @@ -296,9 +296,24 @@ User Commands * <<>> Prints the class path needed to get the Hadoop jar and the required - libraries. + libraries. If called without arguments, then prints the classpath set up by + the command scripts, which is likely to contain wildcards in the classpath + entries. Additional options print the classpath after wildcard expansion or + write the classpath into the manifest of a jar file. The latter is useful in + environments where wildcards cannot be used and the expanded classpath exceeds + the maximum supported command line length. 
- Usage: <<>> + Usage: <<|-h|--help]>>> + +*-----------------+-----------------------------------------------------------+ +|| COMMAND_OPTION || Description +*-----------------+-----------------------------------------------------------+ +| --glob | expand wildcards +*-----------------+-----------------------------------------------------------+ +| --jar | write classpath as manifest in jar named +*-----------------+-----------------------------------------------------------+ +| -h, --help | print help +*-----------------+-----------------------------------------------------------+ Administration Commands diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java index a0da98b4fc6..70ec6feaf10 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java @@ -26,10 +26,10 @@ import javax.crypto.spec.IvParameterSpec; import javax.crypto.spec.SecretKeySpec; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion; import org.junit.BeforeClass; import org.junit.Test; - import static org.apache.hadoop.crypto.key.KeyProvider.KeyVersion; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; @@ -118,8 +118,15 @@ public class TestKeyProviderCryptoExtension { new IvParameterSpec(KeyProviderCryptoExtension.EncryptedKeyVersion .deriveIV(encryptedKeyIv))); final byte[] manualMaterial = cipher.doFinal(encryptedKeyMaterial); + + // Test the createForDecryption factory method + EncryptedKeyVersion eek2 = + EncryptedKeyVersion.createForDecryption( + eek.getEncryptionKeyVersionName(), eek.getEncryptedKeyIv(), + eek.getEncryptedKeyVersion().getMaterial()); + // Decrypt it with the API - KeyVersion decryptedKey = kpExt.decryptEncryptedKey(eek); + KeyVersion decryptedKey = kpExt.decryptEncryptedKey(eek2); final byte[] apiMaterial = decryptedKey.getMaterial(); assertArrayEquals("Wrong key material from decryptEncryptedKey", diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java index 154579b567d..5981a2a6a38 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java @@ -73,7 +73,7 @@ public class TestKeyShell { private void deleteKey(KeyShell ks, String keyName) throws Exception { int rc; outContent.reset(); - final String[] delArgs = {"delete", keyName, "--provider", jceksProvider}; + final String[] delArgs = {"delete", keyName, "-provider", jceksProvider}; rc = ks.run(delArgs); assertEquals(0, rc); assertTrue(outContent.toString().contains(keyName + " has been " + @@ -90,8 +90,8 @@ public class TestKeyShell { private String listKeys(KeyShell ks, boolean wantMetadata) throws Exception { int rc; outContent.reset(); - final String[] listArgs = {"list", "--provider", jceksProvider }; - final String[] listArgsM = {"list", "--metadata", "--provider", jceksProvider }; + final String[] listArgs = {"list", "-provider", jceksProvider }; + 
final String[] listArgsM = {"list", "-metadata", "-provider", jceksProvider }; rc = ks.run(wantMetadata ? listArgsM : listArgs); assertEquals(0, rc); return outContent.toString(); @@ -106,7 +106,7 @@ public class TestKeyShell { ks.setConf(new Configuration()); outContent.reset(); - final String[] args1 = {"create", keyName, "--provider", jceksProvider}; + final String[] args1 = {"create", keyName, "-provider", jceksProvider}; rc = ks.run(args1); assertEquals(0, rc); assertTrue(outContent.toString().contains(keyName + " has been " + @@ -121,7 +121,7 @@ public class TestKeyShell { assertTrue(listOut.contains("created")); outContent.reset(); - final String[] args2 = {"roll", keyName, "--provider", jceksProvider}; + final String[] args2 = {"roll", keyName, "-provider", jceksProvider}; rc = ks.run(args2); assertEquals(0, rc); assertTrue(outContent.toString().contains("key1 has been successfully " + @@ -137,8 +137,8 @@ public class TestKeyShell { @Test public void testKeySuccessfulCreationWithDescription() throws Exception { outContent.reset(); - final String[] args1 = {"create", "key1", "--provider", jceksProvider, - "--description", "someDescription"}; + final String[] args1 = {"create", "key1", "-provider", jceksProvider, + "-description", "someDescription"}; int rc = 0; KeyShell ks = new KeyShell(); ks.setConf(new Configuration()); @@ -154,7 +154,7 @@ public class TestKeyShell { @Test public void testInvalidKeySize() throws Exception { - final String[] args1 = {"create", "key1", "--size", "56", "--provider", + final String[] args1 = {"create", "key1", "-size", "56", "-provider", jceksProvider}; int rc = 0; @@ -167,7 +167,7 @@ public class TestKeyShell { @Test public void testInvalidCipher() throws Exception { - final String[] args1 = {"create", "key1", "--cipher", "LJM", "--provider", + final String[] args1 = {"create", "key1", "-cipher", "LJM", "-provider", jceksProvider}; int rc = 0; @@ -180,7 +180,7 @@ public class TestKeyShell { @Test public void testInvalidProvider() throws Exception { - final String[] args1 = {"create", "key1", "--cipher", "AES", "--provider", + final String[] args1 = {"create", "key1", "-cipher", "AES", "-provider", "sdff://file/tmp/keystore.jceks"}; int rc = 0; @@ -194,7 +194,7 @@ public class TestKeyShell { @Test public void testTransientProviderWarning() throws Exception { - final String[] args1 = {"create", "key1", "--cipher", "AES", "--provider", + final String[] args1 = {"create", "key1", "-cipher", "AES", "-provider", "user:///"}; int rc = 0; @@ -224,8 +224,8 @@ public class TestKeyShell { @Test public void testFullCipher() throws Exception { final String keyName = "key1"; - final String[] args1 = {"create", keyName, "--cipher", "AES/CBC/pkcs5Padding", - "--provider", jceksProvider}; + final String[] args1 = {"create", keyName, "-cipher", "AES/CBC/pkcs5Padding", + "-provider", jceksProvider}; int rc = 0; KeyShell ks = new KeyShell(); @@ -245,8 +245,8 @@ public class TestKeyShell { ks.setConf(new Configuration()); /* Simple creation test */ - final String[] args1 = {"create", "keyattr1", "--provider", jceksProvider, - "--attr", "foo=bar"}; + final String[] args1 = {"create", "keyattr1", "-provider", jceksProvider, + "-attr", "foo=bar"}; rc = ks.run(args1); assertEquals(0, rc); assertTrue(outContent.toString().contains("keyattr1 has been " + @@ -259,8 +259,8 @@ public class TestKeyShell { /* Negative tests: no attribute */ outContent.reset(); - final String[] args2 = {"create", "keyattr2", "--provider", jceksProvider, - "--attr", "=bar"}; + final String[] args2 = 
{"create", "keyattr2", "-provider", jceksProvider, + "-attr", "=bar"}; rc = ks.run(args2); assertEquals(1, rc); @@ -288,10 +288,10 @@ public class TestKeyShell { /* Test several attrs together... */ outContent.reset(); - final String[] args3 = {"create", "keyattr3", "--provider", jceksProvider, - "--attr", "foo = bar", - "--attr", " glarch =baz ", - "--attr", "abc=def"}; + final String[] args3 = {"create", "keyattr3", "-provider", jceksProvider, + "-attr", "foo = bar", + "-attr", " glarch =baz ", + "-attr", "abc=def"}; rc = ks.run(args3); assertEquals(0, rc); @@ -304,9 +304,9 @@ public class TestKeyShell { /* Negative test - repeated attributes should fail */ outContent.reset(); - final String[] args4 = {"create", "keyattr4", "--provider", jceksProvider, - "--attr", "foo=bar", - "--attr", "foo=glarch"}; + final String[] args4 = {"create", "keyattr4", "-provider", jceksProvider, + "-attr", "foo=bar", + "-attr", "foo=glarch"}; rc = ks.run(args4); assertEquals(1, rc); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java index 24e712c051c..1e86439785b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java @@ -23,6 +23,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.token.Token; @@ -201,6 +202,8 @@ public class TestHarFileSystem { public void removeXAttr(Path path, String name) throws IOException; public AclStatus getAclStatus(Path path) throws IOException; + + public void access(Path path, FsAction mode) throws IOException; } @Test diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredShell.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredShell.java index c48b69f2149..b9f0dc937cb 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredShell.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredShell.java @@ -17,16 +17,18 @@ */ package org.apache.hadoop.security.alias; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.PrintStream; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.security.alias.CredentialShell.PasswordReader; import org.junit.Before; import org.junit.Test; @@ -45,7 +47,7 @@ public class TestCredShell { @Test public void testCredentialSuccessfulLifecycle() throws Exception { outContent.reset(); - String[] args1 = {"create", "credential1", "--value", "p@ssw0rd", "--provider", + String[] args1 = {"create", "credential1", "-value", "p@ssw0rd", "-provider", "jceks://file" + tmpDir + "/credstore.jceks"}; int rc = 0; CredentialShell cs = new CredentialShell(); @@ -56,14 +58,14 @@ public 
class TestCredShell { "created.")); outContent.reset(); - String[] args2 = {"list", "--provider", + String[] args2 = {"list", "-provider", "jceks://file" + tmpDir + "/credstore.jceks"}; rc = cs.run(args2); assertEquals(0, rc); assertTrue(outContent.toString().contains("credential1")); outContent.reset(); - String[] args4 = {"delete", "credential1", "--provider", + String[] args4 = {"delete", "credential1", "-provider", "jceks://file" + tmpDir + "/credstore.jceks"}; rc = cs.run(args4); assertEquals(0, rc); @@ -71,7 +73,7 @@ public class TestCredShell { "deleted.")); outContent.reset(); - String[] args5 = {"list", "--provider", + String[] args5 = {"list", "-provider", "jceks://file" + tmpDir + "/credstore.jceks"}; rc = cs.run(args5); assertEquals(0, rc); @@ -80,21 +82,21 @@ public class TestCredShell { @Test public void testInvalidProvider() throws Exception { - String[] args1 = {"create", "credential1", "--value", "p@ssw0rd", "--provider", + String[] args1 = {"create", "credential1", "-value", "p@ssw0rd", "-provider", "sdff://file/tmp/credstore.jceks"}; int rc = 0; CredentialShell cs = new CredentialShell(); cs.setConf(new Configuration()); rc = cs.run(args1); - assertEquals(-1, rc); + assertEquals(1, rc); assertTrue(outContent.toString().contains("There are no valid " + "CredentialProviders configured.")); } @Test public void testTransientProviderWarning() throws Exception { - String[] args1 = {"create", "credential1", "--value", "p@ssw0rd", "--provider", + String[] args1 = {"create", "credential1", "-value", "p@ssw0rd", "-provider", "user:///"}; int rc = 0; @@ -105,7 +107,7 @@ public class TestCredShell { assertTrue(outContent.toString().contains("WARNING: you are modifying a " + "transient provider.")); - String[] args2 = {"delete", "credential1", "--provider", "user:///"}; + String[] args2 = {"delete", "credential1", "-provider", "user:///"}; rc = cs.run(args2); assertEquals(outContent.toString(), 0, rc); assertTrue(outContent.toString().contains("credential1 has been successfully " + @@ -122,14 +124,14 @@ public class TestCredShell { config.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, "user:///"); cs.setConf(config); rc = cs.run(args1); - assertEquals(-1, rc); + assertEquals(1, rc); assertTrue(outContent.toString().contains("There are no valid " + "CredentialProviders configured.")); } @Test public void testPromptForCredentialWithEmptyPasswd() throws Exception { - String[] args1 = {"create", "credential1", "--provider", + String[] args1 = {"create", "credential1", "-provider", "jceks://file" + tmpDir + "/credstore.jceks"}; ArrayList passwords = new ArrayList(); passwords.add(null); @@ -139,13 +141,13 @@ public class TestCredShell { shell.setConf(new Configuration()); shell.setPasswordReader(new MockPasswordReader(passwords)); rc = shell.run(args1); - assertEquals(outContent.toString(), -1, rc); + assertEquals(outContent.toString(), 1, rc); assertTrue(outContent.toString().contains("Passwords don't match")); } @Test public void testPromptForCredential() throws Exception { - String[] args1 = {"create", "credential1", "--provider", + String[] args1 = {"create", "credential1", "-provider", "jceks://file" + tmpDir + "/credstore.jceks"}; ArrayList passwords = new ArrayList(); passwords.add("p@ssw0rd"); @@ -159,7 +161,7 @@ public class TestCredShell { assertTrue(outContent.toString().contains("credential1 has been successfully " + "created.")); - String[] args2 = {"delete", "credential1", "--provider", + String[] args2 = {"delete", "credential1", "-provider", "jceks://file" + 
tmpDir + "/credstore.jceks"}; rc = shell.run(args2); assertEquals(0, rc); @@ -186,4 +188,21 @@ public class TestCredShell { System.out.println(message); } } + + @Test + public void testEmptyArgList() throws Exception { + CredentialShell shell = new CredentialShell(); + shell.setConf(new Configuration()); + assertEquals(1, shell.init(new String[0])); + } + + @Test + public void testCommandHelpExitsNormally() throws Exception { + for (String cmd : Arrays.asList("create", "list", "delete")) { + CredentialShell shell = new CredentialShell(); + shell.setConf(new Configuration()); + assertEquals("Expected help argument on " + cmd + " to return 0", + 0, shell.init(new String[] {cmd, "-help"})); + } + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClasspath.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClasspath.java new file mode 100644 index 00000000000..9ffde9030e2 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClasspath.java @@ -0,0 +1,176 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.util; + +import static org.junit.Assert.*; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.PrintStream; +import java.nio.charset.Charset; +import java.util.jar.Attributes; +import java.util.jar.JarFile; +import java.util.jar.Manifest; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.io.IOUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +/** + * Tests covering the classpath command-line utility. 
+ */ +public class TestClasspath { + + private static final Log LOG = LogFactory.getLog(TestClasspath.class); + private static final File TEST_DIR = new File( + System.getProperty("test.build.data", "/tmp"), "TestClasspath"); + private static final Charset UTF8 = Charset.forName("UTF-8"); + + static { + ExitUtil.disableSystemExit(); + } + + private PrintStream oldStdout, oldStderr; + private ByteArrayOutputStream stdout, stderr; + private PrintStream printStdout, printStderr; + + @Before + public void setUp() { + assertTrue(FileUtil.fullyDelete(TEST_DIR)); + assertTrue(TEST_DIR.mkdirs()); + oldStdout = System.out; + oldStderr = System.err; + + stdout = new ByteArrayOutputStream(); + printStdout = new PrintStream(stdout); + System.setOut(printStdout); + + stderr = new ByteArrayOutputStream(); + printStderr = new PrintStream(stderr); + System.setErr(printStderr); + } + + @After + public void tearDown() { + System.setOut(oldStdout); + System.setErr(oldStderr); + IOUtils.cleanup(LOG, printStdout, printStderr); + assertTrue(FileUtil.fullyDelete(TEST_DIR)); + } + + @Test + public void testGlob() { + Classpath.main(new String[] { "--glob" }); + String strOut = new String(stdout.toByteArray(), UTF8); + assertEquals(System.getProperty("java.class.path"), strOut.trim()); + assertTrue(stderr.toByteArray().length == 0); + } + + @Test + public void testJar() throws IOException { + File file = new File(TEST_DIR, "classpath.jar"); + Classpath.main(new String[] { "--jar", file.getAbsolutePath() }); + assertTrue(stdout.toByteArray().length == 0); + assertTrue(stderr.toByteArray().length == 0); + assertTrue(file.exists()); + assertJar(file); + } + + @Test + public void testJarReplace() throws IOException { + // Run the command twice with the same output jar file, and expect success. + testJar(); + testJar(); + } + + @Test + public void testJarFileMissing() throws IOException { + try { + Classpath.main(new String[] { "--jar" }); + fail("expected exit"); + } catch (ExitUtil.ExitException e) { + assertTrue(stdout.toByteArray().length == 0); + String strErr = new String(stderr.toByteArray(), UTF8); + assertTrue(strErr.contains("requires path of jar")); + } + } + + @Test + public void testHelp() { + Classpath.main(new String[] { "--help" }); + String strOut = new String(stdout.toByteArray(), UTF8); + assertTrue(strOut.contains("Prints the classpath")); + assertTrue(stderr.toByteArray().length == 0); + } + + @Test + public void testHelpShort() { + Classpath.main(new String[] { "-h" }); + String strOut = new String(stdout.toByteArray(), UTF8); + assertTrue(strOut.contains("Prints the classpath")); + assertTrue(stderr.toByteArray().length == 0); + } + + @Test + public void testUnrecognized() { + try { + Classpath.main(new String[] { "--notarealoption" }); + fail("expected exit"); + } catch (ExitUtil.ExitException e) { + assertTrue(stdout.toByteArray().length == 0); + String strErr = new String(stderr.toByteArray(), UTF8); + assertTrue(strErr.contains("unrecognized option")); + } + } + + /** + * Asserts that the specified file is a jar file with a manifest containing a + * non-empty classpath attribute. 
+ * + * @param file File to check + * @throws IOException if there is an I/O error + */ + private static void assertJar(File file) throws IOException { + JarFile jarFile = null; + try { + jarFile = new JarFile(file); + Manifest manifest = jarFile.getManifest(); + assertNotNull(manifest); + Attributes mainAttributes = manifest.getMainAttributes(); + assertNotNull(mainAttributes); + assertTrue(mainAttributes.containsKey(Attributes.Name.CLASS_PATH)); + String classPathAttr = mainAttributes.getValue(Attributes.Name.CLASS_PATH); + assertNotNull(classPathAttr); + assertFalse(classPathAttr.isEmpty()); + } finally { + // It's too bad JarFile doesn't implement Closeable. + if (jarFile != null) { + try { + jarFile.close(); + } catch (IOException e) { + LOG.warn("exception closing jarFile: " + jarFile, e); + } + } + } + } +} diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java index 2b663368737..9c4e7940929 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java @@ -20,6 +20,7 @@ package org.apache.hadoop.crypto.key.kms.server; import org.apache.commons.codec.binary.Base64; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.crypto.key.KeyProvider; +import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion; import org.apache.hadoop.crypto.key.kms.KMSRESTConstants; @@ -27,7 +28,6 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.crypto.key.kms.KMSClientProvider; -import org.apache.hadoop.util.StringUtils; import javax.ws.rs.Consumes; import javax.ws.rs.DELETE; @@ -59,22 +59,25 @@ import java.util.Map; @Path(KMSRESTConstants.SERVICE_VERSION) @InterfaceAudience.Private public class KMS { - private static final String CREATE_KEY = "CREATE_KEY"; - private static final String DELETE_KEY = "DELETE_KEY"; - private static final String ROLL_NEW_VERSION = "ROLL_NEW_VERSION"; - private static final String GET_KEYS = "GET_KEYS"; - private static final String GET_KEYS_METADATA = "GET_KEYS_METADATA"; - private static final String GET_KEY_VERSION = "GET_KEY_VERSION"; - private static final String GET_CURRENT_KEY = "GET_CURRENT_KEY"; - private static final String GET_KEY_VERSIONS = "GET_KEY_VERSIONS"; - private static final String GET_METADATA = "GET_METADATA"; - private static final String GENERATE_EEK = "GENERATE_EEK"; - private static final String DECRYPT_EEK = "DECRYPT_EEK"; + public static final String CREATE_KEY = "CREATE_KEY"; + public static final String DELETE_KEY = "DELETE_KEY"; + public static final String ROLL_NEW_VERSION = "ROLL_NEW_VERSION"; + public static final String GET_KEYS = "GET_KEYS"; + public static final String GET_KEYS_METADATA = "GET_KEYS_METADATA"; + public static final String GET_KEY_VERSIONS = "GET_KEY_VERSIONS"; + public static final String GET_METADATA = "GET_METADATA"; + public static final String GET_KEY_VERSION = "GET_KEY_VERSION"; + public static final String GET_CURRENT_KEY = "GET_CURRENT_KEY"; + public static final 
String GENERATE_EEK = "GENERATE_EEK"; + public static final String DECRYPT_EEK = "DECRYPT_EEK"; + private KeyProviderCryptoExtension provider; + private KMSAudit kmsAudit; public KMS() throws Exception { provider = KMSWebApp.getKeyProvider(); + kmsAudit= KMSWebApp.getKMSAudit(); } private static Principal getPrincipal(SecurityContext securityContext) @@ -86,13 +89,26 @@ public class KMS { return user; } - private static void assertAccess(KMSACLs.Type aclType, Principal principal, + + private static final String UNAUTHORIZED_MSG_WITH_KEY = + "User:{0} not allowed to do ''{1}'' on ''{2}''"; + + private static final String UNAUTHORIZED_MSG_WITHOUT_KEY = + "User:{0} not allowed to do ''{1}''"; + + private void assertAccess(KMSACLs.Type aclType, Principal principal, + String operation) throws AccessControlException { + assertAccess(aclType, principal, operation, null); + } + + private void assertAccess(KMSACLs.Type aclType, Principal principal, String operation, String key) throws AccessControlException { if (!KMSWebApp.getACLs().hasAccess(aclType, principal.getName())) { KMSWebApp.getUnauthorizedCallsMeter().mark(); - KMSAudit.unauthorized(principal, operation, key); + kmsAudit.unauthorized(principal, operation, key); throw new AuthorizationException(MessageFormat.format( - "User:{0} not allowed to do ''{1}'' on ''{2}''", + (key != null) ? UNAUTHORIZED_MSG_WITH_KEY + : UNAUTHORIZED_MSG_WITHOUT_KEY, principal.getName(), operation, key)); } } @@ -149,7 +165,7 @@ public class KMS { provider.flush(); - KMSAudit.ok(user, CREATE_KEY, name, "UserProvidedMaterial:" + + kmsAudit.ok(user, CREATE_KEY, name, "UserProvidedMaterial:" + (material != null) + " Description:" + description); if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user.getName())) { @@ -175,7 +191,7 @@ public class KMS { provider.deleteKey(name); provider.flush(); - KMSAudit.ok(user, DELETE_KEY, name, ""); + kmsAudit.ok(user, DELETE_KEY, name, ""); return Response.ok().build(); } @@ -203,7 +219,7 @@ public class KMS { provider.flush(); - KMSAudit.ok(user, ROLL_NEW_VERSION, name, "UserProvidedMaterial:" + + kmsAudit.ok(user, ROLL_NEW_VERSION, name, "UserProvidedMaterial:" + (material != null) + " NewVersion:" + keyVersion.getVersionName()); if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user.getName())) { @@ -222,11 +238,10 @@ public class KMS { KMSWebApp.getAdminCallsMeter().mark(); Principal user = getPrincipal(securityContext); String[] keyNames = keyNamesList.toArray(new String[keyNamesList.size()]); - String names = StringUtils.arrayToString(keyNames); - assertAccess(KMSACLs.Type.GET_METADATA, user, GET_KEYS_METADATA, names); + assertAccess(KMSACLs.Type.GET_METADATA, user, GET_KEYS_METADATA); KeyProvider.Metadata[] keysMeta = provider.getKeysMetadata(keyNames); Object json = KMSServerJSONUtils.toJSON(keyNames, keysMeta); - KMSAudit.ok(user, GET_KEYS_METADATA, names, ""); + kmsAudit.ok(user, GET_KEYS_METADATA, ""); return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build(); } @@ -237,9 +252,9 @@ public class KMS { throws Exception { KMSWebApp.getAdminCallsMeter().mark(); Principal user = getPrincipal(securityContext); - assertAccess(KMSACLs.Type.GET_KEYS, user, GET_KEYS, "*"); + assertAccess(KMSACLs.Type.GET_KEYS, user, GET_KEYS); Object json = provider.getKeys(); - KMSAudit.ok(user, GET_KEYS, "*", ""); + kmsAudit.ok(user, GET_KEYS, ""); return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build(); } @@ -263,7 +278,7 @@ public class KMS { KMSWebApp.getAdminCallsMeter().mark(); 
assertAccess(KMSACLs.Type.GET_METADATA, user, GET_METADATA, name); Object json = KMSServerJSONUtils.toJSON(name, provider.getMetadata(name)); - KMSAudit.ok(user, GET_METADATA, name, ""); + kmsAudit.ok(user, GET_METADATA, name, ""); return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build(); } @@ -279,7 +294,7 @@ public class KMS { KMSWebApp.getKeyCallsMeter().mark(); assertAccess(KMSACLs.Type.GET, user, GET_CURRENT_KEY, name); Object json = KMSServerJSONUtils.toJSON(provider.getCurrentKey(name)); - KMSAudit.ok(user, GET_CURRENT_KEY, name, ""); + kmsAudit.ok(user, GET_CURRENT_KEY, name, ""); return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build(); } @@ -292,9 +307,12 @@ public class KMS { Principal user = getPrincipal(securityContext); KMSClientProvider.checkNotEmpty(versionName, "versionName"); KMSWebApp.getKeyCallsMeter().mark(); - assertAccess(KMSACLs.Type.GET, user, GET_KEY_VERSION, versionName); - Object json = KMSServerJSONUtils.toJSON(provider.getKeyVersion(versionName)); - KMSAudit.ok(user, GET_KEY_VERSION, versionName, ""); + KeyVersion keyVersion = provider.getKeyVersion(versionName); + assertAccess(KMSACLs.Type.GET, user, GET_KEY_VERSION); + if (keyVersion != null) { + kmsAudit.ok(user, GET_KEY_VERSION, keyVersion.getName(), ""); + } + Object json = KMSServerJSONUtils.toJSON(keyVersion); return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build(); } @@ -327,7 +345,7 @@ public class KMS { } catch (Exception e) { throw new IOException(e); } - KMSAudit.ok(user, GENERATE_EEK, name, ""); + kmsAudit.ok(user, GENERATE_EEK, name, ""); retJSON = new ArrayList(); for (EncryptedKeyVersion edek : retEdeks) { ((ArrayList)retJSON).add(KMSServerJSONUtils.toJSON(edek)); @@ -362,7 +380,7 @@ public class KMS { (String) jsonPayload.get(KMSRESTConstants.MATERIAL_FIELD); Object retJSON; if (eekOp.equals(KMSRESTConstants.EEK_DECRYPT)) { - assertAccess(KMSACLs.Type.DECRYPT_EEK, user, DECRYPT_EEK, versionName); + assertAccess(KMSACLs.Type.DECRYPT_EEK, user, DECRYPT_EEK, keyName); KMSClientProvider.checkNotNull(ivStr, KMSRESTConstants.IV_FIELD); byte[] iv = Base64.decodeBase64(ivStr); KMSClientProvider.checkNotNull(encMaterialStr, @@ -373,7 +391,7 @@ public class KMS { new KMSClientProvider.KMSEncryptedKeyVersion(keyName, versionName, iv, KeyProviderCryptoExtension.EEK, encMaterial)); retJSON = KMSServerJSONUtils.toJSON(retKeyVersion); - KMSAudit.ok(user, DECRYPT_EEK, versionName, ""); + kmsAudit.ok(user, DECRYPT_EEK, keyName, ""); } else { throw new IllegalArgumentException("Wrong " + KMSRESTConstants.EEK_OP + " value, it must be " + KMSRESTConstants.EEK_GENERATE + " or " + @@ -396,7 +414,7 @@ public class KMS { KMSWebApp.getKeyCallsMeter().mark(); assertAccess(KMSACLs.Type.GET, user, GET_KEY_VERSIONS, name); Object json = KMSServerJSONUtils.toJSON(provider.getKeyVersions(name)); - KMSAudit.ok(user, GET_KEY_VERSIONS, name, ""); + kmsAudit.ok(user, GET_KEY_VERSIONS, name, ""); return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build(); } diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java index e212d7d97a1..3d387eb354b 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java @@ -20,43 +20,202 @@ package 
org.apache.hadoop.crypto.key.kms.server; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.base.Joiner; +import com.google.common.base.Strings; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.RemovalListener; +import com.google.common.cache.RemovalNotification; +import com.google.common.collect.Sets; +import com.google.common.util.concurrent.ThreadFactoryBuilder; + import java.security.Principal; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; /** * Provides convenience methods for audit logging consistently the different * types of events. */ public class KMSAudit { + + private static class AuditEvent { + private final AtomicLong accessCount = new AtomicLong(-1); + private final String keyName; + private final String user; + private final String op; + private final String extraMsg; + private final long startTime = System.currentTimeMillis(); + + private AuditEvent(String keyName, String user, String op, String msg) { + this.keyName = keyName; + this.user = user; + this.op = op; + this.extraMsg = msg; + } + + public String getExtraMsg() { + return extraMsg; + } + + public AtomicLong getAccessCount() { + return accessCount; + } + + public String getKeyName() { + return keyName; + } + + public String getUser() { + return user; + } + + public String getOp() { + return op; + } + + public long getStartTime() { + return startTime; + } + } + + public static enum OpStatus { + OK, UNAUTHORIZED, UNAUTHENTICATED, ERROR; + } + + private static Set AGGREGATE_OPS_WHITELIST = Sets.newHashSet( + KMS.GET_KEY_VERSION, KMS.GET_CURRENT_KEY, KMS.DECRYPT_EEK, KMS.GENERATE_EEK + ); + + private Cache cache; + + private ScheduledExecutorService executor; + public static final String KMS_LOGGER_NAME = "kms-audit"; private static Logger AUDIT_LOG = LoggerFactory.getLogger(KMS_LOGGER_NAME); - private static void op(String status, String op, Principal user, String key, - String extraMsg) { - AUDIT_LOG.info("Status:{} User:{} Op:{} Name:{}{}", status, user.getName(), - op, key, extraMsg); + KMSAudit(long delay) { + cache = CacheBuilder.newBuilder() + .expireAfterWrite(delay, TimeUnit.MILLISECONDS) + .removalListener( + new RemovalListener() { + @Override + public void onRemoval( + RemovalNotification entry) { + AuditEvent event = entry.getValue(); + if (event.getAccessCount().get() > 0) { + KMSAudit.this.logEvent(event); + event.getAccessCount().set(0); + KMSAudit.this.cache.put(entry.getKey(), event); + } + } + }).build(); + executor = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder() + .setDaemon(true).setNameFormat(KMS_LOGGER_NAME + "_thread").build()); + executor.scheduleAtFixedRate(new Runnable() { + @Override + public void run() { + cache.cleanUp(); + } + }, delay / 10, delay / 10, TimeUnit.MILLISECONDS); } - public static void ok(Principal user, String op, String key, - String extraMsg) { - op("OK", op, user, key, extraMsg); - } - - public static void unauthorized(Principal user, String op, String key) { - op("UNAUTHORIZED", op, user, key, ""); - } - - public static void error(Principal user, String method, String url, - String extraMsg) { - AUDIT_LOG.info("Status:ERROR User:{} Method:{} URL:{} 
Exception:'{}'", - user.getName(), method, url, extraMsg); - } - - public static void unauthenticated(String remoteHost, String method, - String url, String extraMsg) { + private void logEvent(AuditEvent event) { AUDIT_LOG.info( - "Status:UNAUTHENTICATED RemoteHost:{} Method:{} URL:{} ErrorMsg:'{}'", - remoteHost, method, url, extraMsg); + "OK[op={}, key={}, user={}, accessCount={}, interval={}ms] {}", + event.getOp(), event.getKeyName(), event.getUser(), + event.getAccessCount().get(), + (System.currentTimeMillis() - event.getStartTime()), + event.getExtraMsg()); } + private void op(OpStatus opStatus, final String op, final String user, + final String key, final String extraMsg) { + if (!Strings.isNullOrEmpty(user) && !Strings.isNullOrEmpty(key) + && !Strings.isNullOrEmpty(op) + && AGGREGATE_OPS_WHITELIST.contains(op)) { + String cacheKey = createCacheKey(user, key, op); + if (opStatus == OpStatus.UNAUTHORIZED) { + cache.invalidate(cacheKey); + AUDIT_LOG.info("UNAUTHORIZED[op={}, key={}, user={}] {}", op, key, user, + extraMsg); + } else { + try { + AuditEvent event = cache.get(cacheKey, new Callable() { + @Override + public AuditEvent call() throws Exception { + return new AuditEvent(key, user, op, extraMsg); + } + }); + // Log first access (initialized as -1 so + // incrementAndGet() == 0 implies first access) + if (event.getAccessCount().incrementAndGet() == 0) { + event.getAccessCount().incrementAndGet(); + logEvent(event); + } + } catch (ExecutionException ex) { + throw new RuntimeException(ex); + } + } + } else { + List kvs = new LinkedList(); + if (!Strings.isNullOrEmpty(op)) { + kvs.add("op=" + op); + } + if (!Strings.isNullOrEmpty(key)) { + kvs.add("key=" + key); + } + if (!Strings.isNullOrEmpty(user)) { + kvs.add("user=" + user); + } + if (kvs.size() == 0) { + AUDIT_LOG.info("{} {}", opStatus.toString(), extraMsg); + } else { + String join = Joiner.on(", ").join(kvs); + AUDIT_LOG.info("{}[{}] {}", opStatus.toString(), join, extraMsg); + } + } + } + + public void ok(Principal user, String op, String key, + String extraMsg) { + op(OpStatus.OK, op, user.getName(), key, extraMsg); + } + + public void ok(Principal user, String op, String extraMsg) { + op(OpStatus.OK, op, user.getName(), null, extraMsg); + } + + public void unauthorized(Principal user, String op, String key) { + op(OpStatus.UNAUTHORIZED, op, user.getName(), key, ""); + } + + public void error(Principal user, String method, String url, + String extraMsg) { + op(OpStatus.ERROR, null, user.getName(), null, "Method:'" + method + + "' Exception:'" + extraMsg + "'"); + } + + public void unauthenticated(String remoteHost, String method, + String url, String extraMsg) { + op(OpStatus.UNAUTHENTICATED, null, null, null, "RemoteHost:" + + remoteHost + " Method:" + method + + " URL:" + url + " ErrorMsg:'" + extraMsg + "'"); + } + + private static String createCacheKey(String user, String key, String op) { + return user + "#" + key + "#" + op; + } + + public void shutdown() { + executor.shutdownNow(); + } } diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java index f1872a24e8b..db60b097ee7 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java @@ 
-115,8 +115,10 @@ public class KMSAuthenticationFilter extends AuthenticationFilter { if (queryString != null) { requestURL.append("?").append(queryString); } - KMSAudit.unauthenticated(request.getRemoteHost(), method, - requestURL.toString(), kmsResponse.msg); + + KMSWebApp.getKMSAudit().unauthenticated( + request.getRemoteHost(), method, requestURL.toString(), + kmsResponse.msg); } } diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java index e2b8fc4c093..30d742e7fe8 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java @@ -43,12 +43,17 @@ public class KMSConfiguration { // TImeout for the Current Key cache public static final String CURR_KEY_CACHE_TIMEOUT_KEY = CONFIG_PREFIX + "current.key.cache.timeout.ms"; - + // Delay for Audit logs that need aggregation + public static final String KMS_AUDIT_AGGREGATION_DELAY = CONFIG_PREFIX + + "aggregation.delay.ms"; + public static final boolean KEY_CACHE_ENABLE_DEFAULT = true; // 10 mins public static final long KEY_CACHE_TIMEOUT_DEFAULT = 10 * 60 * 1000; // 30 secs public static final long CURR_KEY_CACHE_TIMEOUT_DEFAULT = 30 * 1000; + // 10 secs + public static final long KMS_AUDIT_AGGREGATION_DELAY_DEFAULT = 10000; static Configuration getConfiguration(boolean loadHadoopDefaults, String ... resources) { diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java index 1c4c32ddb7f..bf24ed8a108 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java @@ -20,9 +20,11 @@ package org.apache.hadoop.crypto.key.kms.server; import org.apache.hadoop.classification.InterfaceAudience; import com.sun.jersey.api.container.ContainerException; + import org.apache.hadoop.crypto.key.kms.KMSRESTConstants; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.authentication.client.AuthenticationException; +import org.apache.hadoop.security.authorize.AuthorizationException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -30,6 +32,7 @@ import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import javax.ws.rs.ext.ExceptionMapper; import javax.ws.rs.ext.Provider; + import java.io.IOException; import java.security.Principal; import java.util.LinkedHashMap; @@ -83,6 +86,10 @@ public class KMSExceptionsProvider implements ExceptionMapper { status = Response.Status.FORBIDDEN; // we don't audit here because we did it already when checking access doAudit = false; + } else if (throwable instanceof AuthorizationException) { + status = Response.Status.UNAUTHORIZED; + // we don't audit here because we did it already when checking access + doAudit = false; } else if (throwable instanceof AccessControlException) { status = Response.Status.FORBIDDEN; } else if (exception instanceof IOException) { @@ -95,7 +102,8 @@ public class KMSExceptionsProvider implements ExceptionMapper { status = 
Response.Status.INTERNAL_SERVER_ERROR; } if (doAudit) { - KMSAudit.error(KMSMDCFilter.getPrincipal(), KMSMDCFilter.getMethod(), + KMSWebApp.getKMSAudit().error(KMSMDCFilter.getPrincipal(), + KMSMDCFilter.getMethod(), KMSMDCFilter.getURL(), getOneLineMessage(exception)); } return createResponse(status, throwable); diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java index d794463ac32..571ab965351 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java @@ -76,6 +76,7 @@ public class KMSWebApp implements ServletContextListener { private static Meter decryptEEKCallsMeter; private static Meter generateEEKCallsMeter; private static Meter invalidCallsMeter; + private static KMSAudit kmsAudit; private static KeyProviderCryptoExtension keyProviderCryptoExtension; static { @@ -144,6 +145,11 @@ public class KMSWebApp implements ServletContextListener { unauthenticatedCallsMeter = metricRegistry.register( UNAUTHENTICATED_CALLS_METER, new Meter()); + kmsAudit = + new KMSAudit(kmsConf.getLong( + KMSConfiguration.KMS_AUDIT_AGGREGATION_DELAY, + KMSConfiguration.KMS_AUDIT_AGGREGATION_DELAY_DEFAULT)); + // this is required for the the JMXJsonServlet to work properly. // the JMXJsonServlet is behind the authentication filter, // thus the '*' ACL. @@ -199,6 +205,7 @@ public class KMSWebApp implements ServletContextListener { @Override public void contextDestroyed(ServletContextEvent sce) { + kmsAudit.shutdown(); acls.stopReloader(); jmxReporter.stop(); jmxReporter.close(); @@ -245,4 +252,8 @@ public class KMSWebApp implements ServletContextListener { public static KeyProviderCryptoExtension getKeyProvider() { return keyProviderCryptoExtension; } + + public static KMSAudit getKMSAudit() { + return kmsAudit; + } } diff --git a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm index 41a2cd968af..ebfe8e2c170 100644 --- a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm +++ b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm @@ -104,6 +104,25 @@ Hadoop Key Management Server (KMS) - Documentation Sets ${project.version} +---+ +** KMS Aggregated Audit logs + + Audit logs are aggregated for API accesses to the GET_KEY_VERSION, + GET_CURRENT_KEY, DECRYPT_EEK, GENERATE_EEK operations. + + Entries are grouped by the (user,key,operation) combined key for a + configurable aggregation interval after which the number of accesses to the + specified end-point by the user for a given key is flushed to the audit log. + + The Aggregation interval is configured via the property : + ++---+ + + hadoop.kms.aggregation.delay.ms + 10000 + ++---+ + + ** Start/Stop the KMS To start/stop KMS use KMS's bin/kms.sh script. 
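Conceptually, the aggregation just described is an expiring cache keyed by the user#key#operation triple. The sketch below illustrates the idea with the same Guava cache primitives KMSAudit uses; the class name AuditAggregationSketch is hypothetical, and it deliberately omits the first-access logging and the scheduled cache.cleanUp() calls the real class needs, since Guava only evicts expired entries lazily:

    import java.util.concurrent.Callable;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicLong;

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.RemovalListener;
    import com.google.common.cache.RemovalNotification;

    public class AuditAggregationSketch {
      private final Cache<String, AtomicLong> counts;

      public AuditAggregationSketch(long delayMs) {
        counts = CacheBuilder.newBuilder()
            .expireAfterWrite(delayMs, TimeUnit.MILLISECONDS)
            .removalListener(new RemovalListener<String, AtomicLong>() {
              @Override
              public void onRemoval(RemovalNotification<String, AtomicLong> n) {
                // Flush one aggregated line when the (user,key,op) entry expires.
                System.out.println("OK[" + n.getKey()
                    + ", accessCount=" + n.getValue().get() + "]");
              }
            })
            .build();
      }

      public void ok(String user, String key, String op) throws Exception {
        // All accesses within the delay window share one cache entry, so only
        // the counter is bumped instead of emitting one log line per request.
        counts.get(user + "#" + key + "#" + op, new Callable<AtomicLong>() {
          @Override
          public AtomicLong call() {
            return new AtomicLong();
          }
        }).incrementAndGet();
      }
    }

With hadoop.kms.aggregation.delay.ms left at its 10000 ms default, a burst of DECRYPT_EEK requests for the same user and key therefore collapses into a few audit lines carrying access counts rather than one line per request.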
For example: diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java new file mode 100644 index 00000000000..b5d9a36d198 --- /dev/null +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java @@ -0,0 +1,134 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.crypto.key.kms.server; + +import java.io.ByteArrayOutputStream; +import java.io.FilterOutputStream; +import java.io.OutputStream; +import java.io.PrintStream; +import java.security.Principal; + +import org.apache.log4j.LogManager; +import org.apache.log4j.PropertyConfigurator; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; + +public class TestKMSAudit { + + private PrintStream originalOut; + private ByteArrayOutputStream memOut; + private FilterOut filterOut; + private PrintStream capturedOut; + + private KMSAudit kmsAudit; + + private static class FilterOut extends FilterOutputStream { + public FilterOut(OutputStream out) { + super(out); + } + + public void setOutputStream(OutputStream out) { + this.out = out; + } + } + + @Before + public void setUp() { + originalOut = System.err; + memOut = new ByteArrayOutputStream(); + filterOut = new FilterOut(memOut); + capturedOut = new PrintStream(filterOut); + System.setErr(capturedOut); + PropertyConfigurator.configure(Thread.currentThread(). 
+ getContextClassLoader() + .getResourceAsStream("log4j-kmsaudit.properties")); + this.kmsAudit = new KMSAudit(1000); + } + + @After + public void cleanUp() { + System.setErr(originalOut); + LogManager.resetConfiguration(); + kmsAudit.shutdown(); + } + + private String getAndResetLogOutput() { + capturedOut.flush(); + String logOutput = new String(memOut.toByteArray()); + memOut = new ByteArrayOutputStream(); + filterOut.setOutputStream(memOut); + return logOutput; + } + + @Test + public void testAggregation() throws Exception { + Principal luser = Mockito.mock(Principal.class); + Mockito.when(luser.getName()).thenReturn("luser"); + kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg"); + kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg"); + kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg"); + kmsAudit.ok(luser, KMS.DELETE_KEY, "k1", "testmsg"); + kmsAudit.ok(luser, KMS.ROLL_NEW_VERSION, "k1", "testmsg"); + kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg"); + kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg"); + kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg"); + Thread.sleep(1500); + kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg"); + Thread.sleep(1500); + String out = getAndResetLogOutput(); + System.out.println(out); + Assert.assertTrue( + out.matches( + "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg" + // Not aggregated !! + + "OK\\[op=DELETE_KEY, key=k1, user=luser\\] testmsg" + + "OK\\[op=ROLL_NEW_VERSION, key=k1, user=luser\\] testmsg" + // Aggregated + + "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=6, interval=[^m]{1,4}ms\\] testmsg" + + "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg")); + } + + @Test + public void testAggregationUnauth() throws Exception { + Principal luser = Mockito.mock(Principal.class); + Mockito.when(luser.getName()).thenReturn("luser"); + kmsAudit.unauthorized(luser, KMS.GENERATE_EEK, "k2"); + Thread.sleep(1000); + kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg"); + kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg"); + kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg"); + kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg"); + kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg"); + kmsAudit.unauthorized(luser, KMS.GENERATE_EEK, "k3"); + kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg"); + Thread.sleep(2000); + String out = getAndResetLogOutput(); + System.out.println(out); + Assert.assertTrue( + out.matches( + "UNAUTHORIZED\\[op=GENERATE_EEK, key=k2, user=luser\\] " + + "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg" + + "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=5, interval=[^m]{1,4}ms\\] testmsg" + + "UNAUTHORIZED\\[op=GENERATE_EEK, key=k3, user=luser\\] " + + "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg")); + } + +} diff --git a/hadoop-common-project/hadoop-kms/src/test/resources/log4j-kmsaudit.properties b/hadoop-common-project/hadoop-kms/src/test/resources/log4j-kmsaudit.properties new file mode 100644 index 00000000000..cca6941d14b --- /dev/null +++ b/hadoop-common-project/hadoop-kms/src/test/resources/log4j-kmsaudit.properties @@ -0,0 +1,25 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# LOG Appender +log4j.appender.kms-audit=org.apache.log4j.ConsoleAppender +log4j.appender.kms-audit.Target=System.err +log4j.appender.kms-audit.layout=org.apache.log4j.PatternLayout +log4j.appender.kms-audit.layout.ConversionPattern=%m + +log4j.rootLogger=INFO, kms-audit \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java index 1650b14724d..cccc464e550 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java @@ -140,7 +140,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { public static final int DEFAULT_UMASK = 0022; public static final FsPermission umask = new FsPermission( (short) DEFAULT_UMASK); - + static final Log LOG = LogFactory.getLog(RpcProgramNfs3.class); private final NfsConfiguration config; @@ -149,14 +149,14 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { private final DFSClientCache clientCache; private final NfsExports exports; - + private final short replication; private final long blockSize; private final int bufferSize; private final boolean aixCompatMode; private Statistics statistics; private String writeDumpDir; // The dir save dump files - + private final RpcCallCache rpcCallCache; public RpcProgramNfs3(NfsConfiguration config, DatagramSocket registrationSocket, @@ -166,11 +166,11 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT), Nfs3Constant.PROGRAM, Nfs3Constant.VERSION, Nfs3Constant.VERSION, registrationSocket, allowInsecurePorts); - + this.config = config; config.set(FsPermission.UMASK_LABEL, "000"); iug = new IdUserGroup(config); - + aixCompatMode = config.getBoolean( NfsConfigKeys.AIX_COMPAT_MODE_KEY, NfsConfigKeys.AIX_COMPAT_MODE_DEFAULT); @@ -184,7 +184,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { bufferSize = config.getInt( CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT); - + writeDumpDir = config.get(NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY, NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_DEFAULT); boolean enableDump = config.getBoolean(NfsConfigKeys.DFS_NFS_FILE_DUMP_KEY, @@ -216,12 +216,23 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { throw new IOException("Cannot create dump directory " + dumpDir); } } - + @Override public void startDaemons() { writeManager.startAsyncDataSerivce(); } - + + // Checks the type of IOException and maps it to appropriate Nfs3Status code. 
+ private int mapErrorStatus(IOException e) { + if (e instanceof FileNotFoundException) { + return Nfs3Status.NFS3ERR_STALE; + } else if (e instanceof AccessControlException) { + return Nfs3Status.NFS3ERR_ACCES; + } else { + return Nfs3Status.NFS3ERR_IO; + } + } + /****************************************************** * RPC call handlers ******************************************************/ @@ -236,20 +247,25 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { @Override public GETATTR3Response getattr(XDR xdr, RpcInfo info) { + return getattr(xdr, getSecurityHandler(info), info.remoteAddress()); + } + + @VisibleForTesting + GETATTR3Response getattr(XDR xdr, SecurityHandler securityHandler, + SocketAddress remoteAddress) { GETATTR3Response response = new GETATTR3Response(Nfs3Status.NFS3_OK); - - if (!checkAccessPrivilege(info, AccessPrivilege.READ_ONLY)) { + + if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) { response.setStatus(Nfs3Status.NFS3ERR_ACCES); return response; } - - SecurityHandler securityHandler = getSecurityHandler(info); + DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } - + GETATTR3Request request = null; try { request = new GETATTR3Request(xdr); @@ -280,7 +296,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } } catch (IOException e) { LOG.info("Can't get file attribute, fileId=" + handle.getFileId(), e); - response.setStatus(Nfs3Status.NFS3ERR_IO); + int status = mapErrorStatus(e); + response.setStatus(status); return response; } if (attrs == null) { @@ -297,7 +314,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { private void setattrInternal(DFSClient dfsClient, String fileIdPath, SetAttr3 newAttr, boolean setMode) throws IOException { EnumSet updateFields = newAttr.getUpdateFields(); - + if (setMode && updateFields.contains(SetAttrField.MODE)) { if (LOG.isDebugEnabled()) { LOG.debug("set new mode:" + newAttr.getMode()); @@ -328,14 +345,19 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { @Override public SETATTR3Response setattr(XDR xdr, RpcInfo info) { + return setattr(xdr, getSecurityHandler(info), info.remoteAddress()); + } + + @VisibleForTesting + SETATTR3Response setattr(XDR xdr, SecurityHandler securityHandler, + SocketAddress remoteAddress) { SETATTR3Response response = new SETATTR3Response(Nfs3Status.NFS3_OK); - SecurityHandler securityHandler = getSecurityHandler(info); DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } - + SETATTR3Request request = null; try { request = new SETATTR3Request(xdr); @@ -373,9 +395,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return new SETATTR3Response(Nfs3Status.NFS3ERR_NOT_SYNC, wccData); } } - + // check the write access privilege - if (!checkAccessPrivilege(info, AccessPrivilege.READ_WRITE)) { + if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) { return new SETATTR3Response(Nfs3Status.NFS3ERR_ACCES, new WccData( preOpWcc, preOpAttr)); } @@ -394,30 +416,33 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } catch (IOException e1) { LOG.info("Can't get postOpAttr for fileIdPath: " + fileIdPath, e1); } - if (e instanceof AccessControlException) { - return new 
SETATTR3Response(Nfs3Status.NFS3ERR_ACCES, wccData); - } else { - return new SETATTR3Response(Nfs3Status.NFS3ERR_IO, wccData); - } + + int status = mapErrorStatus(e); + return new SETATTR3Response(status, wccData); } } @Override public LOOKUP3Response lookup(XDR xdr, RpcInfo info) { + return lookup(xdr, getSecurityHandler(info), info.remoteAddress()); + } + + @VisibleForTesting + LOOKUP3Response lookup(XDR xdr, SecurityHandler securityHandler, + SocketAddress remoteAddress) { LOOKUP3Response response = new LOOKUP3Response(Nfs3Status.NFS3_OK); - - if (!checkAccessPrivilege(info, AccessPrivilege.READ_ONLY)) { + + if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) { response.setStatus(Nfs3Status.NFS3ERR_ACCES); return response; } - - SecurityHandler securityHandler = getSecurityHandler(info); + DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } - + LOOKUP3Request request = null; try { request = new LOOKUP3Request(xdr); @@ -460,26 +485,32 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } catch (IOException e) { LOG.warn("Exception ", e); - return new LOOKUP3Response(Nfs3Status.NFS3ERR_IO); + int status = mapErrorStatus(e); + return new LOOKUP3Response(status); } } - + @Override public ACCESS3Response access(XDR xdr, RpcInfo info) { + return access(xdr, getSecurityHandler(info), info.remoteAddress()); + } + + @VisibleForTesting + ACCESS3Response access(XDR xdr, SecurityHandler securityHandler, + SocketAddress remoteAddress) { ACCESS3Response response = new ACCESS3Response(Nfs3Status.NFS3_OK); - - if (!checkAccessPrivilege(info, AccessPrivilege.READ_ONLY)) { + + if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) { response.setStatus(Nfs3Status.NFS3ERR_ACCES); return response; } - - SecurityHandler securityHandler = getSecurityHandler(info); + DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } - + ACCESS3Request request = null; try { request = new ACCESS3Request(xdr); @@ -493,7 +524,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { if (LOG.isDebugEnabled()) { LOG.debug("NFS ACCESS fileId: " + handle.getFileId()); - } + } try { // HDFS-5804 removed supserUserClient access @@ -506,7 +537,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { int access = Nfs3Utils.getAccessRightsForUserGroup( securityHandler.getUid(), securityHandler.getGid(), securityHandler.getAuxGids(), attrs); - + return new ACCESS3Response(Nfs3Status.NFS3_OK, attrs, access); } catch (RemoteException r) { LOG.warn("Exception ", r); @@ -521,20 +552,26 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } } catch (IOException e) { LOG.warn("Exception ", e); - return new ACCESS3Response(Nfs3Status.NFS3ERR_IO); + int status = mapErrorStatus(e); + return new ACCESS3Response(status); } } @Override public READLINK3Response readlink(XDR xdr, RpcInfo info) { + return readlink(xdr, getSecurityHandler(info), info.remoteAddress()); + } + + @VisibleForTesting + READLINK3Response readlink(XDR xdr, SecurityHandler securityHandler, + SocketAddress remoteAddress) { READLINK3Response response = new READLINK3Response(Nfs3Status.NFS3_OK); - if (!checkAccessPrivilege(info, AccessPrivilege.READ_ONLY)) { + if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) 
{ response.setStatus(Nfs3Status.NFS3ERR_ACCES); return response; } - SecurityHandler securityHandler = getSecurityHandler(info); DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); @@ -588,39 +625,33 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } catch (IOException e) { LOG.warn("Readlink error: " + e.getClass(), e); - if (e instanceof FileNotFoundException) { - return new READLINK3Response(Nfs3Status.NFS3ERR_STALE); - } else if (e instanceof AccessControlException) { - return new READLINK3Response(Nfs3Status.NFS3ERR_ACCES); - } - return new READLINK3Response(Nfs3Status.NFS3ERR_IO); + int status = mapErrorStatus(e); + return new READLINK3Response(status); } } @Override public READ3Response read(XDR xdr, RpcInfo info) { - SecurityHandler securityHandler = getSecurityHandler(info); - SocketAddress remoteAddress = info.remoteAddress(); - return read(xdr, securityHandler, remoteAddress); + return read(xdr, getSecurityHandler(info), info.remoteAddress()); } - + @VisibleForTesting READ3Response read(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) { READ3Response response = new READ3Response(Nfs3Status.NFS3_OK); final String userName = securityHandler.getUser(); - + if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) { response.setStatus(Nfs3Status.NFS3ERR_ACCES); return response; } - + DFSClient dfsClient = clientCache.getDfsClient(userName); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } - + READ3Request request = null; try { @@ -670,7 +701,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return new READ3Response(Nfs3Status.NFS3ERR_ACCES); } } - + // In case there is buffered data for the same file, flush it. This can be // optimized later by reading from the cache. 
int ret = writeManager.commitBeforeRead(dfsClient, handle, offset + count); @@ -725,7 +756,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } catch (IOException e) { LOG.warn("Read error: " + e.getClass() + " offset: " + offset + " count: " + count, e); - return new READ3Response(Nfs3Status.NFS3ERR_IO); + int status = mapErrorStatus(e); + return new READ3Response(status); } } @@ -737,7 +769,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { SocketAddress remoteAddress = info.remoteAddress(); return write(xdr, info.channel(), xid, securityHandler, remoteAddress); } - + @VisibleForTesting WRITE3Response write(XDR xdr, Channel channel, int xid, SecurityHandler securityHandler, SocketAddress remoteAddress) { @@ -748,7 +780,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } - + WRITE3Request request = null; try { @@ -781,13 +813,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { LOG.error("Can't get path for fileId:" + handle.getFileId()); return new WRITE3Response(Nfs3Status.NFS3ERR_STALE); } - + if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) { return new WRITE3Response(Nfs3Status.NFS3ERR_ACCES, new WccData( Nfs3Utils.getWccAttr(preOpAttr), preOpAttr), 0, stableHow, Nfs3Constant.WRITE_COMMIT_VERF); } - + if (LOG.isDebugEnabled()) { LOG.debug("requesed offset=" + offset + " and current filesize=" + preOpAttr.getSize()); @@ -807,8 +839,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } WccAttr attr = preOpAttr == null ? null : Nfs3Utils.getWccAttr(preOpAttr); WccData fileWcc = new WccData(attr, postOpAttr); - return new WRITE3Response(Nfs3Status.NFS3ERR_IO, fileWcc, 0, - request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF); + + int status = mapErrorStatus(e); + return new WRITE3Response(status, fileWcc, 0, request.getStableHow(), + Nfs3Constant.WRITE_COMMIT_VERF); } return null; @@ -816,11 +850,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { @Override public CREATE3Response create(XDR xdr, RpcInfo info) { - SecurityHandler securityHandler = getSecurityHandler(info); - SocketAddress remoteAddress = info.remoteAddress(); - return create(xdr, securityHandler, remoteAddress); + return create(xdr, getSecurityHandler(info), info.remoteAddress()); } - + @VisibleForTesting CREATE3Response create(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) { @@ -830,7 +862,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } - + CREATE3Request request = null; try { @@ -868,7 +900,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { LOG.error("Can't get path for dirHandle:" + dirHandle); return new CREATE3Response(Nfs3Status.NFS3ERR_STALE); } - + if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) { return new CREATE3Response(Nfs3Status.NFS3ERR_ACCES, null, preOpDirAttr, new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), @@ -881,15 +913,15 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { FsPermission permission = setAttr3.getUpdateFields().contains( SetAttrField.MODE) ? new FsPermission((short) setAttr3.getMode()) : FsPermission.getDefault().applyUMask(umask); - + EnumSet flag = (createMode != Nfs3Constant.CREATE_EXCLUSIVE) ? 
EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE) : EnumSet.of(CreateFlag.CREATE); - + fos = new HdfsDataOutputStream(dfsClient.create(fileIdPath, permission, flag, false, replication, blockSize, null, bufferSize, null), statistics); - + if ((createMode == Nfs3Constant.CREATE_UNCHECKED) || (createMode == Nfs3Constant.CREATE_GUARDED)) { // Set group if it's not specified in the request. @@ -903,7 +935,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { postOpObjAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug); dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr), dfsClient, dirFileIdPath, iug); - + // Add open stream OpenFileCtx openFileCtx = new OpenFileCtx(fos, postOpObjAttr, writeDumpDir + "/" + postOpObjAttr.getFileId(), dfsClient, iug, @@ -920,7 +952,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { + fileHandle.getFileId()); } } - + } catch (IOException e) { LOG.error("Exception", e); if (fos != null) { @@ -940,29 +972,30 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { + dirHandle.getFileId(), e1); } } - if (e instanceof AccessControlException) { - return new CREATE3Response(Nfs3Status.NFS3ERR_ACCES, fileHandle, - postOpObjAttr, dirWcc); - } else { - return new CREATE3Response(Nfs3Status.NFS3ERR_IO, fileHandle, - postOpObjAttr, dirWcc); - } + + int status = mapErrorStatus(e); + return new CREATE3Response(status, fileHandle, postOpObjAttr, dirWcc); } - + return new CREATE3Response(Nfs3Status.NFS3_OK, fileHandle, postOpObjAttr, dirWcc); } @Override public MKDIR3Response mkdir(XDR xdr, RpcInfo info) { + return mkdir(xdr, getSecurityHandler(info), info.remoteAddress()); + } + + @VisibleForTesting + MKDIR3Response mkdir(XDR xdr, SecurityHandler securityHandler, + SocketAddress remoteAddress) { MKDIR3Response response = new MKDIR3Response(Nfs3Status.NFS3_OK); - SecurityHandler securityHandler = getSecurityHandler(info); DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } - + MKDIR3Request request = null; try { @@ -992,11 +1025,11 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return new MKDIR3Response(Nfs3Status.NFS3ERR_STALE); } - if (!checkAccessPrivilege(info, AccessPrivilege.READ_WRITE)) { + if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) { return new MKDIR3Response(Nfs3Status.NFS3ERR_ACCES, null, preOpDirAttr, new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), preOpDirAttr)); } - + final String fileIdPath = dirFileIdPath + "/" + fileName; SetAttr3 setAttr3 = request.getObjAttr(); FsPermission permission = setAttr3.getUpdateFields().contains( @@ -1015,7 +1048,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { setAttr3.setGid(securityHandler.getGid()); } setattrInternal(dfsClient, fileIdPath, setAttr3, false); - + postOpObjAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug); objFileHandle = new FileHandle(postOpObjAttr.getFileId()); WccData dirWcc = Nfs3Utils.createWccData( @@ -1032,15 +1065,11 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e); } } + WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), postOpDirAttr); - if (e instanceof AccessControlException) { - return new MKDIR3Response(Nfs3Status.NFS3ERR_PERM, objFileHandle, - postOpObjAttr, dirWcc); - } else { - return new 
MKDIR3Response(Nfs3Status.NFS3ERR_IO, objFileHandle, - postOpObjAttr, dirWcc); - } + int status = mapErrorStatus(e); + return new MKDIR3Response(status, objFileHandle, postOpObjAttr, dirWcc); } } @@ -1048,21 +1077,22 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { public READDIR3Response mknod(XDR xdr, RpcInfo info) { return new READDIR3Response(Nfs3Status.NFS3ERR_NOTSUPP); } - + @Override public REMOVE3Response remove(XDR xdr, RpcInfo info) { return remove(xdr, getSecurityHandler(info), info.remoteAddress()); } - + @VisibleForTesting - REMOVE3Response remove(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) { + REMOVE3Response remove(XDR xdr, SecurityHandler securityHandler, + SocketAddress remoteAddress) { REMOVE3Response response = new REMOVE3Response(Nfs3Status.NFS3_OK); DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } - + REMOVE3Request request = null; try { request = new REMOVE3Request(xdr); @@ -1120,26 +1150,29 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e1); } } + WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), postOpDirAttr); - if (e instanceof AccessControlException) { - return new REMOVE3Response(Nfs3Status.NFS3ERR_PERM, dirWcc); - } else { - return new REMOVE3Response(Nfs3Status.NFS3ERR_IO, dirWcc); - } + int status = mapErrorStatus(e); + return new REMOVE3Response(status, dirWcc); } } @Override public RMDIR3Response rmdir(XDR xdr, RpcInfo info) { + return rmdir(xdr, getSecurityHandler(info), info.remoteAddress()); + } + + @VisibleForTesting + RMDIR3Response rmdir(XDR xdr, SecurityHandler securityHandler, + SocketAddress remoteAddress) { RMDIR3Response response = new RMDIR3Response(Nfs3Status.NFS3_OK); - SecurityHandler securityHandler = getSecurityHandler(info); DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } - + RMDIR3Request request = null; try { request = new RMDIR3Request(xdr); @@ -1164,10 +1197,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { LOG.info("Can't get path for dir fileId:" + dirHandle.getFileId()); return new RMDIR3Response(Nfs3Status.NFS3ERR_STALE); } - + WccData errWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), preOpDirAttr); - if (!checkAccessPrivilege(info, AccessPrivilege.READ_WRITE)) { + if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) { return new RMDIR3Response(Nfs3Status.NFS3ERR_ACCES, errWcc); } @@ -1179,7 +1212,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { if (!fstat.isDir()) { return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTDIR, errWcc); } - + if (fstat.getChildrenNum() > 0) { return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTEMPTY, errWcc); } @@ -1202,26 +1235,29 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e1); } } + WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), postOpDirAttr); - if (e instanceof AccessControlException) { - return new RMDIR3Response(Nfs3Status.NFS3ERR_PERM, dirWcc); - } else { - return new RMDIR3Response(Nfs3Status.NFS3ERR_IO, dirWcc); - } + int status = mapErrorStatus(e); + return new RMDIR3Response(status, dirWcc); } } @Override public 
RENAME3Response rename(XDR xdr, RpcInfo info) { + return rename(xdr, getSecurityHandler(info), info.remoteAddress()); + } + + @VisibleForTesting + RENAME3Response rename(XDR xdr, SecurityHandler securityHandler, + SocketAddress remoteAddress) { RENAME3Response response = new RENAME3Response(Nfs3Status.NFS3_OK); - SecurityHandler securityHandler = getSecurityHandler(info); DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } - + RENAME3Request request = null; try { request = new RENAME3Request(xdr); @@ -1258,8 +1294,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { LOG.info("Can't get path for toHandle fileId:" + toHandle.getFileId()); return new RENAME3Response(Nfs3Status.NFS3ERR_STALE); } - - if (!checkAccessPrivilege(info, AccessPrivilege.READ_WRITE)) { + + if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) { WccData fromWcc = new WccData(Nfs3Utils.getWccAttr(fromPreOpAttr), fromPreOpAttr); WccData toWcc = new WccData(Nfs3Utils.getWccAttr(toPreOpAttr), @@ -1280,7 +1316,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return new RENAME3Response(Nfs3Status.NFS3_OK, fromDirWcc, toDirWcc); } catch (IOException e) { LOG.warn("Exception ", e); - // Try to return correct WccData + // Try to return correct WccData try { fromDirWcc = Nfs3Utils.createWccData( Nfs3Utils.getWccAttr(fromPreOpAttr), dfsClient, fromDirFileIdPath, @@ -1291,25 +1327,27 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { LOG.info("Can't get postOpDirAttr for " + fromDirFileIdPath + " or" + toDirFileIdPath, e1); } - if (e instanceof AccessControlException) { - return new RENAME3Response(Nfs3Status.NFS3ERR_PERM, fromDirWcc, - toDirWcc); - } else { - return new RENAME3Response(Nfs3Status.NFS3ERR_IO, fromDirWcc, toDirWcc); - } + + int status = mapErrorStatus(e); + return new RENAME3Response(status, fromDirWcc, toDirWcc); } } @Override public SYMLINK3Response symlink(XDR xdr, RpcInfo info) { + return symlink(xdr, getSecurityHandler(info), info.remoteAddress()); + } + + @VisibleForTesting + SYMLINK3Response symlink(XDR xdr, SecurityHandler securityHandler, + SocketAddress remoteAddress) { SYMLINK3Response response = new SYMLINK3Response(Nfs3Status.NFS3_OK); - if (!checkAccessPrivilege(info, AccessPrivilege.READ_WRITE)) { + if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) { response.setStatus(Nfs3Status.NFS3ERR_ACCES); return response; } - SecurityHandler securityHandler = getSecurityHandler(info); DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); @@ -1355,7 +1393,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } catch (IOException e) { LOG.warn("Exception:" + e); - response.setStatus(Nfs3Status.NFS3ERR_IO); + int status = mapErrorStatus(e); + response.setStatus(status); return response; } } @@ -1387,28 +1426,27 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } return dlisting; } - + @Override public READDIR3Response readdir(XDR xdr, RpcInfo info) { - SecurityHandler securityHandler = getSecurityHandler(info); - SocketAddress remoteAddress = info.remoteAddress(); - return readdir(xdr, securityHandler, remoteAddress); + return readdir(xdr, getSecurityHandler(info), info.remoteAddress()); } + public READDIR3Response readdir(XDR xdr, 
SecurityHandler securityHandler, SocketAddress remoteAddress) { READDIR3Response response = new READDIR3Response(Nfs3Status.NFS3_OK); - + if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) { response.setStatus(Nfs3Status.NFS3ERR_ACCES); return response; } - + DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } - + READDIR3Request request = null; try { request = new READDIR3Request(xdr); @@ -1427,7 +1465,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { LOG.info("Nonpositive count in invalid READDIR request:" + count); return new READDIR3Response(Nfs3Status.NFS3_OK); } - + if (LOG.isDebugEnabled()) { LOG.debug("NFS READDIR fileId: " + handle.getFileId() + " cookie: " + cookie + " count: " + count); @@ -1492,7 +1530,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { String inodeIdPath = Nfs3Utils.getFileIdPath(cookie); startAfter = inodeIdPath.getBytes(); } - + dlisting = listPaths(dfsClient, dirFileIdPath, startAfter); postOpAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug); if (postOpAttr == null) { @@ -1501,21 +1539,22 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } } catch (IOException e) { LOG.warn("Exception ", e); - return new READDIR3Response(Nfs3Status.NFS3ERR_IO); + int status = mapErrorStatus(e); + return new READDIR3Response(status); } /** * Set up the dirents in the response. fileId is used as the cookie with one * exception. Linux client can either be stuck with "ls" command (on REHL) * or report "Too many levels of symbolic links" (Ubuntu). - * + * * The problem is that, only two items returned, "." and ".." when the * namespace is empty. Both of them are "/" with the same cookie(root * fileId). Linux client doesn't think such a directory is a real directory. * Even though NFS protocol specifies cookie is an opaque data, Linux client * somehow doesn't like an empty dir returns same cookie for both "." and * "..". - * + * * The workaround is to use 0 as the cookie for "." and always return "." as * the first entry in readdir/readdirplus response. */ @@ -1523,7 +1562,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { int n = (int) Math.min(fstatus.length, count-2); boolean eof = (n < fstatus.length) ? 
false : (dlisting .getRemainingEntries() == 0); - + Entry3[] entries; if (cookie == 0) { entries = new Entry3[n + 2]; @@ -1543,7 +1582,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { fstatus[i].getLocalName(), fstatus[i].getFileId()); } } - + DirList3 dirList = new READDIR3Response.DirList3(entries, eof); return new READDIR3Response(Nfs3Status.NFS3_OK, postOpAttr, dirStatus.getModificationTime(), dirList); @@ -1551,9 +1590,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { @Override public READDIRPLUS3Response readdirplus(XDR xdr, RpcInfo info) { - SecurityHandler securityHandler = getSecurityHandler(info); - SocketAddress remoteAddress = info.remoteAddress(); - return readdirplus(xdr, securityHandler, remoteAddress); + return readdirplus(xdr, getSecurityHandler(info), info.remoteAddress()); } @VisibleForTesting @@ -1562,12 +1599,12 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) { return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_ACCES); } - + DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_SERVERFAULT); } - + READDIRPLUS3Request request = null; try { request = new READDIRPLUS3Request(xdr); @@ -1592,7 +1629,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { LOG.info("Nonpositive maxcount in invalid READDIRPLUS request:" + maxCount); return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL); } - + if (LOG.isDebugEnabled()) { LOG.debug("NFS READDIRPLUS fileId: " + handle.getFileId() + " cookie: " + cookie + " dirCount: " + dirCount + " maxCount: " + maxCount); @@ -1655,7 +1692,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { String inodeIdPath = Nfs3Utils.getFileIdPath(cookie); startAfter = inodeIdPath.getBytes(); } - + dlisting = listPaths(dfsClient, dirFileIdPath, startAfter); postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug); if (postOpDirAttr == null) { @@ -1664,19 +1701,20 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } } catch (IOException e) { LOG.warn("Exception ", e); - return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_IO); + int status = mapErrorStatus(e); + return new READDIRPLUS3Response(status); } - + // Set up the dirents in the response HdfsFileStatus[] fstatus = dlisting.getPartialListing(); int n = (int) Math.min(fstatus.length, dirCount-2); boolean eof = (n < fstatus.length) ? 
false : (dlisting .getRemainingEntries() == 0); - + READDIRPLUS3Response.EntryPlus3[] entries; if (cookie == 0) { entries = new READDIRPLUS3Response.EntryPlus3[n+2]; - + entries[0] = new READDIRPLUS3Response.EntryPlus3( postOpDirAttr.getFileId(), ".", 0, postOpDirAttr, new FileHandle( postOpDirAttr.getFileId())); @@ -1720,23 +1758,28 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return new READDIRPLUS3Response(Nfs3Status.NFS3_OK, postOpDirAttr, dirStatus.getModificationTime(), dirListPlus); } - + @Override public FSSTAT3Response fsstat(XDR xdr, RpcInfo info) { + return fsstat(xdr, getSecurityHandler(info), info.remoteAddress()); + } + + @VisibleForTesting + FSSTAT3Response fsstat(XDR xdr, SecurityHandler securityHandler, + SocketAddress remoteAddress) { FSSTAT3Response response = new FSSTAT3Response(Nfs3Status.NFS3_OK); - - if (!checkAccessPrivilege(info, AccessPrivilege.READ_ONLY)) { + + if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) { response.setStatus(Nfs3Status.NFS3ERR_ACCES); return response; } - - SecurityHandler securityHandler = getSecurityHandler(info); + DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } - + FSSTAT3Request request = null; try { request = new FSSTAT3Request(xdr); @@ -1754,14 +1797,14 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { FsStatus fsStatus = dfsClient.getDiskStatus(); long totalBytes = fsStatus.getCapacity(); long freeBytes = fsStatus.getRemaining(); - + Nfs3FileAttributes attrs = writeManager.getFileAttr(dfsClient, handle, iug); if (attrs == null) { LOG.info("Can't get path for fileId:" + handle.getFileId()); return new FSSTAT3Response(Nfs3Status.NFS3ERR_STALE); } - + long maxFsObjects = config.getLong("dfs.max.objects", 0); if (maxFsObjects == 0) { // A value of zero in HDFS indicates no limit to the number @@ -1769,7 +1812,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { // Long.MAX_VALUE so 32bit client won't complain. 
maxFsObjects = Integer.MAX_VALUE; } - + return new FSSTAT3Response(Nfs3Status.NFS3_OK, attrs, totalBytes, freeBytes, freeBytes, maxFsObjects, maxFsObjects, maxFsObjects, 0); } catch (RemoteException r) { @@ -1785,26 +1828,32 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } } catch (IOException e) { LOG.warn("Exception ", e); - return new FSSTAT3Response(Nfs3Status.NFS3ERR_IO); + int status = mapErrorStatus(e); + return new FSSTAT3Response(status); } } @Override public FSINFO3Response fsinfo(XDR xdr, RpcInfo info) { + return fsinfo(xdr, getSecurityHandler(info), info.remoteAddress()); + } + + @VisibleForTesting + FSINFO3Response fsinfo(XDR xdr, SecurityHandler securityHandler, + SocketAddress remoteAddress) { FSINFO3Response response = new FSINFO3Response(Nfs3Status.NFS3_OK); - - if (!checkAccessPrivilege(info, AccessPrivilege.READ_ONLY)) { + + if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) { response.setStatus(Nfs3Status.NFS3ERR_ACCES); return response; } - - SecurityHandler securityHandler = getSecurityHandler(info); + DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } - + FSINFO3Request request = null; try { request = new FSINFO3Request(xdr); @@ -1835,7 +1884,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { LOG.info("Can't get path for fileId:" + handle.getFileId()); return new FSINFO3Response(Nfs3Status.NFS3ERR_STALE); } - + int fsProperty = Nfs3Constant.FSF3_CANSETTIME | Nfs3Constant.FSF3_HOMOGENEOUS; @@ -1843,26 +1892,32 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { wtmax, wtmax, 1, dtperf, Long.MAX_VALUE, new NfsTime(1), fsProperty); } catch (IOException e) { LOG.warn("Exception ", e); - return new FSINFO3Response(Nfs3Status.NFS3ERR_IO); + int status = mapErrorStatus(e); + return new FSINFO3Response(status); } } @Override public PATHCONF3Response pathconf(XDR xdr, RpcInfo info) { + return pathconf(xdr, getSecurityHandler(info), info.remoteAddress()); + } + + @VisibleForTesting + PATHCONF3Response pathconf(XDR xdr, SecurityHandler securityHandler, + SocketAddress remoteAddress) { PATHCONF3Response response = new PATHCONF3Response(Nfs3Status.NFS3_OK); - - if (!checkAccessPrivilege(info, AccessPrivilege.READ_ONLY)) { + + if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) { response.setStatus(Nfs3Status.NFS3ERR_ACCES); return response; } - - SecurityHandler securityHandler = getSecurityHandler(info); + DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } - + PATHCONF3Request request = null; try { request = new PATHCONF3Request(xdr); @@ -1890,22 +1945,30 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { HdfsConstants.MAX_PATH_LENGTH, true, false, false, true); } catch (IOException e) { LOG.warn("Exception ", e); - return new PATHCONF3Response(Nfs3Status.NFS3ERR_IO); + int status = mapErrorStatus(e); + return new PATHCONF3Response(status); } } @Override public COMMIT3Response commit(XDR xdr, RpcInfo info) { - //Channel channel, int xid, - // SecurityHandler securityHandler, InetAddress client) { - COMMIT3Response response = new COMMIT3Response(Nfs3Status.NFS3_OK); SecurityHandler securityHandler = getSecurityHandler(info); + RpcCall rpcCall = (RpcCall) info.header(); + int xid = rpcCall.getXid(); + 
SocketAddress remoteAddress = info.remoteAddress(); + return commit(xdr, info.channel(), xid, securityHandler, remoteAddress); + } + + @VisibleForTesting + COMMIT3Response commit(XDR xdr, Channel channel, int xid, + SecurityHandler securityHandler, SocketAddress remoteAddress) { + COMMIT3Response response = new COMMIT3Response(Nfs3Status.NFS3_OK); DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } - + COMMIT3Request request = null; try { request = new COMMIT3Request(xdr); @@ -1929,21 +1992,19 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { LOG.info("Can't get path for fileId:" + handle.getFileId()); return new COMMIT3Response(Nfs3Status.NFS3ERR_STALE); } - - if (!checkAccessPrivilege(info, AccessPrivilege.READ_WRITE)) { + + if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) { return new COMMIT3Response(Nfs3Status.NFS3ERR_ACCES, new WccData( Nfs3Utils.getWccAttr(preOpAttr), preOpAttr), Nfs3Constant.WRITE_COMMIT_VERF); } - + long commitOffset = (request.getCount() == 0) ? 0 : (request.getOffset() + request.getCount()); - + // Insert commit as an async request - RpcCall rpcCall = (RpcCall) info.header(); - int xid = rpcCall.getXid(); writeManager.handleCommit(dfsClient, handle, commitOffset, - info.channel(), xid, preOpAttr); + channel, xid, preOpAttr); return null; } catch (IOException e) { LOG.warn("Exception ", e); @@ -1953,9 +2014,11 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } catch (IOException e1) { LOG.info("Can't get postOpAttr for fileId: " + handle.getFileId(), e1); } + WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr); - return new COMMIT3Response(Nfs3Status.NFS3ERR_IO, fileWcc, - Nfs3Constant.WRITE_COMMIT_VERF); + int status = mapErrorStatus(e); + return new COMMIT3Response(status, fileWcc, + Nfs3Constant.WRITE_COMMIT_VERF); } } @@ -1973,7 +2036,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { RpcCall rpcCall = (RpcCall) info.header(); return getSecurityHandler(rpcCall.getCredential(), rpcCall.getVerifier()); } - + @Override public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) { RpcCall rpcCall = (RpcCall) info.header(); @@ -1986,7 +2049,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { InetAddress client = ((InetSocketAddress) info.remoteAddress()) .getAddress(); Credentials credentials = rpcCall.getCredential(); - + // Ignore auth only for NFSPROC3_NULL, especially for Linux clients. 
if (nfsproc3 != NFSPROC3.NULL) { if (credentials.getFlavor() != AuthFlavor.AUTH_SYS @@ -2023,7 +2086,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } } } - + NFS3Response response = null; if (nfsproc3 == NFSPROC3.NULL) { response = nullProcedure(); @@ -2040,7 +2103,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } else if (nfsproc3 == NFSPROC3.READ) { if (LOG.isDebugEnabled()) { LOG.debug(Nfs3Utils.READ_RPC_START + xid); - } + } response = read(xdr, info); if (LOG.isDebugEnabled() && (nfsproc3 == NFSPROC3.READ)) { LOG.debug(Nfs3Utils.READ_RPC_END + xid); @@ -2053,7 +2116,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { // Write end debug trace is in Nfs3Utils.writeChannel } else if (nfsproc3 == NFSPROC3.CREATE) { response = create(xdr, info); - } else if (nfsproc3 == NFSPROC3.MKDIR) { + } else if (nfsproc3 == NFSPROC3.MKDIR) { response = mkdir(xdr, info); } else if (nfsproc3 == NFSPROC3.SYMLINK) { response = symlink(xdr, info); @@ -2104,18 +2167,12 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { RpcUtil.sendRpcResponse(ctx, rsp); } - + @Override protected boolean isIdempotent(RpcCall call) { - final NFSPROC3 nfsproc3 = NFSPROC3.fromValue(call.getProcedure()); + final NFSPROC3 nfsproc3 = NFSPROC3.fromValue(call.getProcedure()); return nfsproc3 == null || nfsproc3.isIdempotent(); } - - private boolean checkAccessPrivilege(RpcInfo info, - final AccessPrivilege expected) { - SocketAddress remoteAddress = info.remoteAddress(); - return checkAccessPrivilege(remoteAddress, expected); - } private boolean checkAccessPrivilege(SocketAddress remoteAddress, final AccessPrivilege expected) { @@ -2139,7 +2196,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } return true; } - + @VisibleForTesting WriteManager getWriteManager() { return this.writeManager; diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java index 4634e4bae68..e89929b889f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java @@ -18,19 +18,603 @@ package org.apache.hadoop.hdfs.nfs.nfs3; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertEquals; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import org.jboss.netty.channel.Channel; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.mockito.Mockito; import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; +import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys; +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.Nfs3Constant; -import org.junit.Assert; -import org.junit.Test; +import 
org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow; +import org.apache.hadoop.nfs.nfs3.Nfs3Status; +import org.apache.hadoop.nfs.nfs3.request.LOOKUP3Request; +import org.apache.hadoop.nfs.nfs3.request.READ3Request; +import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; +import org.apache.hadoop.nfs.nfs3.response.ACCESS3Response; +import org.apache.hadoop.nfs.nfs3.response.COMMIT3Response; +import org.apache.hadoop.nfs.nfs3.response.CREATE3Response; +import org.apache.hadoop.nfs.nfs3.response.FSSTAT3Response; +import org.apache.hadoop.nfs.nfs3.response.FSINFO3Response; +import org.apache.hadoop.nfs.nfs3.response.GETATTR3Response; +import org.apache.hadoop.nfs.nfs3.response.LOOKUP3Response; +import org.apache.hadoop.nfs.nfs3.response.PATHCONF3Response; +import org.apache.hadoop.nfs.nfs3.response.READ3Response; +import org.apache.hadoop.nfs.nfs3.response.REMOVE3Response; +import org.apache.hadoop.nfs.nfs3.response.RMDIR3Response; +import org.apache.hadoop.nfs.nfs3.response.RENAME3Response; +import org.apache.hadoop.nfs.nfs3.response.READDIR3Response; +import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response; +import org.apache.hadoop.nfs.nfs3.response.READLINK3Response; +import org.apache.hadoop.nfs.nfs3.response.SETATTR3Response; +import org.apache.hadoop.nfs.nfs3.response.SYMLINK3Response; +import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; +import org.apache.hadoop.nfs.nfs3.request.SetAttr3; +import org.apache.hadoop.oncrpc.XDR; +import org.apache.hadoop.oncrpc.security.SecurityHandler; +import org.apache.hadoop.security.authorize.DefaultImpersonationProvider; +import org.apache.hadoop.security.authorize.ProxyUsers; /** * Tests for {@link RpcProgramNfs3} */ public class TestRpcProgramNfs3 { + static DistributedFileSystem hdfs; + static MiniDFSCluster cluster = null; + static NfsConfiguration config = new NfsConfiguration(); + static NameNode nn; + static Nfs3 nfs; + static RpcProgramNfs3 nfsd; + static SecurityHandler securityHandler; + static SecurityHandler securityHandlerUnpriviledged; + static String testdir = "/tmp"; + + @BeforeClass + public static void setup() throws Exception { + String currentUser = System.getProperty("user.name"); + + config.set("fs.permissions.umask-mode", "u=rwx,g=,o="); + config.set(DefaultImpersonationProvider.getTestProvider() + .getProxySuperuserGroupConfKey(currentUser), "*"); + config.set(DefaultImpersonationProvider.getTestProvider() + .getProxySuperuserIpConfKey(currentUser), "*"); + ProxyUsers.refreshSuperUserGroupsConfiguration(config); + + cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build(); + cluster.waitActive(); + hdfs = cluster.getFileSystem(); + nn = cluster.getNameNode(); + + // Use ephemeral ports in case tests are running in parallel + config.setInt("nfs3.mountd.port", 0); + config.setInt("nfs3.server.port", 0); + + // Start NFS with allowed.hosts set to "* rw" + config.set("dfs.nfs.exports.allowed.hosts", "* rw"); + nfs = new Nfs3(config); + nfs.startServiceInternal(false); + nfsd = (RpcProgramNfs3) nfs.getRpcProgram(); + + + // Mock SecurityHandler which returns system user.name + securityHandler = Mockito.mock(SecurityHandler.class); + Mockito.when(securityHandler.getUser()).thenReturn(currentUser); + + // Mock SecurityHandler which returns a dummy username "harry" + securityHandlerUnpriviledged = Mockito.mock(SecurityHandler.class); + Mockito.when(securityHandlerUnpriviledged.getUser()).thenReturn("harry"); + } + + @AfterClass + public static void shutdown() throws Exception { + if (cluster != null) 
{ + cluster.shutdown(); + } + } + + @Before + public void createFiles() throws IllegalArgumentException, IOException { + hdfs.delete(new Path(testdir), true); + hdfs.mkdirs(new Path(testdir)); + hdfs.mkdirs(new Path(testdir + "/foo")); + DFSTestUtil.createFile(hdfs, new Path(testdir + "/bar"), 0, (short) 1, 0); + } + + @Test(timeout = 60000) + public void testGetattr() throws Exception { + HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar"); + long dirId = status.getFileId(); + FileHandle handle = new FileHandle(dirId); + XDR xdr_req = new XDR(); + handle.serialize(xdr_req); + + // Attempt by an unpriviledged user should fail. + GETATTR3Response response1 = nfsd.getattr(xdr_req.asReadOnlyWrap(), + securityHandlerUnpriviledged, + new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES, + response1.getStatus()); + + // Attempt by a priviledged user should pass. + GETATTR3Response response2 = nfsd.getattr(xdr_req.asReadOnlyWrap(), + securityHandler, new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code", Nfs3Status.NFS3_OK, + response2.getStatus()); + } + + @Test(timeout = 60000) + public void testSetattr() throws Exception { + HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); + long dirId = status.getFileId(); + XDR xdr_req = new XDR(); + FileHandle handle = new FileHandle(dirId); + handle.serialize(xdr_req); + xdr_req.writeString("bar"); + SetAttr3 symAttr = new SetAttr3(); + symAttr.serialize(xdr_req); + xdr_req.writeBoolean(false); + + // Attempt by an unpriviledged user should fail. + SETATTR3Response response1 = nfsd.setattr(xdr_req.asReadOnlyWrap(), + securityHandlerUnpriviledged, + new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES, + response1.getStatus()); + + // Attempt by a priviledged user should pass. + SETATTR3Response response2 = nfsd.setattr(xdr_req.asReadOnlyWrap(), + securityHandler, new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code", Nfs3Status.NFS3_OK, + response2.getStatus()); + } + + @Test(timeout = 60000) + public void testLookup() throws Exception { + HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); + long dirId = status.getFileId(); + FileHandle handle = new FileHandle(dirId); + LOOKUP3Request lookupReq = new LOOKUP3Request(handle, "bar"); + XDR xdr_req = new XDR(); + lookupReq.serialize(xdr_req); + + // Attempt by an unpriviledged user should fail. + LOOKUP3Response response1 = nfsd.lookup(xdr_req.asReadOnlyWrap(), + securityHandlerUnpriviledged, + new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES, + response1.getStatus()); + + // Attempt by a priviledged user should pass. + LOOKUP3Response response2 = nfsd.lookup(xdr_req.asReadOnlyWrap(), + securityHandler, new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code", Nfs3Status.NFS3_OK, + response2.getStatus()); + } + + @Test(timeout = 60000) + public void testAccess() throws Exception { + HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar"); + long dirId = status.getFileId(); + FileHandle handle = new FileHandle(dirId); + XDR xdr_req = new XDR(); + handle.serialize(xdr_req); + + // Attempt by an unpriviledged user should fail. 
+ ACCESS3Response response1 = nfsd.access(xdr_req.asReadOnlyWrap(), + securityHandlerUnpriviledged, + new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES, + response1.getStatus()); + + // Attempt by a priviledged user should pass. + ACCESS3Response response2 = nfsd.access(xdr_req.asReadOnlyWrap(), + securityHandler, new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code", Nfs3Status.NFS3_OK, + response2.getStatus()); + } + + @Test(timeout = 60000) + public void testReadlink() throws Exception { + // Create a symlink first. + HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); + long dirId = status.getFileId(); + XDR xdr_req = new XDR(); + FileHandle handle = new FileHandle(dirId); + handle.serialize(xdr_req); + xdr_req.writeString("fubar"); + SetAttr3 symAttr = new SetAttr3(); + symAttr.serialize(xdr_req); + xdr_req.writeString("bar"); + + SYMLINK3Response response = nfsd.symlink(xdr_req.asReadOnlyWrap(), + securityHandler, new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, + response.getStatus()); + + // Now perform readlink operations. + FileHandle handle2 = response.getObjFileHandle(); + XDR xdr_req2 = new XDR(); + handle2.serialize(xdr_req2); + + // Attempt by an unpriviledged user should fail. + READLINK3Response response1 = nfsd.readlink(xdr_req2.asReadOnlyWrap(), + securityHandlerUnpriviledged, + new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, + response1.getStatus()); + + // Attempt by a priviledged user should pass. + READLINK3Response response2 = nfsd.readlink(xdr_req2.asReadOnlyWrap(), + securityHandler, new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, + response2.getStatus()); + } + + @Test(timeout = 60000) + public void testRead() throws Exception { + HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar"); + long dirId = status.getFileId(); + FileHandle handle = new FileHandle(dirId); + + READ3Request readReq = new READ3Request(handle, 0, 5); + XDR xdr_req = new XDR(); + readReq.serialize(xdr_req); + + // Attempt by an unpriviledged user should fail. + /* Hits HDFS-6582. It needs to be fixed first. + READ3Response response1 = nfsd.read(xdr_req.asReadOnlyWrap(), + securityHandlerUnpriviledged, + new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, + response1.getStatus()); + */ + + // Attempt by a priviledged user should pass. + READ3Response response2 = nfsd.read(xdr_req.asReadOnlyWrap(), + securityHandler, new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, + response2.getStatus()); + } + + @Test(timeout = 60000) + public void testWrite() throws Exception { + HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar"); + long dirId = status.getFileId(); + FileHandle handle = new FileHandle(dirId); + + byte[] buffer = new byte[10]; + for (int i = 0; i < 10; i++) { + buffer[i] = (byte) i; + } + + WRITE3Request writeReq = new WRITE3Request(handle, 0, 10, + WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer)); + XDR xdr_req = new XDR(); + writeReq.serialize(xdr_req); + + // Attempt by an unpriviledged user should fail. 
+ WRITE3Response response1 = nfsd.write(xdr_req.asReadOnlyWrap(), + null, 1, securityHandlerUnpriviledged, + new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, + response1.getStatus()); + + // Attempt by a priviledged user should pass. + WRITE3Response response2 = nfsd.write(xdr_req.asReadOnlyWrap(), + null, 1, securityHandler, + new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect response:", null, response2); + } + + @Test(timeout = 60000) + public void testCreate() throws Exception { + HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); + long dirId = status.getFileId(); + XDR xdr_req = new XDR(); + FileHandle handle = new FileHandle(dirId); + handle.serialize(xdr_req); + xdr_req.writeString("fubar"); + xdr_req.writeInt(Nfs3Constant.CREATE_UNCHECKED); + SetAttr3 symAttr = new SetAttr3(); + symAttr.serialize(xdr_req); + + // Attempt by an unpriviledged user should fail. + CREATE3Response response1 = nfsd.create(xdr_req.asReadOnlyWrap(), + securityHandlerUnpriviledged, + new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, + response1.getStatus()); + + // Attempt by a priviledged user should pass. + CREATE3Response response2 = nfsd.create(xdr_req.asReadOnlyWrap(), + securityHandler, new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, + response2.getStatus()); + } + + @Test(timeout = 60000) + public void testMkdir() throws Exception { + HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); + long dirId = status.getFileId(); + XDR xdr_req = new XDR(); + FileHandle handle = new FileHandle(dirId); + handle.serialize(xdr_req); + xdr_req.writeString("fubar"); + SetAttr3 symAttr = new SetAttr3(); + symAttr.serialize(xdr_req); + xdr_req.writeString("bar"); + + // Attempt to remove by an unpriviledged user should fail. + SYMLINK3Response response1 = nfsd.symlink(xdr_req.asReadOnlyWrap(), + securityHandlerUnpriviledged, + new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, + response1.getStatus()); + + // Attempt to remove by a priviledged user should pass. + SYMLINK3Response response2 = nfsd.symlink(xdr_req.asReadOnlyWrap(), + securityHandler, new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, + response2.getStatus()); + } + + @Test(timeout = 60000) + public void testSymlink() throws Exception { + HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); + long dirId = status.getFileId(); + XDR xdr_req = new XDR(); + FileHandle handle = new FileHandle(dirId); + handle.serialize(xdr_req); + xdr_req.writeString("fubar"); + SetAttr3 symAttr = new SetAttr3(); + symAttr.serialize(xdr_req); + xdr_req.writeString("bar"); + + // Attempt by an unpriviledged user should fail. + SYMLINK3Response response1 = nfsd.symlink(xdr_req.asReadOnlyWrap(), + securityHandlerUnpriviledged, + new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, + response1.getStatus()); + + // Attempt by a priviledged user should pass. 
+ SYMLINK3Response response2 = nfsd.symlink(xdr_req.asReadOnlyWrap(), + securityHandler, new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, + response2.getStatus()); + } + + @Test(timeout = 60000) + public void testRemove() throws Exception { + HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); + long dirId = status.getFileId(); + XDR xdr_req = new XDR(); + FileHandle handle = new FileHandle(dirId); + handle.serialize(xdr_req); + xdr_req.writeString("bar"); + + // Attempt by an unpriviledged user should fail. + REMOVE3Response response1 = nfsd.remove(xdr_req.asReadOnlyWrap(), + securityHandlerUnpriviledged, + new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, + response1.getStatus()); + + // Attempt by a priviledged user should pass. + REMOVE3Response response2 = nfsd.remove(xdr_req.asReadOnlyWrap(), + securityHandler, new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, + response2.getStatus()); + } + + @Test(timeout = 60000) + public void testRmdir() throws Exception { + HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); + long dirId = status.getFileId(); + XDR xdr_req = new XDR(); + FileHandle handle = new FileHandle(dirId); + handle.serialize(xdr_req); + xdr_req.writeString("foo"); + + // Attempt by an unpriviledged user should fail. + RMDIR3Response response1 = nfsd.rmdir(xdr_req.asReadOnlyWrap(), + securityHandlerUnpriviledged, + new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, + response1.getStatus()); + + // Attempt by a priviledged user should pass. + RMDIR3Response response2 = nfsd.rmdir(xdr_req.asReadOnlyWrap(), + securityHandler, new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, + response2.getStatus()); + } + + @Test(timeout = 60000) + public void testRename() throws Exception { + HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); + long dirId = status.getFileId(); + XDR xdr_req = new XDR(); + FileHandle handle = new FileHandle(dirId); + handle.serialize(xdr_req); + xdr_req.writeString("bar"); + handle.serialize(xdr_req); + xdr_req.writeString("fubar"); + + // Attempt by an unpriviledged user should fail. + RENAME3Response response1 = nfsd.rename(xdr_req.asReadOnlyWrap(), + securityHandlerUnpriviledged, + new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, + response1.getStatus()); + + // Attempt by a priviledged user should pass. + RENAME3Response response2 = nfsd.rename(xdr_req.asReadOnlyWrap(), + securityHandler, new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, + response2.getStatus()); + } + + @Test(timeout = 60000) + public void testReaddir() throws Exception { + HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); + long dirId = status.getFileId(); + FileHandle handle = new FileHandle(dirId); + XDR xdr_req = new XDR(); + handle.serialize(xdr_req); + xdr_req.writeLongAsHyper(0); + xdr_req.writeLongAsHyper(0); + xdr_req.writeInt(100); + + // Attempt by an unpriviledged user should fail. 
+ READDIR3Response response1 = nfsd.readdir(xdr_req.asReadOnlyWrap(), + securityHandlerUnpriviledged, + new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, + response1.getStatus()); + + // Attempt by a priviledged user should pass. + READDIR3Response response2 = nfsd.readdir(xdr_req.asReadOnlyWrap(), + securityHandler, new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, + response2.getStatus()); + } + + @Test(timeout = 60000) + public void testReaddirplus() throws Exception { + HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); + long dirId = status.getFileId(); + FileHandle handle = new FileHandle(dirId); + XDR xdr_req = new XDR(); + handle.serialize(xdr_req); + xdr_req.writeLongAsHyper(0); + xdr_req.writeLongAsHyper(0); + xdr_req.writeInt(3); + xdr_req.writeInt(2); + + // Attempt by an unpriviledged user should fail. + READDIRPLUS3Response response1 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), + securityHandlerUnpriviledged, + new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, + response1.getStatus()); + + // Attempt by a priviledged user should pass. + READDIRPLUS3Response response2 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), + securityHandler, new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, + response2.getStatus()); + } + + @Test(timeout = 60000) + public void testFsstat() throws Exception { + HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar"); + long dirId = status.getFileId(); + FileHandle handle = new FileHandle(dirId); + XDR xdr_req = new XDR(); + handle.serialize(xdr_req); + + // Attempt by an unpriviledged user should fail. + FSSTAT3Response response1 = nfsd.fsstat(xdr_req.asReadOnlyWrap(), + securityHandlerUnpriviledged, + new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, + response1.getStatus()); + + // Attempt by a priviledged user should pass. + FSSTAT3Response response2 = nfsd.fsstat(xdr_req.asReadOnlyWrap(), + securityHandler, new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, + response2.getStatus()); + } + + @Test(timeout = 60000) + public void testFsinfo() throws Exception { + HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar"); + long dirId = status.getFileId(); + FileHandle handle = new FileHandle(dirId); + XDR xdr_req = new XDR(); + handle.serialize(xdr_req); + + // Attempt by an unpriviledged user should fail. + FSINFO3Response response1 = nfsd.fsinfo(xdr_req.asReadOnlyWrap(), + securityHandlerUnpriviledged, + new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, + response1.getStatus()); + + // Attempt by a priviledged user should pass. + FSINFO3Response response2 = nfsd.fsinfo(xdr_req.asReadOnlyWrap(), + securityHandler, new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, + response2.getStatus()); + } + + @Test(timeout = 60000) + public void testPathconf() throws Exception { + HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar"); + long dirId = status.getFileId(); + FileHandle handle = new FileHandle(dirId); + XDR xdr_req = new XDR(); + handle.serialize(xdr_req); + + // Attempt by an unpriviledged user should fail. 
+ PATHCONF3Response response1 = nfsd.pathconf(xdr_req.asReadOnlyWrap(), + securityHandlerUnpriviledged, + new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, + response1.getStatus()); + + // Attempt by a priviledged user should pass. + PATHCONF3Response response2 = nfsd.pathconf(xdr_req.asReadOnlyWrap(), + securityHandler, new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, + response2.getStatus()); + } + + @Test(timeout = 60000) + public void testCommit() throws Exception { + HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar"); + long dirId = status.getFileId(); + FileHandle handle = new FileHandle(dirId); + XDR xdr_req = new XDR(); + handle.serialize(xdr_req); + xdr_req.writeLongAsHyper(0); + xdr_req.writeInt(5); + + Channel ch = Mockito.mock(Channel.class); + + // Attempt by an unpriviledged user should fail. + COMMIT3Response response1 = nfsd.commit(xdr_req.asReadOnlyWrap(), + ch, 1, securityHandlerUnpriviledged, + new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, + response1.getStatus()); + + // Attempt by a priviledged user should pass. + COMMIT3Response response2 = nfsd.commit(xdr_req.asReadOnlyWrap(), + ch, 1, securityHandler, + new InetSocketAddress("localhost", 1234)); + assertEquals("Incorrect COMMIT3Response:", null, response2); + } + @Test(timeout=1000) public void testIdempotent() { Object[][] procedures = { diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index d9751c87598..94e6e3fa6ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -130,6 +130,9 @@ Trunk (Unreleased) HDFS-6609. Use DirectorySnapshottableFeature to represent a snapshottable directory. (Jing Zhao via wheat9) + HDFS-6482. Use block ID-based block layout on datanodes (James Thomas via + Colin Patrick McCabe) + OPTIMIZATIONS BUG FIXES @@ -184,9 +187,6 @@ Trunk (Unreleased) HDFS-3549. Fix dist tar build fails in hadoop-hdfs-raid project. (Jason Lowe via daryn) - HDFS-3482. hdfs balancer throws ArrayIndexOutOfBoundsException - if option is specified without values. ( Madhukara Phatak via umamahesh) - HDFS-3614. Revert unused MiniDFSCluster constructor from HDFS-3049. (acmurthy via eli) @@ -332,6 +332,31 @@ Release 2.6.0 - UNRELEASED HDFS-6778. The extended attributes javadoc should simply refer to the user docs. (clamb via wang) + HDFS-6570. add api that enables checking if a user has certain permissions on + a file. (Jitendra Pandey via cnauroth) + + HDFS-6441. Add ability to exclude/include specific datanodes while + balancing. (Benoy Antony and Yu Li via Arpit Agarwal) + + HDFS-6685. Balancer should preserve storage type of replicas. (szetszwo) + + HDFS-6798. Add test case for incorrect data node condition during + balancing. (Benoy Antony via Arpit Agarwal) + + HDFS-6796. Improve the argument check during balancer command line parsing. + (Benoy Antony via szetszwo) + + HDFS-6794. Update BlockManager methods to use DatanodeStorageInfo + where possible (Arpit Agarwal) + + HDFS-6802. Some tests in TestDFSClientFailover are missing @Test + annotation. (Akira Ajisaka via wang) + + HDFS-6788. Improve synchronization in BPOfferService with read write lock. + (Yongjun Zhang via wang) + + HDFS-6787. Remove duplicate code in FSDirectory#unprotectedConcat. (Yi Liu via umamahesh) + OPTIMIZATIONS HDFS-6690. 
Deduplicate xattr names in memory. (wang) @@ -394,6 +419,27 @@ Release 2.6.0 - UNRELEASED HDFS-6749. FSNamesystem methods should call resolvePath. (Charles Lamb via cnauroth) + HDFS-4629. Using com.sun.org.apache.xml.internal.serialize.* in + XmlEditsVisitor.java is JVM vendor specific. Breaks IBM JAVA. + (Amir Sanjar via stevel) + + HDFS-3482. hdfs balancer throws ArrayIndexOutOfBoundsException + if option is specified without values. ( Madhukara Phatak via umamahesh) + + HDFS-6797. DataNode logs wrong layoutversion during upgrade. (Benoy Antony + via Arpit Agarwal) + + HDFS-6810. StorageReport array is initialized with wrong size in + DatanodeDescriptor#getStorageReports. (szetszwo via Arpit Agarwal) + + HDFS-5723. Append failed FINALIZED replica should not be accepted as valid + when that block is underconstruction (vinayakumarb) + + HDFS-5185. DN fails to startup if one of the data dir is full. (vinayakumarb) + + HDFS-6451. NFS should not return NFS3ERR_IO for AccessControlException + (Abhiraj Butala via brandonli) + Release 2.5.0 - UNRELEASED INCOMPATIBLE CHANGES @@ -949,6 +995,9 @@ Release 2.5.0 - UNRELEASED HDFS-6717. JIRA HDFS-5804 breaks default nfs-gateway behavior for unsecured config (brandonli) + HDFS-6768. Fix a few unit tests that use hard-coded port numbers. (Arpit + Agarwal) + BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh) diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index f0404ad79b0..e910695f172 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -176,6 +176,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> netty compile + + xerces + xercesImpl + compile + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java index e779cb51d01..111630c5f59 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java @@ -33,6 +33,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.CryptoCodec; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.hdfs.CorruptFileBlockIterator; @@ -456,6 +457,11 @@ public class Hdfs extends AbstractFileSystem { dfs.removeXAttr(getUriPath(path), name); } + @Override + public void access(Path path, final FsAction mode) throws IOException { + dfs.checkAccess(getUriPath(path), mode); + } + /** * Renew an existing delegation token. 
* diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index d3532607c84..f27342053c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -132,6 +132,7 @@ import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.net.Peer; @@ -2951,6 +2952,17 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, } } + public void checkAccess(String src, FsAction mode) throws IOException { + checkOpen(); + try { + namenode.checkAccess(src, mode); + } catch (RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + FileNotFoundException.class, + UnresolvedPathException.class); + } + } + @Override // RemotePeerFactory public Peer newConnectedPeer(InetSocketAddress addr, Token blockToken, DatanodeID datanodeId) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index d884e5f7c88..a63059b7566 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -381,8 +381,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_DATANODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_DATANODE_HTTP_DEFAULT_PORT; public static final String DFS_DATANODE_MAX_RECEIVER_THREADS_KEY = "dfs.datanode.max.transfer.threads"; public static final int DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT = 4096; - public static final String DFS_DATANODE_NUMBLOCKS_KEY = "dfs.datanode.numblocks"; - public static final int DFS_DATANODE_NUMBLOCKS_DEFAULT = 64; public static final String DFS_DATANODE_SCAN_PERIOD_HOURS_KEY = "dfs.datanode.scan.period.hours"; public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 0; public static final String DFS_DATANODE_TRANSFERTO_ALLOWED_KEY = "dfs.datanode.transferTo.allowed"; @@ -668,4 +666,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY = "dfs.datanode.slow.io.warning.threshold.ms"; public static final long DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT = 300; + + public static final String DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS_KEY = + "dfs.datanode.block.id.layout.upgrade.threads"; + public static final int DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS = 12; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 52721962531..ee38d7ee62c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -59,6 +59,7 @@ import org.apache.hadoop.fs.VolumeId; import 
org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.client.HdfsAdmin; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; @@ -1913,4 +1914,23 @@ public class DistributedFileSystem extends FileSystem { } }.resolve(this, absF); } + + @Override + public void access(Path path, final FsAction mode) throws IOException { + final Path absF = fixRelativePart(path); + new FileSystemLinkResolver() { + @Override + public Void doCall(final Path p) throws IOException { + dfs.checkAccess(getPathName(p), mode); + return null; + } + + @Override + public Void next(final FileSystem fs, final Path p) + throws IOException { + fs.access(p, mode); + return null; + } + }.resolve(this, absF); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java index 408f678d650..3d8133c7ce5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java @@ -18,6 +18,9 @@ package org.apache.hadoop.hdfs; +import java.util.Arrays; +import java.util.List; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -32,4 +35,11 @@ public enum StorageType { SSD; public static final StorageType DEFAULT = DISK; + public static final StorageType[] EMPTY_ARRAY = {}; + + private static final StorageType[] VALUES = values(); + + public static List asList() { + return Arrays.asList(VALUES); + } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java index 680d73b7f94..b35365aa7a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java @@ -50,6 +50,9 @@ public class Block implements Writable, Comparable { public static final Pattern metaFilePattern = Pattern .compile(BLOCK_FILE_PREFIX + "(-??\\d++)_(\\d++)\\" + METADATA_EXTENSION + "$"); + public static final Pattern metaOrBlockFilePattern = Pattern + .compile(BLOCK_FILE_PREFIX + "(-??\\d++)(_(\\d++)\\" + METADATA_EXTENSION + + ")?$"); public static boolean isBlockFilename(File f) { String name = f.getName(); @@ -65,6 +68,11 @@ public class Block implements Writable, Comparable { return metaFilePattern.matcher(name).matches(); } + public static File metaToBlockFile(File metaFile) { + return new File(metaFile.getParent(), metaFile.getName().substring( + 0, metaFile.getName().lastIndexOf('_'))); + } + /** * Get generation stamp from the name of the metafile name */ @@ -75,10 +83,10 @@ public class Block implements Writable, Comparable { } /** - * Get the blockId from the name of the metafile name + * Get the blockId from the name of the meta or block file */ - public static long getBlockId(String metaFile) { - Matcher m = metaFilePattern.matcher(metaFile); + public static long getBlockId(String metaOrBlockFile) { + Matcher m = metaOrBlockFilePattern.matcher(metaOrBlockFile); return m.matches() ? 
Long.parseLong(m.group(1)) : 0; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index a571ac6e389..21e1c07767b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -40,6 +40,7 @@ import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction; @@ -1346,4 +1347,22 @@ public interface ClientProtocol { */ @AtMostOnce public void removeXAttr(String src, XAttr xAttr) throws IOException; + + /** + * Checks if the user can access a path. The mode specifies which access + * checks to perform. If the requested permissions are granted, then the + * method returns normally. If access is denied, then the method throws an + * {@link AccessControlException}. + * In general, applications should avoid using this method, due to the risk of + * time-of-check/time-of-use race conditions. The permissions on a file may + * change immediately after the access call returns. + * + * @param path Path to check + * @param mode type of access to check + * @throws AccessControlException if access is denied + * @throws FileNotFoundException if the path does not exist + * @throws IOException see specific implementation + */ + @Idempotent + public void checkAccess(String path, FsAction mode) throws IOException; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index acb0294eee2..29449933973 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -175,6 +175,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto; import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneResponseProto; import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto; import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto; @@ -325,6 +327,9 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements private static final RemoveXAttrResponseProto VOID_REMOVEXATTR_RESPONSE = 
RemoveXAttrResponseProto.getDefaultInstance(); + private static final CheckAccessResponseProto + VOID_CHECKACCESS_RESPONSE = CheckAccessResponseProto.getDefaultInstance(); + /** * Constructor * @@ -1375,4 +1380,15 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } return VOID_REMOVEXATTR_RESPONSE; } + + @Override + public CheckAccessResponseProto checkAccess(RpcController controller, + CheckAccessRequestProto req) throws ServiceException { + try { + server.checkAccess(req.getPath(), PBHelper.convert(req.getMode())); + } catch (IOException e) { + throw new ServiceException(e); + } + return VOID_CHECKACCESS_RESPONSE; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 24f722bca3b..662682448b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -41,6 +41,7 @@ import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; @@ -147,6 +148,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSaf import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto; import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos; import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto; import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto; @@ -1400,4 +1402,15 @@ public class ClientNamenodeProtocolTranslatorPB implements throw ProtobufHelper.getRemoteException(e); } } + + @Override + public void checkAccess(String path, FsAction mode) throws IOException { + CheckAccessRequestProto req = CheckAccessRequestProto.newBuilder() + .setPath(path).setMode(PBHelper.convert(mode)).build(); + try { + rpcProxy.checkAccess(null, req); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index 230f5455198..4dcac39a1a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -357,15 +357,19 @@ public class PBHelper { return BlockWithLocationsProto.newBuilder() .setBlock(convert(blk.getBlock())) .addAllDatanodeUuids(Arrays.asList(blk.getDatanodeUuids())) - 
.addAllStorageUuids(Arrays.asList(blk.getStorageIDs())).build(); + .addAllStorageUuids(Arrays.asList(blk.getStorageIDs())) + .addAllStorageTypes(convertStorageTypes(blk.getStorageTypes())) + .build(); } public static BlockWithLocations convert(BlockWithLocationsProto b) { final List datanodeUuids = b.getDatanodeUuidsList(); final List storageUuids = b.getStorageUuidsList(); + final List storageTypes = b.getStorageTypesList(); return new BlockWithLocations(convert(b.getBlock()), datanodeUuids.toArray(new String[datanodeUuids.size()]), - storageUuids.toArray(new String[storageUuids.size()])); + storageUuids.toArray(new String[storageUuids.size()]), + convertStorageTypes(storageTypes, storageUuids.size())); } public static BlocksWithLocationsProto convert(BlocksWithLocations blks) { @@ -2122,11 +2126,11 @@ public class PBHelper { return castEnum(v, XATTR_NAMESPACE_VALUES); } - private static FsActionProto convert(FsAction v) { + public static FsActionProto convert(FsAction v) { return FsActionProto.valueOf(v != null ? v.ordinal() : 0); } - private static FsAction convert(FsActionProto v) { + public static FsAction convert(FsActionProto v) { return castEnum(v, FSACTION_VALUES); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index 5dbdd643cdf..e5ff544ee75 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -38,6 +38,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Date; +import java.util.EnumMap; import java.util.Formatter; import java.util.HashMap; import java.util.HashSet; @@ -45,6 +46,7 @@ import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -78,16 +80,21 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; +import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; +import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.Node; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.util.HostsFileReader; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import com.google.common.base.Preconditions; + /**

The balancer is a tool that balances disk space usage on an HDFS cluster * when some datanodes become full or when new empty nodes join the cluster. * The tool is deployed as an application program that can be run by the @@ -188,7 +195,9 @@ import org.apache.hadoop.util.ToolRunner; @InterfaceAudience.Private public class Balancer { static final Log LOG = LogFactory.getLog(Balancer.class); - final private static long MAX_BLOCKS_SIZE_TO_FETCH = 2*1024*1024*1024L; //2GB + final private static long GB = 1L << 30; //1GB + final private static long MAX_SIZE_TO_MOVE = 10*GB; + final private static long MAX_BLOCKS_SIZE_TO_FETCH = 2*GB; private static long WIN_WIDTH = 5400*1000L; // 1.5 hour /** The maximum number of concurrent blocks moves for @@ -203,34 +212,38 @@ public class Balancer { + "\n\t[-policy ]\tthe balancing policy: " + BalancingPolicy.Node.INSTANCE.getName() + " or " + BalancingPolicy.Pool.INSTANCE.getName() - + "\n\t[-threshold ]\tPercentage of disk capacity"; + + "\n\t[-threshold ]\tPercentage of disk capacity" + + "\n\t[-exclude [-f | comma-sperated list of hosts]]" + + "\tExcludes the specified datanodes." + + "\n\t[-include [-f | comma-sperated list of hosts]]" + + "\tIncludes only the specified datanodes."; private final NameNodeConnector nnc; private final BalancingPolicy policy; private final SaslDataTransferClient saslClient; private final double threshold; + // set of data nodes to be excluded from balancing operations. + Set nodesToBeExcluded; + //Restrict balancing to the following nodes. + Set nodesToBeIncluded; // all data node lists - private final Collection overUtilizedDatanodes - = new LinkedList(); - private final Collection aboveAvgUtilizedDatanodes - = new LinkedList(); - private final Collection belowAvgUtilizedDatanodes - = new LinkedList(); - private final Collection underUtilizedDatanodes - = new LinkedList(); + private final Collection overUtilized = new LinkedList(); + private final Collection aboveAvgUtilized = new LinkedList(); + private final Collection belowAvgUtilized + = new LinkedList(); + private final Collection underUtilized + = new LinkedList(); - private final Collection sources - = new HashSet(); - private final Collection targets - = new HashSet(); + private final Collection sources = new HashSet(); + private final Collection targets + = new HashSet(); private final Map globalBlockList = new HashMap(); private final MovedBlocks movedBlocks = new MovedBlocks(); - /** Map (datanodeUuid -> BalancerDatanodes) */ - private final Map datanodeMap - = new HashMap(); + /** Map (datanodeUuid,storageType -> StorageGroup) */ + private final StorageGroupMap storageGroupMap = new StorageGroupMap(); private NetworkTopology cluster; @@ -238,12 +251,39 @@ public class Balancer { private final ExecutorService dispatcherExecutor; private final int maxConcurrentMovesPerNode; + + private static class StorageGroupMap { + private static String toKey(String datanodeUuid, StorageType storageType) { + return datanodeUuid + ":" + storageType; + } + + private final Map map + = new HashMap(); + + BalancerDatanode.StorageGroup get(String datanodeUuid, StorageType storageType) { + return map.get(toKey(datanodeUuid, storageType)); + } + + void put(BalancerDatanode.StorageGroup g) { + final String key = toKey(g.getDatanode().getDatanodeUuid(), g.storageType); + final BalancerDatanode.StorageGroup existing = map.put(key, g); + Preconditions.checkState(existing == null); + } + + int size() { + return map.size(); + } + + void clear() { + map.clear(); + } + } /* This class keeps 
track of a scheduled block move */ private class PendingBlockMove { private BalancerBlock block; private Source source; private BalancerDatanode proxySource; - private BalancerDatanode target; + private BalancerDatanode.StorageGroup target; /** constructor */ private PendingBlockMove() { @@ -254,7 +294,7 @@ public class Balancer { final Block b = block.getBlock(); return b + " with size=" + b.getNumBytes() + " from " + source.getDisplayName() + " to " + target.getDisplayName() - + " through " + proxySource.getDisplayName(); + + " through " + proxySource.datanode; } /* choose a block & a proxy source for this pendingMove @@ -306,20 +346,20 @@ public class Balancer { final DatanodeInfo targetDN = target.getDatanode(); // if node group is supported, first try add nodes in the same node group if (cluster.isNodeGroupAware()) { - for (BalancerDatanode loc : block.getLocations()) { + for (BalancerDatanode.StorageGroup loc : block.getLocations()) { if (cluster.isOnSameNodeGroup(loc.getDatanode(), targetDN) && addTo(loc)) { return true; } } } // check if there is replica which is on the same rack with the target - for (BalancerDatanode loc : block.getLocations()) { + for (BalancerDatanode.StorageGroup loc : block.getLocations()) { if (cluster.isOnSameRack(loc.getDatanode(), targetDN) && addTo(loc)) { return true; } } // find out a non-busy replica - for (BalancerDatanode loc : block.getLocations()) { + for (BalancerDatanode.StorageGroup loc : block.getLocations()) { if (addTo(loc)) { return true; } @@ -327,8 +367,9 @@ public class Balancer { return false; } - // add a BalancerDatanode as proxy source for specific block movement - private boolean addTo(BalancerDatanode bdn) { + /** add to a proxy source for specific block movement */ + private boolean addTo(BalancerDatanode.StorageGroup g) { + final BalancerDatanode bdn = g.getBalancerDatanode(); if (bdn.addPendingBlock(this)) { proxySource = bdn; return true; @@ -344,7 +385,7 @@ public class Balancer { DataInputStream in = null; try { sock.connect( - NetUtils.createSocketAddr(target.datanode.getXferAddr()), + NetUtils.createSocketAddr(target.getDatanode().getXferAddr()), HdfsServerConstants.READ_TIMEOUT); /* Unfortunately we don't have a good way to know if the Datanode is * taking a really long time to move a block, OR something has @@ -361,7 +402,7 @@ public class Balancer { ExtendedBlock eb = new ExtendedBlock(nnc.blockpoolID, block.getBlock()); Token accessToken = nnc.getAccessToken(eb); IOStreamPair saslStreams = saslClient.socketSend(sock, unbufOut, - unbufIn, nnc, accessToken, target.datanode); + unbufIn, nnc, accessToken, target.getDatanode()); unbufOut = saslStreams.out; unbufIn = saslStreams.in; out = new DataOutputStream(new BufferedOutputStream(unbufOut, @@ -381,14 +422,14 @@ public class Balancer { * gets out of sync with work going on in datanode. 
*/ proxySource.activateDelay(DELAY_AFTER_ERROR); - target.activateDelay(DELAY_AFTER_ERROR); + target.getBalancerDatanode().activateDelay(DELAY_AFTER_ERROR); } finally { IOUtils.closeStream(out); IOUtils.closeStream(in); IOUtils.closeSocket(sock); proxySource.removePendingBlock(this); - target.removePendingBlock(this); + target.getBalancerDatanode().removePendingBlock(this); synchronized (this ) { reset(); @@ -404,7 +445,7 @@ public class Balancer { StorageType storageType, Token accessToken) throws IOException { new Sender(out).replaceBlock(eb, storageType, accessToken, - source.getStorageID(), proxySource.getDatanode()); + source.getDatanode().getDatanodeUuid(), proxySource.datanode); } /* Receive a block copy response from the input stream */ @@ -444,8 +485,9 @@ public class Balancer { /* A class for keeping track of blocks in the Balancer */ static private class BalancerBlock { private final Block block; // the block - private final List locations - = new ArrayList(3); // its locations + /** The locations of the replicas of the block. */ + private final List locations + = new ArrayList(3); /* Constructor */ private BalancerBlock(Block block) { @@ -458,20 +500,19 @@ public class Balancer { } /* add a location */ - private synchronized void addLocation(BalancerDatanode datanode) { - if (!locations.contains(datanode)) { - locations.add(datanode); + private synchronized void addLocation(BalancerDatanode.StorageGroup g) { + if (!locations.contains(g)) { + locations.add(g); } } - /* Return if the block is located on datanode */ - private synchronized boolean isLocatedOnDatanode( - BalancerDatanode datanode) { - return locations.contains(datanode); + /** @return if the block is located on the given storage group. */ + private synchronized boolean isLocatedOn(BalancerDatanode.StorageGroup g) { + return locations.contains(g); } /* Return its locations */ - private synchronized List getLocations() { + private synchronized List getLocations() { return locations; } @@ -488,37 +529,84 @@ public class Balancer { /* The class represents a desired move of bytes between two nodes * and the target. - * An object of this class is stored in a source node. + * An object of this class is stored in a source. */ - static private class NodeTask { - private final BalancerDatanode datanode; //target node + static private class Task { + private final BalancerDatanode.StorageGroup target; private long size; //bytes scheduled to move /* constructor */ - private NodeTask(BalancerDatanode datanode, long size) { - this.datanode = datanode; + private Task(BalancerDatanode.StorageGroup target, long size) { + this.target = target; this.size = size; } - - /* Get the node */ - private BalancerDatanode getDatanode() { - return datanode; - } - - /* Get the number of bytes that need to be moved */ - private long getSize() { - return size; - } } /* A class that keeps track of a datanode in Balancer */ private static class BalancerDatanode { - final private static long MAX_SIZE_TO_MOVE = 10*1024*1024*1024L; //10GB + + /** A group of storages in a datanode with the same storage type. 
*/ + private class StorageGroup { + final StorageType storageType; + final double utilization; + final long maxSize2Move; + private long scheduledSize = 0L; + + private StorageGroup(StorageType storageType, double utilization, + long maxSize2Move) { + this.storageType = storageType; + this.utilization = utilization; + this.maxSize2Move = maxSize2Move; + } + + BalancerDatanode getBalancerDatanode() { + return BalancerDatanode.this; + } + + DatanodeInfo getDatanode() { + return BalancerDatanode.this.datanode; + } + + /** Decide if still need to move more bytes */ + protected synchronized boolean hasSpaceForScheduling() { + return availableSizeToMove() > 0L; + } + + /** @return the total number of bytes that need to be moved */ + synchronized long availableSizeToMove() { + return maxSize2Move - scheduledSize; + } + + /** increment scheduled size */ + synchronized void incScheduledSize(long size) { + scheduledSize += size; + } + + /** @return scheduled size */ + synchronized long getScheduledSize() { + return scheduledSize; + } + + /** Reset scheduled size to zero. */ + synchronized void resetScheduledSize() { + scheduledSize = 0L; + } + + /** @return the name for display */ + String getDisplayName() { + return datanode + ":" + storageType; + } + + @Override + public String toString() { + return "" + utilization; + } + } + final DatanodeInfo datanode; - final double utilization; - final long maxSize2Move; - private long scheduledSize = 0L; + final EnumMap storageMap + = new EnumMap(StorageType.class); protected long delayUntil = 0L; // blocks being moved but not confirmed yet private final List pendingBlocks; @@ -526,78 +614,38 @@ public class Balancer { @Override public String toString() { - return getClass().getSimpleName() + "[" + datanode - + ", utilization=" + utilization + "]"; + return getClass().getSimpleName() + ":" + datanode + ":" + storageMap; } /* Constructor * Depending on avgutil & threshold, calculate maximum bytes to move */ - private BalancerDatanode(DatanodeInfo node, BalancingPolicy policy, double threshold, - int maxConcurrentMoves) { - datanode = node; - utilization = policy.getUtilization(node); - final double avgUtil = policy.getAvgUtilization(); - long maxSizeToMove; - - if (utilization >= avgUtil+threshold - || utilization <= avgUtil-threshold) { - maxSizeToMove = (long)(threshold*datanode.getCapacity()/100); - } else { - maxSizeToMove = - (long)(Math.abs(avgUtil-utilization)*datanode.getCapacity()/100); - } - if (utilization < avgUtil ) { - maxSizeToMove = Math.min(datanode.getRemaining(), maxSizeToMove); - } - this.maxSize2Move = Math.min(MAX_SIZE_TO_MOVE, maxSizeToMove); + private BalancerDatanode(DatanodeStorageReport report, + double threshold, int maxConcurrentMoves) { + this.datanode = report.getDatanodeInfo(); this.maxConcurrentMoves = maxConcurrentMoves; this.pendingBlocks = new ArrayList(maxConcurrentMoves); } - /** Get the datanode */ - protected DatanodeInfo getDatanode() { - return datanode; - } - - /** Get the name of the datanode */ - protected String getDisplayName() { - return datanode.toString(); - } - - /* Get the storage id of the datanode */ - protected String getStorageID() { - return datanode.getDatanodeUuid(); - } - - /** Decide if still need to move more bytes */ - protected synchronized boolean hasSpaceForScheduling() { - return scheduledSize nodeTasks = new ArrayList(2); + private final List tasks = new ArrayList(2); private long blocksToReceive = 0L; /* source blocks point to balancerBlocks in the global list because * we want to keep one 
copy of a block in balancer and be aware that @@ -663,17 +711,17 @@ public class Balancer { = new ArrayList(); /* constructor */ - private Source(DatanodeInfo node, BalancingPolicy policy, double threshold, - int maxConcurrentMoves) { - super(node, policy, threshold, maxConcurrentMoves); + private Source(StorageType storageType, double utilization, + long maxSize2Move, BalancerDatanode dn) { + dn.super(storageType, utilization, maxSize2Move); } - /** Add a node task */ - private void addNodeTask(NodeTask task) { - assert (task.datanode != this) : - "Source and target are the same " + datanode; - incScheduledSize(task.getSize()); - nodeTasks.add(task); + /** Add a task */ + private void addTask(Task task) { + Preconditions.checkState(task.target != this, + "Source and target are the same storage group " + getDisplayName()); + incScheduledSize(task.size); + tasks.add(task); } /* Return an iterator to this source's blocks */ @@ -686,8 +734,10 @@ public class Balancer { * Return the total size of the received blocks in the number of bytes. */ private long getBlockList() throws IOException { - BlockWithLocations[] newBlocks = nnc.namenode.getBlocks(datanode, - Math.min(MAX_BLOCKS_SIZE_TO_FETCH, blocksToReceive)).getBlocks(); + final long size = Math.min(MAX_BLOCKS_SIZE_TO_FETCH, blocksToReceive); + final BlockWithLocations[] newBlocks = nnc.namenode.getBlocks( + getDatanode(), size).getBlocks(); + long bytesReceived = 0; for (BlockWithLocations blk : newBlocks) { bytesReceived += blk.getBlock().getNumBytes(); @@ -703,10 +753,13 @@ public class Balancer { synchronized (block) { // update locations - for (String datanodeUuid : blk.getDatanodeUuids()) { - final BalancerDatanode d = datanodeMap.get(datanodeUuid); - if (d != null) { // not an unknown datanode - block.addLocation(d); + final String[] datanodeUuids = blk.getDatanodeUuids(); + final StorageType[] storageTypes = blk.getStorageTypes(); + for (int i = 0; i < datanodeUuids.length; i++) { + final BalancerDatanode.StorageGroup g = storageGroupMap.get( + datanodeUuids[i], storageTypes[i]); + if (g != null) { // not unknown + block.addLocation(g); } } } @@ -721,8 +774,8 @@ public class Balancer { /* Decide if the given block is a good candidate to move or not */ private boolean isGoodBlockCandidate(BalancerBlock block) { - for (NodeTask nodeTask : nodeTasks) { - if (Balancer.this.isGoodBlockCandidate(this, nodeTask.datanode, block)) { + for (Task t : tasks) { + if (Balancer.this.isGoodBlockCandidate(this, t.target, block)) { return true; } } @@ -737,20 +790,20 @@ public class Balancer { * The block should be dispatched immediately after this method is returned. 
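A rough, self-contained sketch of the scheduling bookkeeping performed when a block is tentatively chosen for a task (all sizes are hypothetical):

public class SchedulingBookkeepingSketch {
  public static void main(String[] args) {
    final long maxSize2Move = 10L << 30;   // per-group cap, as in MAX_SIZE_TO_MOVE
    long scheduledSize = 0L;               // bytes already scheduled on the group
    long taskSize = 512L << 20;            // 512 MB assigned to one target

    scheduledSize += taskSize;             // addTask() -> incScheduledSize(size)

    long blockSize = 128L << 20;           // size of the block just chosen
    scheduledSize -= blockSize;            // incScheduledSize(-blockSize)
    taskSize -= blockSize;                 // task.size -= blockSize
    boolean taskFinished = taskSize == 0;  // the task is removed once it hits zero

    boolean hasSpace = maxSize2Move - scheduledSize > 0;  // hasSpaceForScheduling()
    System.out.println("task bytes left=" + taskSize
        + ", finished=" + taskFinished + ", space left=" + hasSpace);
  }
}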
*/ private PendingBlockMove chooseNextBlockToMove() { - for ( Iterator tasks=nodeTasks.iterator(); tasks.hasNext(); ) { - NodeTask task = tasks.next(); - BalancerDatanode target = task.getDatanode(); + for (Iterator i = tasks.iterator(); i.hasNext();) { + final Task task = i.next(); + final BalancerDatanode target = task.target.getBalancerDatanode(); PendingBlockMove pendingBlock = new PendingBlockMove(); if (target.addPendingBlock(pendingBlock)) { // target is not busy, so do a tentative block allocation pendingBlock.source = this; - pendingBlock.target = target; + pendingBlock.target = task.target; if ( pendingBlock.chooseBlockAndProxy() ) { long blockSize = pendingBlock.block.getNumBytes(); - decScheduledSize(blockSize); + incScheduledSize(-blockSize); task.size -= blockSize; if (task.size == 0) { - tasks.remove(); + i.remove(); } return pendingBlock; } else { @@ -824,7 +877,7 @@ public class Balancer { // in case no blocks can be moved for source node's task, // jump out of while-loop after 5 iterations. if (noPendingBlockIteration >= MAX_NO_PENDING_BLOCK_ITERATIONS) { - setScheduledSize(0); + resetScheduledSize(); } } @@ -869,6 +922,8 @@ public class Balancer { Balancer(NameNodeConnector theblockpool, Parameters p, Configuration conf) { this.threshold = p.threshold; this.policy = p.policy; + this.nodesToBeExcluded = p.nodesToBeExcluded; + this.nodesToBeIncluded = p.nodesToBeIncluded; this.nnc = theblockpool; cluster = NetworkTopology.getInstance(conf); @@ -889,95 +944,154 @@ public class Balancer { IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT)); } - /* Given a data node set, build a network topology and decide - * over-utilized datanodes, above average utilized datanodes, - * below average utilized datanodes, and underutilized datanodes. - * The input data node set is shuffled before the datanodes - * are put into the over-utilized datanodes, above average utilized - * datanodes, below average utilized datanodes, and - * underutilized datanodes lists. This will add some randomness - * to the node matching later on. 
- * + + private static long getCapacity(DatanodeStorageReport report, StorageType t) { + long capacity = 0L; + for(StorageReport r : report.getStorageReports()) { + if (r.getStorage().getStorageType() == t) { + capacity += r.getCapacity(); + } + } + return capacity; + } + + private static long getRemaining(DatanodeStorageReport report, StorageType t) { + long remaining = 0L; + for(StorageReport r : report.getStorageReports()) { + if (r.getStorage().getStorageType() == t) { + remaining += r.getRemaining(); + } + } + return remaining; + } + + private boolean shouldIgnore(DatanodeInfo dn) { + //ignore decommissioned nodes + final boolean decommissioned = dn.isDecommissioned(); + //ignore decommissioning nodes + final boolean decommissioning = dn.isDecommissionInProgress(); + // ignore nodes in exclude list + final boolean excluded = Util.shouldBeExcluded(nodesToBeExcluded, dn); + // ignore nodes not in the include list (if include list is not empty) + final boolean notIncluded = !Util.shouldBeIncluded(nodesToBeIncluded, dn); + + if (decommissioned || decommissioning || excluded || notIncluded) { + if (LOG.isTraceEnabled()) { + LOG.trace("Excluding datanode " + dn + ": " + decommissioned + ", " + + decommissioning + ", " + excluded + ", " + notIncluded); + } + return true; + } + return false; + } + + /** + * Given a datanode storage set, build a network topology and decide + * over-utilized storages, above average utilized storages, + * below average utilized storages, and underutilized storages. + * The input datanode storage set is shuffled in order to randomize + * to the storage matching later on. + * * @return the total number of bytes that are * needed to move to make the cluster balanced. - * @param datanodes a set of datanodes + * @param reports a set of datanode storage reports */ - private long initNodes(DatanodeInfo[] datanodes) { + private long init(DatanodeStorageReport[] reports) { // compute average utilization - for (DatanodeInfo datanode : datanodes) { - if (datanode.isDecommissioned() || datanode.isDecommissionInProgress()) { - continue; // ignore decommissioning or decommissioned nodes + for (DatanodeStorageReport r : reports) { + if (shouldIgnore(r.getDatanodeInfo())) { + continue; } - policy.accumulateSpaces(datanode); + policy.accumulateSpaces(r); } policy.initAvgUtilization(); - /*create network topology and all data node lists: - * overloaded, above-average, below-average, and underloaded - * we alternates the accessing of the given datanodes array either by - * an increasing order or a decreasing order. - */ + // create network topology and classify utilization collections: + // over-utilized, above-average, below-average and under-utilized. 
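A worked, self-contained sketch of the classification arithmetic that follows, with made-up numbers (50% average utilization, 10% threshold, one 100 GB storage group at 65%):

public class UtilizationClassificationSketch {
  static final long MAX_SIZE_TO_MOVE = 10L << 30;    // 10 GB, as in the Balancer

  static long percentageToBytes(double percentage, long capacity) {
    return (long) (percentage * capacity / 100.0);   // mirrors precentage2bytes()
  }

  public static void main(String[] args) {
    double avgUtilization = 50.0;    // average utilization of this storage type
    double threshold = 10.0;         // balancer threshold (percent)
    double utilization = 65.0;       // utilization of one storage group
    long capacity = 100L << 30;      // 100 GB capacity (hypothetical)

    double utilizationDiff = utilization - avgUtilization;         // +15: a source
    double thresholdDiff = Math.abs(utilizationDiff) - threshold;  // +5: over-utilized

    // Bytes beyond the threshold band count toward overLoadedBytes.
    long overLoaded = percentageToBytes(thresholdDiff, capacity);  // 5 GB

    // Maximum bytes this group may move in the iteration.
    double diff = Math.min(threshold, Math.abs(utilizationDiff));  // 10
    long maxSize2Move = Math.min(MAX_SIZE_TO_MOVE,
        percentageToBytes(diff, capacity));                        // 10 GB

    System.out.println("overLoaded=" + overLoaded + ", maxSize2Move=" + maxSize2Move);
  }
}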
long overLoadedBytes = 0L, underLoadedBytes = 0L; - for (DatanodeInfo datanode : DFSUtil.shuffle(datanodes)) { - if (datanode.isDecommissioned() || datanode.isDecommissionInProgress()) { + for(DatanodeStorageReport r : DFSUtil.shuffle(reports)) { + final DatanodeInfo datanode = r.getDatanodeInfo(); + if (shouldIgnore(datanode)) { continue; // ignore decommissioning or decommissioned nodes } cluster.add(datanode); - BalancerDatanode datanodeS; - final double avg = policy.getAvgUtilization(); - if (policy.getUtilization(datanode) >= avg) { - datanodeS = new Source(datanode, policy, threshold, maxConcurrentMovesPerNode); - if (isAboveAvgUtilized(datanodeS)) { - this.aboveAvgUtilizedDatanodes.add((Source)datanodeS); - } else { - assert(isOverUtilized(datanodeS)) : - datanodeS.getDisplayName()+ "is not an overUtilized node"; - this.overUtilizedDatanodes.add((Source)datanodeS); - overLoadedBytes += (long)((datanodeS.utilization-avg - -threshold)*datanodeS.datanode.getCapacity()/100.0); + + final BalancerDatanode dn = new BalancerDatanode(r, underLoadedBytes, + maxConcurrentMovesPerNode); + for(StorageType t : StorageType.asList()) { + final Double utilization = policy.getUtilization(r, t); + if (utilization == null) { // datanode does not have such storage type + continue; } - } else { - datanodeS = new BalancerDatanode(datanode, policy, threshold, - maxConcurrentMovesPerNode); - if ( isBelowOrEqualAvgUtilized(datanodeS)) { - this.belowAvgUtilizedDatanodes.add(datanodeS); + + final long capacity = getCapacity(r, t); + final double utilizationDiff = utilization - policy.getAvgUtilization(t); + final double thresholdDiff = Math.abs(utilizationDiff) - threshold; + final long maxSize2Move = computeMaxSize2Move(capacity, + getRemaining(r, t), utilizationDiff, threshold); + + final BalancerDatanode.StorageGroup g; + if (utilizationDiff > 0) { + final Source s = dn.addSource(t, utilization, maxSize2Move, this); + if (thresholdDiff <= 0) { // within threshold + aboveAvgUtilized.add(s); + } else { + overLoadedBytes += precentage2bytes(thresholdDiff, capacity); + overUtilized.add(s); + } + g = s; } else { - assert isUnderUtilized(datanodeS) : "isUnderUtilized(" - + datanodeS.getDisplayName() + ")=" + isUnderUtilized(datanodeS) - + ", utilization=" + datanodeS.utilization; - this.underUtilizedDatanodes.add(datanodeS); - underLoadedBytes += (long)((avg-threshold- - datanodeS.utilization)*datanodeS.datanode.getCapacity()/100.0); + g = dn.addStorageGroup(t, utilization, maxSize2Move); + if (thresholdDiff <= 0) { // within threshold + belowAvgUtilized.add(g); + } else { + underLoadedBytes += precentage2bytes(thresholdDiff, capacity); + underUtilized.add(g); + } } + storageGroupMap.put(g); } - datanodeMap.put(datanode.getDatanodeUuid(), datanodeS); } - //logging - logNodes(); + logUtilizationCollections(); - assert (this.datanodeMap.size() == - overUtilizedDatanodes.size()+underUtilizedDatanodes.size()+ - aboveAvgUtilizedDatanodes.size()+belowAvgUtilizedDatanodes.size()) - : "Mismatched number of datanodes"; + Preconditions.checkState(storageGroupMap.size() == overUtilized.size() + + underUtilized.size() + aboveAvgUtilized.size() + belowAvgUtilized.size(), + "Mismatched number of storage groups"); // return number of bytes to be moved in order to make the cluster balanced return Math.max(overLoadedBytes, underLoadedBytes); } - /* log the over utilized & under utilized nodes */ - private void logNodes() { - logNodes("over-utilized", overUtilizedDatanodes); - if (LOG.isTraceEnabled()) { - 
logNodes("above-average", aboveAvgUtilizedDatanodes); - logNodes("below-average", belowAvgUtilizedDatanodes); + private static long computeMaxSize2Move(final long capacity, final long remaining, + final double utilizationDiff, final double threshold) { + final double diff = Math.min(threshold, Math.abs(utilizationDiff)); + long maxSizeToMove = precentage2bytes(diff, capacity); + if (utilizationDiff < 0) { + maxSizeToMove = Math.min(remaining, maxSizeToMove); } - logNodes("underutilized", underUtilizedDatanodes); + return Math.min(MAX_SIZE_TO_MOVE, maxSizeToMove); } - private static void logNodes( - String name, Collection nodes) { - LOG.info(nodes.size() + " " + name + ": " + nodes); + private static long precentage2bytes(double precentage, long capacity) { + Preconditions.checkArgument(precentage >= 0, + "precentage = " + precentage + " < 0"); + return (long)(precentage * capacity / 100.0); + } + + /* log the over utilized & under utilized nodes */ + private void logUtilizationCollections() { + logUtilizationCollection("over-utilized", overUtilized); + if (LOG.isTraceEnabled()) { + logUtilizationCollection("above-average", aboveAvgUtilized); + logUtilizationCollection("below-average", belowAvgUtilized); + } + logUtilizationCollection("underutilized", underUtilized); + } + + private static + void logUtilizationCollection(String name, Collection items) { + LOG.info(items.size() + " " + name + ": " + items); } /** A matcher interface for matching nodes. */ @@ -1013,26 +1127,24 @@ public class Balancer { /** * Decide all pairs and * the number of bytes to move from a source to a target - * Maximum bytes to be moved per node is - * Min(1 Band worth of bytes, MAX_SIZE_TO_MOVE). - * Return total number of bytes to move in this iteration + * Maximum bytes to be moved per storage group is + * min(1 Band worth of bytes, MAX_SIZE_TO_MOVE). + * @return total number of bytes to move in this iteration */ - private long chooseNodes() { + private long chooseStorageGroups() { // First, match nodes on the same node group if cluster is node group aware if (cluster.isNodeGroupAware()) { - chooseNodes(SAME_NODE_GROUP); + chooseStorageGroups(SAME_NODE_GROUP); } // Then, match nodes on the same rack - chooseNodes(SAME_RACK); + chooseStorageGroups(SAME_RACK); // At last, match all remaining nodes - chooseNodes(ANY_OTHER); + chooseStorageGroups(ANY_OTHER); - assert (datanodeMap.size() >= sources.size()+targets.size()) - : "Mismatched number of datanodes (" + - datanodeMap.size() + " total, " + - sources.size() + " sources, " + - targets.size() + " targets)"; + Preconditions.checkState(storageGroupMap.size() >= sources.size() + targets.size(), + "Mismatched number of datanodes (" + storageGroupMap.size() + " < " + + sources.size() + " sources, " + targets.size() + " targets)"); long bytesToMove = 0L; for (Source src : sources) { @@ -1042,25 +1154,25 @@ public class Balancer { } /** Decide all pairs according to the matcher. */ - private void chooseNodes(final Matcher matcher) { + private void chooseStorageGroups(final Matcher matcher) { /* first step: match each overUtilized datanode (source) to * one or more underUtilized datanodes (targets). */ - chooseDatanodes(overUtilizedDatanodes, underUtilizedDatanodes, matcher); + chooseStorageGroups(overUtilized, underUtilized, matcher); /* match each remaining overutilized datanode (source) to * below average utilized datanodes (targets). 
* Note only overutilized datanodes that haven't had that max bytes to move * satisfied in step 1 are selected */ - chooseDatanodes(overUtilizedDatanodes, belowAvgUtilizedDatanodes, matcher); + chooseStorageGroups(overUtilized, belowAvgUtilized, matcher); /* match each remaining underutilized datanode (target) to * above average utilized datanodes (source). * Note only underutilized datanodes that have not had that max bytes to * move satisfied in step 1 are selected. */ - chooseDatanodes(underUtilizedDatanodes, aboveAvgUtilizedDatanodes, matcher); + chooseStorageGroups(underUtilized, aboveAvgUtilized, matcher); } /** @@ -1068,13 +1180,14 @@ public class Balancer { * datanodes or the candidates are source nodes with (utilization > Avg), and * the others are target nodes with (utilization < Avg). */ - private void - chooseDatanodes(Collection datanodes, Collection candidates, + private + void chooseStorageGroups(Collection groups, Collection candidates, Matcher matcher) { - for (Iterator i = datanodes.iterator(); i.hasNext();) { - final D datanode = i.next(); - for(; chooseForOneDatanode(datanode, candidates, matcher); ); - if (!datanode.hasSpaceForScheduling()) { + for(final Iterator i = groups.iterator(); i.hasNext();) { + final G g = i.next(); + for(; choose4One(g, candidates, matcher); ); + if (!g.hasSpaceForScheduling()) { i.remove(); } } @@ -1084,18 +1197,19 @@ public class Balancer { * For the given datanode, choose a candidate and then schedule it. * @return true if a candidate is chosen; false if no candidates is chosen. */ - private boolean chooseForOneDatanode( - BalancerDatanode dn, Collection candidates, Matcher matcher) { + private + boolean choose4One(BalancerDatanode.StorageGroup g, + Collection candidates, Matcher matcher) { final Iterator i = candidates.iterator(); - final C chosen = chooseCandidate(dn, i, matcher); - + final C chosen = chooseCandidate(g, i, matcher); + if (chosen == null) { return false; } - if (dn instanceof Source) { - matchSourceWithTargetToMove((Source)dn, chosen); + if (g instanceof Source) { + matchSourceWithTargetToMove((Source)g, chosen); } else { - matchSourceWithTargetToMove((Source)chosen, dn); + matchSourceWithTargetToMove((Source)chosen, g); } if (!chosen.hasSpaceForScheduling()) { i.remove(); @@ -1103,27 +1217,28 @@ public class Balancer { return true; } - private void matchSourceWithTargetToMove( - Source source, BalancerDatanode target) { + private void matchSourceWithTargetToMove(Source source, + BalancerDatanode.StorageGroup target) { long size = Math.min(source.availableSizeToMove(), target.availableSizeToMove()); - NodeTask nodeTask = new NodeTask(target, size); - source.addNodeTask(nodeTask); - target.incScheduledSize(nodeTask.getSize()); + final Task task = new Task(target, size); + source.addTask(task); + target.incScheduledSize(task.size); sources.add(source); targets.add(target); LOG.info("Decided to move "+StringUtils.byteDesc(size)+" bytes from " - +source.datanode.getName() + " to " + target.datanode.getName()); + + source.getDisplayName() + " to " + target.getDisplayName()); } /** Choose a candidate for the given datanode. 
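A simplified, self-contained sketch of matcher-driven candidate selection; the string locations and the SAME_RACK matcher here are hypothetical stand-ins for the DatanodeInfo/NetworkTopology checks used by the real matchers (same node group, then same rack, then any other):

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class MatcherSketch {
  interface Matcher {
    boolean match(String location1, String location2);
  }

  // Same-rack matcher for locations of the form "/rackX/dnY" (hypothetical format).
  static final Matcher SAME_RACK = new Matcher() {
    @Override
    public boolean match(String a, String b) {
      return a.split("/")[1].equals(b.split("/")[1]);
    }
  };

  static String chooseCandidate(String self, Iterator<String> candidates,
      Matcher matcher) {
    while (candidates.hasNext()) {
      String c = candidates.next();
      if (matcher.match(self, c)) {
        return c;    // first candidate satisfying the matcher wins
      }
    }
    return null;     // nothing matched in this pass; a looser pass may still succeed
  }

  public static void main(String[] args) {
    List<String> candidates = Arrays.asList("/rack1/dn2", "/rack2/dn3");
    System.out.println(chooseCandidate("/rack2/dn1", candidates.iterator(), SAME_RACK));
    // prints /rack2/dn3; an ANY_OTHER-style pass would also accept /rack1/dn2
  }
}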
*/ - private - C chooseCandidate(D dn, Iterator candidates, Matcher matcher) { - if (dn.hasSpaceForScheduling()) { + private + C chooseCandidate(G g, Iterator candidates, Matcher matcher) { + if (g.hasSpaceForScheduling()) { for(; candidates.hasNext(); ) { final C c = candidates.next(); if (!c.hasSpaceForScheduling()) { candidates.remove(); - } else if (matcher.match(cluster, dn.getDatanode(), c.getDatanode())) { + } else if (matcher.match(cluster, g.getDatanode(), c.getDatanode())) { return c; } } @@ -1177,9 +1292,10 @@ public class Balancer { boolean shouldWait; do { shouldWait = false; - for (BalancerDatanode target : targets) { - if (!target.isPendingQEmpty()) { + for (BalancerDatanode.StorageGroup target : targets) { + if (!target.getBalancerDatanode().isPendingQEmpty()) { shouldWait = true; + break; } } if (shouldWait) { @@ -1248,12 +1364,15 @@ public class Balancer { * 3. doing the move does not reduce the number of racks that the block has */ private boolean isGoodBlockCandidate(Source source, - BalancerDatanode target, BalancerBlock block) { + BalancerDatanode.StorageGroup target, BalancerBlock block) { + if (source.storageType != target.storageType) { + return false; + } // check if the block is moved or not if (movedBlocks.contains(block)) { - return false; + return false; } - if (block.isLocatedOnDatanode(target)) { + if (block.isLocatedOn(target)) { return false; } if (cluster.isNodeGroupAware() && @@ -1268,8 +1387,8 @@ public class Balancer { } else { boolean notOnSameRack = true; synchronized (block) { - for (BalancerDatanode loc : block.locations) { - if (cluster.isOnSameRack(loc.datanode, target.datanode)) { + for (BalancerDatanode.StorageGroup loc : block.locations) { + if (cluster.isOnSameRack(loc.getDatanode(), target.getDatanode())) { notOnSameRack = false; break; } @@ -1280,9 +1399,9 @@ public class Balancer { goodBlock = true; } else { // good if source is on the same rack as on of the replicas - for (BalancerDatanode loc : block.locations) { + for (BalancerDatanode.StorageGroup loc : block.locations) { if (loc != source && - cluster.isOnSameRack(loc.datanode, source.datanode)) { + cluster.isOnSameRack(loc.getDatanode(), source.getDatanode())) { goodBlock = true; break; } @@ -1303,25 +1422,26 @@ public class Balancer { * @return true if there are any replica (other than source) on the same node * group with target */ - private boolean isOnSameNodeGroupWithReplicas(BalancerDatanode target, + private boolean isOnSameNodeGroupWithReplicas(BalancerDatanode.StorageGroup target, BalancerBlock block, Source source) { - for (BalancerDatanode loc : block.locations) { + final DatanodeInfo targetDn = target.getDatanode(); + for (BalancerDatanode.StorageGroup loc : block.locations) { if (loc != source && - cluster.isOnSameNodeGroup(loc.getDatanode(), target.getDatanode())) { - return true; - } + cluster.isOnSameNodeGroup(loc.getDatanode(), targetDn)) { + return true; } + } return false; } /* reset all fields in a balancer preparing for the next iteration */ private void resetData(Configuration conf) { this.cluster = NetworkTopology.getInstance(conf); - this.overUtilizedDatanodes.clear(); - this.aboveAvgUtilizedDatanodes.clear(); - this.belowAvgUtilizedDatanodes.clear(); - this.underUtilizedDatanodes.clear(); - this.datanodeMap.clear(); + this.overUtilized.clear(); + this.aboveAvgUtilized.clear(); + this.belowAvgUtilized.clear(); + this.underUtilized.clear(); + this.storageGroupMap.clear(); this.sources.clear(); this.targets.clear(); this.policy.reset(); @@ -1341,32 +1461,6 @@ 
public class Balancer { } } } - - /* Return true if the given datanode is overUtilized */ - private boolean isOverUtilized(BalancerDatanode datanode) { - return datanode.utilization > (policy.getAvgUtilization()+threshold); - } - - /* Return true if the given datanode is above or equal to average utilized - * but not overUtilized */ - private boolean isAboveAvgUtilized(BalancerDatanode datanode) { - final double avg = policy.getAvgUtilization(); - return (datanode.utilization <= (avg+threshold)) - && (datanode.utilization >= avg); - } - - /* Return true if the given datanode is underUtilized */ - private boolean isUnderUtilized(BalancerDatanode datanode) { - return datanode.utilization < (policy.getAvgUtilization()-threshold); - } - - /* Return true if the given datanode is below average utilized - * but not underUtilized */ - private boolean isBelowOrEqualAvgUtilized(BalancerDatanode datanode) { - final double avg = policy.getAvgUtilization(); - return (datanode.utilization >= (avg-threshold)) - && (datanode.utilization <= avg); - } // Exit status enum ReturnStatus { @@ -1394,7 +1488,8 @@ public class Balancer { /* get all live datanodes of a cluster and their disk usage * decide the number of bytes need to be moved */ - final long bytesLeftToMove = initNodes(nnc.client.getDatanodeReport(DatanodeReportType.LIVE)); + final long bytesLeftToMove = init( + nnc.client.getDatanodeStorageReport(DatanodeReportType.LIVE)); if (bytesLeftToMove == 0) { System.out.println("The cluster is balanced. Exiting..."); return ReturnStatus.SUCCESS; @@ -1408,7 +1503,7 @@ public class Balancer { * in this iteration. Maximum bytes to be moved per node is * Min(1 Band worth of bytes, MAX_SIZE_TO_MOVE). */ - final long bytesToMove = chooseNodes(); + final long bytesToMove = chooseStorageGroups(); if (bytesToMove == 0) { System.out.println("No block can be moved. Exiting..."); return ReturnStatus.NO_MOVE_BLOCK; @@ -1526,21 +1621,101 @@ public class Balancer { } static class Parameters { - static final Parameters DEFALUT = new Parameters( - BalancingPolicy.Node.INSTANCE, 10.0); + static final Parameters DEFAULT = new Parameters( + BalancingPolicy.Node.INSTANCE, 10.0, + Collections. emptySet(), Collections. emptySet()); final BalancingPolicy policy; final double threshold; + // exclude the nodes in this set from balancing operations + Set nodesToBeExcluded; + //include only these nodes in balancing operations + Set nodesToBeIncluded; - Parameters(BalancingPolicy policy, double threshold) { + Parameters(BalancingPolicy policy, double threshold, + Set nodesToBeExcluded, Set nodesToBeIncluded) { this.policy = policy; this.threshold = threshold; + this.nodesToBeExcluded = nodesToBeExcluded; + this.nodesToBeIncluded = nodesToBeIncluded; } @Override public String toString() { return Balancer.class.getSimpleName() + "." + getClass().getSimpleName() - + "[" + policy + ", threshold=" + threshold + "]"; + + "[" + policy + ", threshold=" + threshold + + ", number of nodes to be excluded = "+ nodesToBeExcluded.size() + + ", number of nodes to be included = "+ nodesToBeIncluded.size() +"]"; + } + } + + static class Util { + + /** + * @param datanode + * @return returns true if data node is part of the excludedNodes. + */ + static boolean shouldBeExcluded(Set excludedNodes, DatanodeInfo datanode) { + return isIn(excludedNodes, datanode); + } + + /** + * @param datanode + * @return returns true if includedNodes is empty or data node is part of the includedNodes. 
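A self-contained sketch of the host matching behavior behind the -exclude and -include options; all host names and ports are hypothetical:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class HostListMatchingSketch {
  // Mirrors Util.isIn(): a datanode matches an entry by host name or IP,
  // with or without its transfer port.
  static boolean isIn(Set<String> nodes, String host, int port) {
    return host != null
        && (nodes.contains(host) || nodes.contains(host + ":" + port));
  }

  public static void main(String[] args) {
    // e.g. built from "-exclude dn1.example.com,10.0.0.7:50010"
    Set<String> excluded = new HashSet<String>(
        Arrays.asList("dn1.example.com", "10.0.0.7:50010"));

    System.out.println(isIn(excluded, "dn1.example.com", 50010)); // true, bare host
    System.out.println(isIn(excluded, "10.0.0.7", 50010));        // true, host:port
    System.out.println(isIn(excluded, "10.0.0.7", 1004));         // false, wrong port
  }
}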
+ */ + static boolean shouldBeIncluded(Set includedNodes, DatanodeInfo datanode) { + return (includedNodes.isEmpty() || + isIn(includedNodes, datanode)); + } + /** + * Match is checked using host name , ip address with and without port number. + * @param datanodeSet + * @param datanode + * @return true if the datanode's transfer address matches the set of nodes. + */ + private static boolean isIn(Set datanodeSet, DatanodeInfo datanode) { + return isIn(datanodeSet, datanode.getPeerHostName(), datanode.getXferPort()) || + isIn(datanodeSet, datanode.getIpAddr(), datanode.getXferPort()) || + isIn(datanodeSet, datanode.getHostName(), datanode.getXferPort()); + } + + /** + * returns true if nodes contains host or host:port + * @param nodes + * @param host + * @param port + * @return + */ + private static boolean isIn(Set nodes, String host, int port) { + if (host == null) { + return false; + } + return (nodes.contains(host) || nodes.contains(host +":"+ port)); + } + + /** + * parse a comma separated string to obtain set of host names + * @param string + * @return + */ + static Set parseHostList(String string) { + String[] addrs = StringUtils.getTrimmedStrings(string); + return new HashSet(Arrays.asList(addrs)); + } + + /** + * read set of host names from a file + * @param fileName + * @return + */ + static Set getHostListFromFile(String fileName) { + Set nodes = new HashSet (); + try { + HostsFileReader.readFileToSet("nodes", fileName, nodes); + return StringUtils.getTrimmedStrings(nodes); + } catch (IOException e) { + throw new IllegalArgumentException("Unable to open file: " + fileName); + } } } @@ -1578,15 +1753,17 @@ public class Balancer { /** parse command line arguments */ static Parameters parse(String[] args) { - BalancingPolicy policy = Parameters.DEFALUT.policy; - double threshold = Parameters.DEFALUT.threshold; + BalancingPolicy policy = Parameters.DEFAULT.policy; + double threshold = Parameters.DEFAULT.threshold; + Set nodesTobeExcluded = Parameters.DEFAULT.nodesToBeExcluded; + Set nodesTobeIncluded = Parameters.DEFAULT.nodesToBeIncluded; if (args != null) { try { for(int i = 0; i < args.length; i++) { - checkArgument(args.length >= 2, "args = " + Arrays.toString(args)); if ("-threshold".equalsIgnoreCase(args[i])) { - i++; + checkArgument(++i < args.length, + "Threshold value is missing: args = " + Arrays.toString(args)); try { threshold = Double.parseDouble(args[i]); if (threshold < 1 || threshold > 100) { @@ -1601,25 +1778,52 @@ public class Balancer { throw e; } } else if ("-policy".equalsIgnoreCase(args[i])) { - i++; + checkArgument(++i < args.length, + "Policy value is missing: args = " + Arrays.toString(args)); try { policy = BalancingPolicy.parse(args[i]); } catch(IllegalArgumentException e) { System.err.println("Illegal policy name: " + args[i]); throw e; } + } else if ("-exclude".equalsIgnoreCase(args[i])) { + checkArgument(++i < args.length, + "List of nodes to exclude | -f is missing: args = " + + Arrays.toString(args)); + if ("-f".equalsIgnoreCase(args[i])) { + checkArgument(++i < args.length, + "File containing nodes to exclude is not specified: args = " + + Arrays.toString(args)); + nodesTobeExcluded = Util.getHostListFromFile(args[i]); + } else { + nodesTobeExcluded = Util.parseHostList(args[i]); + } + } else if ("-include".equalsIgnoreCase(args[i])) { + checkArgument(++i < args.length, + "List of nodes to include | -f is missing: args = " + + Arrays.toString(args)); + if ("-f".equalsIgnoreCase(args[i])) { + checkArgument(++i < args.length, + "File containing nodes to 
include is not specified: args = " + + Arrays.toString(args)); + nodesTobeIncluded = Util.getHostListFromFile(args[i]); + } else { + nodesTobeIncluded = Util.parseHostList(args[i]); + } } else { throw new IllegalArgumentException("args = " + Arrays.toString(args)); } } + checkArgument(nodesTobeExcluded.isEmpty() || nodesTobeIncluded.isEmpty(), + "-exclude and -include options cannot be specified together."); } catch(RuntimeException e) { printUsage(System.err); throw e; } } - return new Parameters(policy, threshold); + return new Parameters(policy, threshold, nodesTobeExcluded, nodesTobeIncluded); } private static void printUsage(PrintStream out) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancingPolicy.java index 3297a250a4e..646abd4ef48 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancingPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancingPolicy.java @@ -18,7 +18,11 @@ package org.apache.hadoop.hdfs.server.balancer; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.StorageType; +import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; +import org.apache.hadoop.hdfs.server.protocol.StorageReport; +import org.apache.hadoop.hdfs.util.EnumCounters; +import org.apache.hadoop.hdfs.util.EnumDoubles; /** * Balancing policy. @@ -28,31 +32,43 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; */ @InterfaceAudience.Private abstract class BalancingPolicy { - long totalCapacity; - long totalUsedSpace; - private double avgUtilization; + final EnumCounters totalCapacities + = new EnumCounters(StorageType.class); + final EnumCounters totalUsedSpaces + = new EnumCounters(StorageType.class); + final EnumDoubles avgUtilizations + = new EnumDoubles(StorageType.class); void reset() { - totalCapacity = 0L; - totalUsedSpace = 0L; - avgUtilization = 0.0; + totalCapacities.reset(); + totalUsedSpaces.reset(); + avgUtilizations.reset(); } /** Get the policy name. */ abstract String getName(); /** Accumulate used space and capacity. */ - abstract void accumulateSpaces(DatanodeInfo d); + abstract void accumulateSpaces(DatanodeStorageReport r); void initAvgUtilization() { - this.avgUtilization = totalUsedSpace*100.0/totalCapacity; - } - double getAvgUtilization() { - return avgUtilization; + for(StorageType t : StorageType.asList()) { + final long capacity = totalCapacities.get(t); + if (capacity > 0L) { + final double avg = totalUsedSpaces.get(t)*100.0/capacity; + avgUtilizations.set(t, avg); + } + } } - /** Return the utilization of a datanode */ - abstract double getUtilization(DatanodeInfo d); + double getAvgUtilization(StorageType t) { + return avgUtilizations.get(t); + } + + /** @return the utilization of a particular storage type of a datanode; + * or return null if the datanode does not have such storage type. 
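For readers following the new -exclude/-include handling above: the Balancer limits its working set by matching each datanode against a collection of host names that may optionally carry a port, and an empty collection means no restriction. The following self-contained sketch (hypothetical class name, plain JDK string handling in place of Hadoop's StringUtils and HostsFileReader) illustrates that matching rule.

import java.util.HashSet;
import java.util.Set;

class HostMatchSketch {
  // Parse "host1, host2:50010, host3" into a trimmed set of entries.
  static Set<String> parseHostList(String commaSeparated) {
    Set<String> hosts = new HashSet<String>();
    for (String s : commaSeparated.split(",")) {
      String trimmed = s.trim();
      if (!trimmed.isEmpty()) {
        hosts.add(trimmed);
      }
    }
    return hosts;
  }

  // Empty set means "no restriction"; otherwise accept either host or host:port.
  static boolean shouldBeIncluded(Set<String> included, String host, int port) {
    return included.isEmpty()
        || included.contains(host)
        || included.contains(host + ":" + port);
  }

  public static void main(String[] args) {
    Set<String> include = parseHostList("dn1.example.com, dn2.example.com:50010");
    System.out.println(shouldBeIncluded(include, "dn1.example.com", 50010)); // true
    System.out.println(shouldBeIncluded(include, "dn3.example.com", 50010)); // false
  }
}

A node is filtered the same way whether the operator passed the hosts inline or via -f with a file; only the source of the set differs.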
+ */ + abstract Double getUtilization(DatanodeStorageReport r, StorageType t); @Override public String toString() { @@ -84,14 +100,25 @@ abstract class BalancingPolicy { } @Override - void accumulateSpaces(DatanodeInfo d) { - totalCapacity += d.getCapacity(); - totalUsedSpace += d.getDfsUsed(); + void accumulateSpaces(DatanodeStorageReport r) { + for(StorageReport s : r.getStorageReports()) { + final StorageType t = s.getStorage().getStorageType(); + totalCapacities.add(t, s.getCapacity()); + totalUsedSpaces.add(t, s.getDfsUsed()); + } } @Override - double getUtilization(DatanodeInfo d) { - return d.getDfsUsed()*100.0/d.getCapacity(); + Double getUtilization(DatanodeStorageReport r, final StorageType t) { + long capacity = 0L; + long dfsUsed = 0L; + for(StorageReport s : r.getStorageReports()) { + if (s.getStorage().getStorageType() == t) { + capacity += s.getCapacity(); + dfsUsed += s.getDfsUsed(); + } + } + return capacity == 0L? null: dfsUsed*100.0/capacity; } } @@ -108,14 +135,25 @@ abstract class BalancingPolicy { } @Override - void accumulateSpaces(DatanodeInfo d) { - totalCapacity += d.getCapacity(); - totalUsedSpace += d.getBlockPoolUsed(); + void accumulateSpaces(DatanodeStorageReport r) { + for(StorageReport s : r.getStorageReports()) { + final StorageType t = s.getStorage().getStorageType(); + totalCapacities.add(t, s.getCapacity()); + totalUsedSpaces.add(t, s.getBlockPoolUsed()); + } } @Override - double getUtilization(DatanodeInfo d) { - return d.getBlockPoolUsed()*100.0/d.getCapacity(); + Double getUtilization(DatanodeStorageReport r, final StorageType t) { + long capacity = 0L; + long blockPoolUsed = 0L; + for(StorageReport s : r.getStorageReports()) { + if (s.getStorage().getStorageType() == t) { + capacity += s.getCapacity(); + blockPoolUsed += s.getBlockPoolUsed(); + } + } + return capacity == 0L? null: blockPoolUsed*100.0/capacity; } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 3c4d6a6e890..3a39b01040d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -1082,6 +1082,7 @@ public class BlockManager { * Mark the block belonging to datanode as corrupt * @param blk Block to be marked as corrupt * @param dn Datanode which holds the corrupt replica + * @param storageID if known, null otherwise. * @param reason a textual reason why the block should be marked corrupt, * for logging purposes */ @@ -1098,19 +1099,29 @@ public class BlockManager { + blk + " not found"); return; } - markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock, - blk.getGenerationStamp(), reason, Reason.CORRUPTION_REPORTED), - dn, storageID); - } - private void markBlockAsCorrupt(BlockToMarkCorrupt b, - DatanodeInfo dn, String storageID) throws IOException { DatanodeDescriptor node = getDatanodeManager().getDatanode(dn); if (node == null) { - throw new IOException("Cannot mark " + b + throw new IOException("Cannot mark " + blk + " as corrupt because datanode " + dn + " (" + dn.getDatanodeUuid() + ") does not exist"); } + + markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock, + blk.getGenerationStamp(), reason, Reason.CORRUPTION_REPORTED), + storageID == null ? 
null : node.getStorageInfo(storageID), + node); + } + + /** + * + * @param b + * @param storageInfo storage that contains the block, if known. null otherwise. + * @throws IOException + */ + private void markBlockAsCorrupt(BlockToMarkCorrupt b, + DatanodeStorageInfo storageInfo, + DatanodeDescriptor node) throws IOException { BlockCollection bc = b.corrupted.getBlockCollection(); if (bc == null) { @@ -1121,7 +1132,9 @@ public class BlockManager { } // Add replica to the data-node if it is not already there - node.addBlock(storageID, b.stored); + if (storageInfo != null) { + storageInfo.addBlock(b.stored); + } // Add this replica to corruptReplicas Map corruptReplicas.addToCorruptReplicasMap(b.corrupted, node, b.reason, @@ -1460,7 +1473,7 @@ public class BlockManager { * @throws IOException * if the number of targets < minimum replication. * @see BlockPlacementPolicy#chooseTarget(String, int, Node, - * List, boolean, Set, long) + * List, boolean, Set, long, StorageType) */ public DatanodeStorageInfo[] chooseTarget(final String src, final int numOfReplicas, final DatanodeDescriptor client, @@ -1697,7 +1710,7 @@ public class BlockManager { * @throws IOException */ public boolean processReport(final DatanodeID nodeID, - final DatanodeStorage storage, final String poolId, + final DatanodeStorage storage, final BlockListAsLongs newReport) throws IOException { namesystem.writeLock(); final long startTime = Time.now(); //after acquiring write lock @@ -1729,9 +1742,9 @@ public class BlockManager { if (storageInfo.numBlocks() == 0) { // The first block report can be processed a lot more efficiently than // ordinary block reports. This shortens restart times. - processFirstBlockReport(node, storage.getStorageID(), newReport); + processFirstBlockReport(storageInfo, newReport); } else { - processReport(node, storage, newReport); + processReport(storageInfo, newReport); } // Now that we have an up-to-date block report, we know that any @@ -1793,9 +1806,8 @@ public class BlockManager { } } - private void processReport(final DatanodeDescriptor node, - final DatanodeStorage storage, - final BlockListAsLongs report) throws IOException { + private void processReport(final DatanodeStorageInfo storageInfo, + final BlockListAsLongs report) throws IOException { // Normal case: // Modify the (block-->datanode) map, according to the difference // between the old and new block report. 
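Stepping back to the BalancingPolicy hunk earlier in this patch: capacity and usage are now accumulated per StorageType, and getUtilization returns a Double so that null can signal that a node has no storage of the requested type. A minimal model of that bookkeeping, using a plain EnumMap where the real code uses EnumCounters and EnumDoubles (the StorageType values below are illustrative stand-ins):

import java.util.EnumMap;
import java.util.Map;

class UtilizationSketch {
  enum StorageType { DISK, SSD, ARCHIVE }

  private final Map<StorageType, Long> capacity = new EnumMap<StorageType, Long>(StorageType.class);
  private final Map<StorageType, Long> used = new EnumMap<StorageType, Long>(StorageType.class);

  // Accumulate one storage report's capacity and usage under its type.
  void accumulate(StorageType t, long cap, long dfsUsed) {
    capacity.merge(t, cap, Long::sum);
    used.merge(t, dfsUsed, Long::sum);
  }

  // Average utilization in percent for one storage type, or null if there is
  // no capacity of that type, mirroring the Double return in the patch.
  Double avgUtilization(StorageType t) {
    long cap = capacity.getOrDefault(t, 0L);
    return cap == 0L ? null : used.getOrDefault(t, 0L) * 100.0 / cap;
  }

  public static void main(String[] args) {
    UtilizationSketch s = new UtilizationSketch();
    s.accumulate(StorageType.DISK, 1000L, 400L);
    s.accumulate(StorageType.DISK, 1000L, 100L);
    System.out.println(s.avgUtilization(StorageType.DISK));    // 25.0
    System.out.println(s.avgUtilization(StorageType.ARCHIVE)); // null
  }
}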
@@ -1805,19 +1817,20 @@ public class BlockManager { Collection toInvalidate = new LinkedList(); Collection toCorrupt = new LinkedList(); Collection toUC = new LinkedList(); - reportDiff(node, storage, report, + reportDiff(storageInfo, report, toAdd, toRemove, toInvalidate, toCorrupt, toUC); - + + DatanodeDescriptor node = storageInfo.getDatanodeDescriptor(); // Process the blocks on each queue for (StatefulBlockInfo b : toUC) { - addStoredBlockUnderConstruction(b, node, storage.getStorageID()); + addStoredBlockUnderConstruction(b, storageInfo); } for (Block b : toRemove) { removeStoredBlock(b, node); } int numBlocksLogged = 0; for (BlockInfo b : toAdd) { - addStoredBlock(b, node, storage.getStorageID(), null, numBlocksLogged < maxNumBlocksToLog); + addStoredBlock(b, storageInfo, null, numBlocksLogged < maxNumBlocksToLog); numBlocksLogged++; } if (numBlocksLogged > maxNumBlocksToLog) { @@ -1831,7 +1844,7 @@ public class BlockManager { addToInvalidates(b, node); } for (BlockToMarkCorrupt b : toCorrupt) { - markBlockAsCorrupt(b, node, storage.getStorageID()); + markBlockAsCorrupt(b, storageInfo, node); } } @@ -1842,16 +1855,16 @@ public class BlockManager { * a toRemove list (since there won't be any). It also silently discards * any invalid blocks, thereby deferring their processing until * the next block report. - * @param node - DatanodeDescriptor of the node that sent the report + * @param storageInfo - DatanodeStorageInfo that sent the report * @param report - the initial block report, to be processed * @throws IOException */ - private void processFirstBlockReport(final DatanodeDescriptor node, - final String storageID, + private void processFirstBlockReport( + final DatanodeStorageInfo storageInfo, final BlockListAsLongs report) throws IOException { if (report == null) return; assert (namesystem.hasWriteLock()); - assert (node.getStorageInfo(storageID).numBlocks() == 0); + assert (storageInfo.numBlocks() == 0); BlockReportIterator itBR = report.getBlockReportIterator(); while(itBR.hasNext()) { @@ -1860,7 +1873,7 @@ public class BlockManager { if (shouldPostponeBlocksFromFuture && namesystem.isGenStampInFuture(iblk)) { - queueReportedBlock(node, storageID, iblk, reportedState, + queueReportedBlock(storageInfo, iblk, reportedState, QUEUE_REASON_FUTURE_GENSTAMP); continue; } @@ -1872,15 +1885,16 @@ public class BlockManager { // If block is corrupt, mark it and continue to next block. BlockUCState ucState = storedBlock.getBlockUCState(); BlockToMarkCorrupt c = checkReplicaCorrupt( - iblk, reportedState, storedBlock, ucState, node); + iblk, reportedState, storedBlock, ucState, + storageInfo.getDatanodeDescriptor()); if (c != null) { if (shouldPostponeBlocksFromFuture) { // In the Standby, we may receive a block report for a file that we // just have an out-of-date gen-stamp or state for, for example. 
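The processReport path above is a diff-and-apply loop: the reported block list is compared with what the NameNode already has for that storage, and the differences are collected into add, remove, invalidate, corrupt, and under-construction queues that are then applied one by one. A toy version of just the add/remove split (String IDs in place of BlockInfo):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

class ReportDiffSketch {
  static void diff(Set<String> stored, Set<String> reported,
                   Set<String> toAdd, Set<String> toRemove) {
    for (String b : reported) {
      if (!stored.contains(b)) {
        toAdd.add(b);       // reported but not yet stored
      }
    }
    for (String b : stored) {
      if (!reported.contains(b)) {
        toRemove.add(b);    // stored but no longer reported
      }
    }
  }

  public static void main(String[] args) {
    Set<String> stored = new HashSet<String>(Arrays.asList("blk_1", "blk_2"));
    Set<String> reported = new HashSet<String>(Arrays.asList("blk_2", "blk_3"));
    Set<String> toAdd = new HashSet<String>();
    Set<String> toRemove = new HashSet<String>();
    diff(stored, reported, toAdd, toRemove);
    System.out.println("toAdd=" + toAdd + " toRemove=" + toRemove);
  }
}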
- queueReportedBlock(node, storageID, iblk, reportedState, + queueReportedBlock(storageInfo, iblk, reportedState, QUEUE_REASON_CORRUPT_STATE); } else { - markBlockAsCorrupt(c, node, storageID); + markBlockAsCorrupt(c, storageInfo, storageInfo.getDatanodeDescriptor()); } continue; } @@ -1888,7 +1902,7 @@ public class BlockManager { // If block is under construction, add this replica to its list if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) { ((BlockInfoUnderConstruction)storedBlock).addReplicaIfNotPresent( - node.getStorageInfo(storageID), iblk, reportedState); + storageInfo, iblk, reportedState); // OpenFileBlocks only inside snapshots also will be added to safemode // threshold. So we need to update such blocks to safemode // refer HDFS-5283 @@ -1901,12 +1915,12 @@ public class BlockManager { } //add replica if appropriate if (reportedState == ReplicaState.FINALIZED) { - addStoredBlockImmediate(storedBlock, node, storageID); + addStoredBlockImmediate(storedBlock, storageInfo); } } } - private void reportDiff(DatanodeDescriptor dn, DatanodeStorage storage, + private void reportDiff(DatanodeStorageInfo storageInfo, BlockListAsLongs newReport, Collection toAdd, // add to DatanodeDescriptor Collection toRemove, // remove from DatanodeDescriptor @@ -1914,8 +1928,6 @@ public class BlockManager { Collection toCorrupt, // add to corrupt replicas list Collection toUC) { // add to under-construction list - final DatanodeStorageInfo storageInfo = dn.getStorageInfo(storage.getStorageID()); - // place a delimiter in the list which separates blocks // that have been reported from those that have not BlockInfo delimiter = new BlockInfo(new Block(), 1); @@ -1932,7 +1944,7 @@ public class BlockManager { while(itBR.hasNext()) { Block iblk = itBR.next(); ReplicaState iState = itBR.getCurrentReplicaState(); - BlockInfo storedBlock = processReportedBlock(dn, storage.getStorageID(), + BlockInfo storedBlock = processReportedBlock(storageInfo, iblk, iState, toAdd, toInvalidate, toCorrupt, toUC); // move block to the head of the list @@ -1969,7 +1981,7 @@ public class BlockManager { * BlockInfoUnderConstruction's list of replicas. * * - * @param dn descriptor for the datanode that made the report + * @param storageInfo DatanodeStorageInfo that sent the report. * @param block reported block replica * @param reportedState reported replica state * @param toAdd add to DatanodeDescriptor @@ -1981,14 +1993,16 @@ public class BlockManager { * @return the up-to-date stored block, if it should be kept. * Otherwise, null. 
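One behavioral detail in the first-block-report handling above: a Standby NameNode that may still be behind on edits does not act on a suspicious replica immediately; it queues the report for later, whereas an Active NameNode marks the replica corrupt on the spot. Schematically (hypothetical names, a plain boolean in place of shouldPostponeBlocksFromFuture):

import java.util.ArrayDeque;
import java.util.Queue;

class PostponeSketch {
  private final boolean standby;
  private final Queue<String> pending = new ArrayDeque<String>();

  PostponeSketch(boolean standby) { this.standby = standby; }

  void onSuspectReplica(String block) {
    if (standby) {
      pending.add(block);                              // revisit once edits catch up
    } else {
      System.out.println("marking " + block + " as corrupt");
    }
  }

  public static void main(String[] args) {
    PostponeSketch standbyNn = new PostponeSketch(true);
    standbyNn.onSuspectReplica("blk_42");
    System.out.println("queued on standby: " + standbyNn.pending.size());
    new PostponeSketch(false).onSuspectReplica("blk_42");
  }
}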
*/ - private BlockInfo processReportedBlock(final DatanodeDescriptor dn, - final String storageID, + private BlockInfo processReportedBlock( + final DatanodeStorageInfo storageInfo, final Block block, final ReplicaState reportedState, final Collection toAdd, final Collection toInvalidate, final Collection toCorrupt, final Collection toUC) { + DatanodeDescriptor dn = storageInfo.getDatanodeDescriptor(); + if(LOG.isDebugEnabled()) { LOG.debug("Reported block " + block + " on " + dn + " size " + block.getNumBytes() @@ -1997,7 +2011,7 @@ public class BlockManager { if (shouldPostponeBlocksFromFuture && namesystem.isGenStampInFuture(block)) { - queueReportedBlock(dn, storageID, block, reportedState, + queueReportedBlock(storageInfo, block, reportedState, QUEUE_REASON_FUTURE_GENSTAMP); return null; } @@ -2037,7 +2051,7 @@ public class BlockManager { // TODO: Pretty confident this should be s/storedBlock/block below, // since we should be postponing the info of the reported block, not // the stored block. See HDFS-6289 for more context. - queueReportedBlock(dn, storageID, storedBlock, reportedState, + queueReportedBlock(storageInfo, storedBlock, reportedState, QUEUE_REASON_CORRUPT_STATE); } else { toCorrupt.add(c); @@ -2066,17 +2080,17 @@ public class BlockManager { * standby node. @see PendingDataNodeMessages. * @param reason a textual reason to report in the debug logs */ - private void queueReportedBlock(DatanodeDescriptor dn, String storageID, Block block, + private void queueReportedBlock(DatanodeStorageInfo storageInfo, Block block, ReplicaState reportedState, String reason) { assert shouldPostponeBlocksFromFuture; if (LOG.isDebugEnabled()) { LOG.debug("Queueing reported block " + block + " in state " + reportedState + - " from datanode " + dn + " for later processing " + - "because " + reason + "."); + " from datanode " + storageInfo.getDatanodeDescriptor() + + " for later processing because " + reason + "."); } - pendingDNMessages.enqueueReportedBlock(dn, storageID, block, reportedState); + pendingDNMessages.enqueueReportedBlock(storageInfo, block, reportedState); } /** @@ -2099,7 +2113,7 @@ public class BlockManager { if (LOG.isDebugEnabled()) { LOG.debug("Processing previouly queued message " + rbi); } - processAndHandleReportedBlock(rbi.getNode(), rbi.getStorageID(), + processAndHandleReportedBlock(rbi.getStorageInfo(), rbi.getBlock(), rbi.getReportedState(), null); } } @@ -2156,6 +2170,16 @@ public class BlockManager { } else { return null; // not corrupt } + case UNDER_CONSTRUCTION: + if (storedBlock.getGenerationStamp() > reported.getGenerationStamp()) { + final long reportedGS = reported.getGenerationStamp(); + return new BlockToMarkCorrupt(storedBlock, reportedGS, "block is " + + ucState + " and reported state " + reportedState + + ", But reported genstamp " + reportedGS + + " does not match genstamp in block map " + + storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH); + } + return null; default: return null; } @@ -2219,19 +2243,20 @@ public class BlockManager { } void addStoredBlockUnderConstruction(StatefulBlockInfo ucBlock, - DatanodeDescriptor node, String storageID) throws IOException { + DatanodeStorageInfo storageInfo) throws IOException { BlockInfoUnderConstruction block = ucBlock.storedBlock; - block.addReplicaIfNotPresent(node.getStorageInfo(storageID), - ucBlock.reportedBlock, ucBlock.reportedState); + block.addReplicaIfNotPresent( + storageInfo, ucBlock.reportedBlock, ucBlock.reportedState); - if (ucBlock.reportedState == ReplicaState.FINALIZED && 
block.findDatanode(node) < 0) { - addStoredBlock(block, node, storageID, null, true); + if (ucBlock.reportedState == ReplicaState.FINALIZED && + block.findDatanode(storageInfo.getDatanodeDescriptor()) < 0) { + addStoredBlock(block, storageInfo, null, true); } } /** * Faster version of - * {@link #addStoredBlock(BlockInfo, DatanodeDescriptor, String, DatanodeDescriptor, boolean)} + * {@link #addStoredBlock(BlockInfo, DatanodeStorageInfo, DatanodeDescriptor, boolean)} * , intended for use with initial block report at startup. If not in startup * safe mode, will call standard addStoredBlock(). Assumes this method is * called "immediately" so there is no need to refresh the storedBlock from @@ -2242,17 +2267,17 @@ public class BlockManager { * @throws IOException */ private void addStoredBlockImmediate(BlockInfo storedBlock, - DatanodeDescriptor node, String storageID) + DatanodeStorageInfo storageInfo) throws IOException { assert (storedBlock != null && namesystem.hasWriteLock()); if (!namesystem.isInStartupSafeMode() || namesystem.isPopulatingReplQueues()) { - addStoredBlock(storedBlock, node, storageID, null, false); + addStoredBlock(storedBlock, storageInfo, null, false); return; } // just add it - node.addBlock(storageID, storedBlock); + storageInfo.addBlock(storedBlock); // Now check for completion of blocks and safe block count int numCurrentReplica = countLiveNodes(storedBlock); @@ -2274,13 +2299,13 @@ public class BlockManager { * @return the block that is stored in blockMap. */ private Block addStoredBlock(final BlockInfo block, - DatanodeDescriptor node, - String storageID, + DatanodeStorageInfo storageInfo, DatanodeDescriptor delNodeHint, boolean logEveryBlock) throws IOException { assert block != null && namesystem.hasWriteLock(); BlockInfo storedBlock; + DatanodeDescriptor node = storageInfo.getDatanodeDescriptor(); if (block instanceof BlockInfoUnderConstruction) { //refresh our copy in case the block got completed in another thread storedBlock = blocksMap.getStoredBlock(block); @@ -2300,7 +2325,7 @@ public class BlockManager { assert bc != null : "Block must belong to a file"; // add block to the datanode - boolean added = node.addBlock(storageID, storedBlock); + boolean added = storageInfo.addBlock(storedBlock); int curReplicaDelta; if (added) { @@ -2829,12 +2854,15 @@ public class BlockManager { } else { final String[] datanodeUuids = new String[locations.size()]; final String[] storageIDs = new String[datanodeUuids.length]; + final StorageType[] storageTypes = new StorageType[datanodeUuids.length]; for(int i = 0; i < locations.size(); i++) { final DatanodeStorageInfo s = locations.get(i); datanodeUuids[i] = s.getDatanodeDescriptor().getDatanodeUuid(); storageIDs[i] = s.getStorageID(); + storageTypes[i] = s.getStorageType(); } - results.add(new BlockWithLocations(block, datanodeUuids, storageIDs)); + results.add(new BlockWithLocations(block, datanodeUuids, storageIDs, + storageTypes)); return block.getNumBytes(); } } @@ -2843,8 +2871,9 @@ public class BlockManager { * The given node is reporting that it received a certain block. */ @VisibleForTesting - void addBlock(DatanodeDescriptor node, String storageID, Block block, String delHint) + void addBlock(DatanodeStorageInfo storageInfo, Block block, String delHint) throws IOException { + DatanodeDescriptor node = storageInfo.getDatanodeDescriptor(); // Decrement number of blocks scheduled to this datanode. 
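At the end of the hunk above, each returned block location now carries its storage type in addition to the datanode UUID and storage ID, packed as three parallel arrays. A small sketch of that construction (the Storage class is an illustrative stand-in for DatanodeStorageInfo):

import java.util.ArrayList;
import java.util.List;

class BlockLocationsSketch {
  enum StorageType { DISK, SSD }

  // Stand-in for DatanodeStorageInfo.
  static class Storage {
    final String datanodeUuid;
    final String storageId;
    final StorageType type;
    Storage(String datanodeUuid, String storageId, StorageType type) {
      this.datanodeUuid = datanodeUuid;
      this.storageId = storageId;
      this.type = type;
    }
  }

  public static void main(String[] args) {
    List<Storage> locations = new ArrayList<Storage>();
    locations.add(new Storage("dn-uuid-1", "DS-1", StorageType.DISK));
    locations.add(new Storage("dn-uuid-2", "DS-2", StorageType.SSD));

    // Three parallel arrays, one entry per replica location.
    String[] datanodeUuids = new String[locations.size()];
    String[] storageIds = new String[locations.size()];
    StorageType[] storageTypes = new StorageType[locations.size()];
    for (int i = 0; i < locations.size(); i++) {
      Storage s = locations.get(i);
      datanodeUuids[i] = s.datanodeUuid;
      storageIds[i] = s.storageId;
      storageTypes[i] = s.type;
    }
    System.out.println(datanodeUuids[0] + " / " + storageIds[0] + " / " + storageTypes[0]);
  }
}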
// for a retry request (of DatanodeProtocol#blockReceivedAndDeleted with // RECEIVED_BLOCK), we currently also decrease the approximate number. @@ -2864,12 +2893,12 @@ public class BlockManager { // Modify the blocks->datanode map and node's map. // pendingReplications.decrement(block, node); - processAndHandleReportedBlock(node, storageID, block, ReplicaState.FINALIZED, + processAndHandleReportedBlock(storageInfo, block, ReplicaState.FINALIZED, delHintNode); } - private void processAndHandleReportedBlock(DatanodeDescriptor node, - String storageID, Block block, + private void processAndHandleReportedBlock( + DatanodeStorageInfo storageInfo, Block block, ReplicaState reportedState, DatanodeDescriptor delHintNode) throws IOException { // blockReceived reports a finalized block @@ -2877,7 +2906,9 @@ public class BlockManager { Collection toInvalidate = new LinkedList(); Collection toCorrupt = new LinkedList(); Collection toUC = new LinkedList(); - processReportedBlock(node, storageID, block, reportedState, + final DatanodeDescriptor node = storageInfo.getDatanodeDescriptor(); + + processReportedBlock(storageInfo, block, reportedState, toAdd, toInvalidate, toCorrupt, toUC); // the block is only in one of the to-do lists // if it is in none then data-node already has it @@ -2885,11 +2916,11 @@ public class BlockManager { : "The block should be only in one of the lists."; for (StatefulBlockInfo b : toUC) { - addStoredBlockUnderConstruction(b, node, storageID); + addStoredBlockUnderConstruction(b, storageInfo); } long numBlocksLogged = 0; for (BlockInfo b : toAdd) { - addStoredBlock(b, node, storageID, delHintNode, numBlocksLogged < maxNumBlocksToLog); + addStoredBlock(b, storageInfo, delHintNode, numBlocksLogged < maxNumBlocksToLog); numBlocksLogged++; } if (numBlocksLogged > maxNumBlocksToLog) { @@ -2903,7 +2934,7 @@ public class BlockManager { addToInvalidates(b, node); } for (BlockToMarkCorrupt b : toCorrupt) { - markBlockAsCorrupt(b, node, storageID); + markBlockAsCorrupt(b, storageInfo, node); } } @@ -2930,13 +2961,15 @@ public class BlockManager { "Got incremental block report from unregistered or dead node"); } - if (node.getStorageInfo(srdb.getStorage().getStorageID()) == null) { + DatanodeStorageInfo storageInfo = + node.getStorageInfo(srdb.getStorage().getStorageID()); + if (storageInfo == null) { // The DataNode is reporting an unknown storage. Usually the NN learns // about new storages from heartbeats but during NN restart we may // receive a block report or incremental report before the heartbeat. // We must handle this for protocol compatibility. This issue was // uncovered by HDFS-6094. 
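The comment above describes the HDFS-6094 ordering quirk: after a NameNode restart, a block report or incremental report can reference a storage the NameNode has not yet learned about from a heartbeat, so the storage record is created on demand before the report is processed. Roughly (a map stands in for the descriptor's storage map, and updateStorage is reduced to get-or-create):

import java.util.HashMap;
import java.util.Map;

class StorageOnDemandSketch {
  static class StorageInfo {
    final String storageId;
    StorageInfo(String storageId) { this.storageId = storageId; }
  }

  private final Map<String, StorageInfo> storageMap = new HashMap<String, StorageInfo>();

  // Resolve a storage by ID, registering it if a report arrived before the heartbeat.
  StorageInfo getOrCreateStorage(String storageId) {
    StorageInfo info = storageMap.get(storageId);
    if (info == null) {
      info = new StorageInfo(storageId);
      storageMap.put(storageId, info);
    }
    return info;
  }

  public static void main(String[] args) {
    StorageOnDemandSketch s = new StorageOnDemandSketch();
    System.out.println(s.getOrCreateStorage("DS-abc").storageId); // created on demand
  }
}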
- node.updateStorage(srdb.getStorage()); + storageInfo = node.updateStorage(srdb.getStorage()); } for (ReceivedDeletedBlockInfo rdbi : srdb.getBlocks()) { @@ -2946,14 +2979,13 @@ public class BlockManager { deleted++; break; case RECEIVED_BLOCK: - addBlock(node, srdb.getStorage().getStorageID(), - rdbi.getBlock(), rdbi.getDelHints()); + addBlock(storageInfo, rdbi.getBlock(), rdbi.getDelHints()); received++; break; case RECEIVING_BLOCK: receiving++; - processAndHandleReportedBlock(node, srdb.getStorage().getStorageID(), - rdbi.getBlock(), ReplicaState.RBW, null); + processAndHandleReportedBlock(storageInfo, rdbi.getBlock(), + ReplicaState.RBW, null); break; default: String msg = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java index fcc189d9f9b..d622905f0ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java @@ -260,8 +260,8 @@ public class DatanodeDescriptor extends DatanodeInfo { } public StorageReport[] getStorageReports() { - final StorageReport[] reports = new StorageReport[storageMap.size()]; final DatanodeStorageInfo[] infos = getStorageInfos(); + final StorageReport[] reports = new StorageReport[infos.length]; for(int i = 0; i < infos.length; i++) { reports[i] = infos[i].toStorageReport(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java index fa4b0e533bd..791fc3157d9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java @@ -207,7 +207,7 @@ public class DatanodeStorageInfo { return blockPoolUsed; } - boolean addBlock(BlockInfo b) { + public boolean addBlock(BlockInfo b) { if(!b.addStorage(this)) return false; // add to the head of the data-node list diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java index 0a1ba65f125..5f59f0267a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import com.google.common.collect.Lists; import com.google.common.collect.Maps; +import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; /** * In the Standby Node, we can receive messages about blocks @@ -41,14 +42,12 @@ class PendingDataNodeMessages { static class ReportedBlockInfo { private final Block block; - private final DatanodeDescriptor dn; - private final String storageID; + private final DatanodeStorageInfo storageInfo; private final ReplicaState reportedState; 
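The switch near the start of this chunk routes each incremental-report entry by its status: deletions, fully received blocks, and blocks still being written each take a different path, now against the resolved DatanodeStorageInfo. A stripped-down dispatcher of the same shape (enum values and counters are illustrative; the comments point at the real handlers):

class IncrementalReportSketch {
  enum BlockStatus { DELETED_BLOCK, RECEIVED_BLOCK, RECEIVING_BLOCK }

  int deleted, received, receiving;

  void process(BlockStatus status, String block) {
    switch (status) {
      case DELETED_BLOCK:
        deleted++;          // removeStoredBlock(...)
        break;
      case RECEIVED_BLOCK:
        received++;         // addBlock(storageInfo, block, delHints)
        break;
      case RECEIVING_BLOCK:
        receiving++;        // processAndHandleReportedBlock(..., ReplicaState.RBW, null)
        break;
      default:
        throw new IllegalArgumentException("Unknown status for " + block);
    }
  }

  public static void main(String[] args) {
    IncrementalReportSketch s = new IncrementalReportSketch();
    s.process(BlockStatus.RECEIVED_BLOCK, "blk_1");
    s.process(BlockStatus.RECEIVING_BLOCK, "blk_2");
    System.out.println(s.received + " received, " + s.receiving + " receiving");
  }
}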
- ReportedBlockInfo(DatanodeDescriptor dn, String storageID, Block block, + ReportedBlockInfo(DatanodeStorageInfo storageInfo, Block block, ReplicaState reportedState) { - this.dn = dn; - this.storageID = storageID; + this.storageInfo = storageInfo; this.block = block; this.reportedState = reportedState; } @@ -57,21 +56,18 @@ class PendingDataNodeMessages { return block; } - DatanodeDescriptor getNode() { - return dn; - } - - String getStorageID() { - return storageID; - } - ReplicaState getReportedState() { return reportedState; } + + DatanodeStorageInfo getStorageInfo() { + return storageInfo; + } @Override public String toString() { - return "ReportedBlockInfo [block=" + block + ", dn=" + dn + return "ReportedBlockInfo [block=" + block + ", dn=" + + storageInfo.getDatanodeDescriptor() + ", reportedState=" + reportedState + "]"; } } @@ -87,7 +83,7 @@ class PendingDataNodeMessages { Queue oldQueue = entry.getValue(); while (!oldQueue.isEmpty()) { ReportedBlockInfo rbi = oldQueue.remove(); - if (!rbi.getNode().equals(dn)) { + if (!rbi.getStorageInfo().getDatanodeDescriptor().equals(dn)) { newQueue.add(rbi); } else { count--; @@ -97,11 +93,11 @@ class PendingDataNodeMessages { } } - void enqueueReportedBlock(DatanodeDescriptor dn, String storageID, Block block, + void enqueueReportedBlock(DatanodeStorageInfo storageInfo, Block block, ReplicaState reportedState) { block = new Block(block); getBlockQueue(block).add( - new ReportedBlockInfo(dn, storageID, block, reportedState)); + new ReportedBlockInfo(storageInfo, block, reportedState)); count++; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java index 0a6549de8f9..39e842ccfd4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java @@ -21,6 +21,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import com.google.common.collect.Sets; + import org.apache.commons.logging.Log; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; @@ -38,6 +39,8 @@ import java.util.ArrayList; import java.util.List; import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantReadWriteLock; /** * One instance per block-pool/namespace on the DN, which handles the @@ -91,6 +94,28 @@ class BPOfferService { */ private long lastActiveClaimTxId = -1; + private final ReentrantReadWriteLock mReadWriteLock = + new ReentrantReadWriteLock(); + private final Lock mReadLock = mReadWriteLock.readLock(); + private final Lock mWriteLock = mReadWriteLock.writeLock(); + + // utility methods to acquire and release read lock and write lock + void readLock() { + mReadLock.lock(); + } + + void readUnlock() { + mReadLock.unlock(); + } + + void writeLock() { + mWriteLock.lock(); + } + + void writeUnlock() { + mWriteLock.unlock(); + } + BPOfferService(List nnAddrs, DataNode dn) { Preconditions.checkArgument(!nnAddrs.isEmpty(), "Must pass at least one NN."); @@ -135,14 +160,19 @@ class BPOfferService { } return false; } - - synchronized String getBlockPoolId() { - if (bpNSInfo != null) { - 
return bpNSInfo.getBlockPoolID(); - } else { - LOG.warn("Block pool ID needed, but service not yet registered with NN", - new Exception("trace")); - return null; + + String getBlockPoolId() { + readLock(); + try { + if (bpNSInfo != null) { + return bpNSInfo.getBlockPoolID(); + } else { + LOG.warn("Block pool ID needed, but service not yet registered with NN", + new Exception("trace")); + return null; + } + } finally { + readUnlock(); } } @@ -150,27 +180,37 @@ class BPOfferService { return getNamespaceInfo() != null; } - synchronized NamespaceInfo getNamespaceInfo() { - return bpNSInfo; + NamespaceInfo getNamespaceInfo() { + readLock(); + try { + return bpNSInfo; + } finally { + readUnlock(); + } } @Override - public synchronized String toString() { - if (bpNSInfo == null) { - // If we haven't yet connected to our NN, we don't yet know our - // own block pool ID. - // If _none_ of the block pools have connected yet, we don't even - // know the DatanodeID ID of this DN. - String datanodeUuid = dn.getDatanodeUuid(); + public String toString() { + readLock(); + try { + if (bpNSInfo == null) { + // If we haven't yet connected to our NN, we don't yet know our + // own block pool ID. + // If _none_ of the block pools have connected yet, we don't even + // know the DatanodeID ID of this DN. + String datanodeUuid = dn.getDatanodeUuid(); - if (datanodeUuid == null || datanodeUuid.isEmpty()) { - datanodeUuid = "unassigned"; + if (datanodeUuid == null || datanodeUuid.isEmpty()) { + datanodeUuid = "unassigned"; + } + return "Block pool (Datanode Uuid " + datanodeUuid + ")"; + } else { + return "Block pool " + getBlockPoolId() + + " (Datanode Uuid " + dn.getDatanodeUuid() + + ")"; } - return "Block pool (Datanode Uuid " + datanodeUuid + ")"; - } else { - return "Block pool " + getBlockPoolId() + - " (Datanode Uuid " + dn.getDatanodeUuid() + - ")"; + } finally { + readUnlock(); } } @@ -266,32 +306,37 @@ class BPOfferService { * verifies that this namespace matches (eg to prevent a misconfiguration * where a StandbyNode from a different cluster is specified) */ - synchronized void verifyAndSetNamespaceInfo(NamespaceInfo nsInfo) throws IOException { - if (this.bpNSInfo == null) { - this.bpNSInfo = nsInfo; - boolean success = false; + void verifyAndSetNamespaceInfo(NamespaceInfo nsInfo) throws IOException { + writeLock(); + try { + if (this.bpNSInfo == null) { + this.bpNSInfo = nsInfo; + boolean success = false; - // Now that we know the namespace ID, etc, we can pass this to the DN. - // The DN can now initialize its local storage if we are the - // first BP to handshake, etc. - try { - dn.initBlockPool(this); - success = true; - } finally { - if (!success) { - // The datanode failed to initialize the BP. We need to reset - // the namespace info so that other BPService actors still have - // a chance to set it, and re-initialize the datanode. - this.bpNSInfo = null; + // Now that we know the namespace ID, etc, we can pass this to the DN. + // The DN can now initialize its local storage if we are the + // first BP to handshake, etc. + try { + dn.initBlockPool(this); + success = true; + } finally { + if (!success) { + // The datanode failed to initialize the BP. We need to reset + // the namespace info so that other BPService actors still have + // a chance to set it, and re-initialize the datanode. 
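The BPOfferService rework above swaps method-level synchronization for a ReentrantReadWriteLock with small readLock()/writeLock() helpers, so read-mostly accessors such as getBlockPoolId and getNamespaceInfo no longer block one another. The locking pattern in isolation (hypothetical guarded field):

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class RwLockPatternSketch {
  private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
  private final Lock readLock = rwLock.readLock();
  private final Lock writeLock = rwLock.writeLock();
  private String blockPoolId;   // example piece of guarded state

  // Readers share the lock and always release in a finally block.
  String getBlockPoolId() {
    readLock.lock();
    try {
      return blockPoolId;
    } finally {
      readLock.unlock();
    }
  }

  // Writers take exclusive access while mutating state.
  void setBlockPoolId(String id) {
    writeLock.lock();
    try {
      blockPoolId = id;
    } finally {
      writeLock.unlock();
    }
  }

  public static void main(String[] args) {
    RwLockPatternSketch s = new RwLockPatternSketch();
    s.setBlockPoolId("BP-1");
    System.out.println(s.getBlockPoolId());
  }
}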
+ this.bpNSInfo = null; + } } + } else { + checkNSEquality(bpNSInfo.getBlockPoolID(), nsInfo.getBlockPoolID(), + "Blockpool ID"); + checkNSEquality(bpNSInfo.getNamespaceID(), nsInfo.getNamespaceID(), + "Namespace ID"); + checkNSEquality(bpNSInfo.getClusterID(), nsInfo.getClusterID(), + "Cluster ID"); } - } else { - checkNSEquality(bpNSInfo.getBlockPoolID(), nsInfo.getBlockPoolID(), - "Blockpool ID"); - checkNSEquality(bpNSInfo.getNamespaceID(), nsInfo.getNamespaceID(), - "Namespace ID"); - checkNSEquality(bpNSInfo.getClusterID(), nsInfo.getClusterID(), - "Cluster ID"); + } finally { + writeUnlock(); } } @@ -300,22 +345,27 @@ class BPOfferService { * NN, it calls this function to verify that the NN it connected to * is consistent with other NNs serving the block-pool. */ - synchronized void registrationSucceeded(BPServiceActor bpServiceActor, + void registrationSucceeded(BPServiceActor bpServiceActor, DatanodeRegistration reg) throws IOException { - if (bpRegistration != null) { - checkNSEquality(bpRegistration.getStorageInfo().getNamespaceID(), - reg.getStorageInfo().getNamespaceID(), "namespace ID"); - checkNSEquality(bpRegistration.getStorageInfo().getClusterID(), - reg.getStorageInfo().getClusterID(), "cluster ID"); - } else { - bpRegistration = reg; - } - - dn.bpRegistrationSucceeded(bpRegistration, getBlockPoolId()); - // Add the initial block token secret keys to the DN's secret manager. - if (dn.isBlockTokenEnabled) { - dn.blockPoolTokenSecretManager.addKeys(getBlockPoolId(), - reg.getExportedKeys()); + writeLock(); + try { + if (bpRegistration != null) { + checkNSEquality(bpRegistration.getStorageInfo().getNamespaceID(), + reg.getStorageInfo().getNamespaceID(), "namespace ID"); + checkNSEquality(bpRegistration.getStorageInfo().getClusterID(), + reg.getStorageInfo().getClusterID(), "cluster ID"); + } else { + bpRegistration = reg; + } + + dn.bpRegistrationSucceeded(bpRegistration, getBlockPoolId()); + // Add the initial block token secret keys to the DN's secret manager. + if (dn.isBlockTokenEnabled) { + dn.blockPoolTokenSecretManager.addKeys(getBlockPoolId(), + reg.getExportedKeys()); + } + } finally { + writeUnlock(); } } @@ -333,25 +383,35 @@ class BPOfferService { } } - synchronized DatanodeRegistration createRegistration() { - Preconditions.checkState(bpNSInfo != null, - "getRegistration() can only be called after initial handshake"); - return dn.createBPRegistration(bpNSInfo); + DatanodeRegistration createRegistration() { + writeLock(); + try { + Preconditions.checkState(bpNSInfo != null, + "getRegistration() can only be called after initial handshake"); + return dn.createBPRegistration(bpNSInfo); + } finally { + writeUnlock(); + } } /** * Called when an actor shuts down. If this is the last actor * to shut down, shuts down the whole blockpool in the DN. */ - synchronized void shutdownActor(BPServiceActor actor) { - if (bpServiceToActive == actor) { - bpServiceToActive = null; - } + void shutdownActor(BPServiceActor actor) { + writeLock(); + try { + if (bpServiceToActive == actor) { + bpServiceToActive = null; + } - bpServices.remove(actor); + bpServices.remove(actor); - if (bpServices.isEmpty()) { - dn.shutdownBlockPool(this); + if (bpServices.isEmpty()) { + dn.shutdownBlockPool(this); + } + } finally { + writeUnlock(); } } @@ -392,11 +452,16 @@ class BPOfferService { * @return a proxy to the active NN, or null if the BPOS has not * acknowledged any NN as active yet. 
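Namespace verification above boils down to comparing the blockpool, namespace, and cluster identifiers reported by each NameNode and rejecting the handshake on any mismatch. A sketch of a checkNSEquality-style helper (the exception wording is illustrative):

import java.io.IOException;

class NsEqualitySketch {
  // Two NNs serving the same block pool must agree on the identifying fields.
  static void checkNSEquality(Object ours, Object theirs, String what)
      throws IOException {
    if (!ours.equals(theirs)) {
      throw new IOException("Mismatched " + what + ": this DN has " + ours
          + " but the remote NN reports " + theirs);
    }
  }

  public static void main(String[] args) throws IOException {
    checkNSEquality("BP-123", "BP-123", "Blockpool ID");       // accepted
    try {
      checkNSEquality("cluster-A", "cluster-B", "Cluster ID"); // rejected
    } catch (IOException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}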
*/ - synchronized DatanodeProtocolClientSideTranslatorPB getActiveNN() { - if (bpServiceToActive != null) { - return bpServiceToActive.bpNamenode; - } else { - return null; + DatanodeProtocolClientSideTranslatorPB getActiveNN() { + readLock(); + try { + if (bpServiceToActive != null) { + return bpServiceToActive.bpNamenode; + } else { + return null; + } + } finally { + readUnlock(); } } @@ -424,45 +489,50 @@ class BPOfferService { * @param actor the actor which received the heartbeat * @param nnHaState the HA-related heartbeat contents */ - synchronized void updateActorStatesFromHeartbeat( + void updateActorStatesFromHeartbeat( BPServiceActor actor, NNHAStatusHeartbeat nnHaState) { - final long txid = nnHaState.getTxId(); - - final boolean nnClaimsActive = - nnHaState.getState() == HAServiceState.ACTIVE; - final boolean bposThinksActive = bpServiceToActive == actor; - final boolean isMoreRecentClaim = txid > lastActiveClaimTxId; - - if (nnClaimsActive && !bposThinksActive) { - LOG.info("Namenode " + actor + " trying to claim ACTIVE state with " + - "txid=" + txid); - if (!isMoreRecentClaim) { - // Split-brain scenario - an NN is trying to claim active - // state when a different NN has already claimed it with a higher - // txid. - LOG.warn("NN " + actor + " tried to claim ACTIVE state at txid=" + - txid + " but there was already a more recent claim at txid=" + - lastActiveClaimTxId); - return; - } else { - if (bpServiceToActive == null) { - LOG.info("Acknowledging ACTIVE Namenode " + actor); + writeLock(); + try { + final long txid = nnHaState.getTxId(); + + final boolean nnClaimsActive = + nnHaState.getState() == HAServiceState.ACTIVE; + final boolean bposThinksActive = bpServiceToActive == actor; + final boolean isMoreRecentClaim = txid > lastActiveClaimTxId; + + if (nnClaimsActive && !bposThinksActive) { + LOG.info("Namenode " + actor + " trying to claim ACTIVE state with " + + "txid=" + txid); + if (!isMoreRecentClaim) { + // Split-brain scenario - an NN is trying to claim active + // state when a different NN has already claimed it with a higher + // txid. 
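updateActorStatesFromHeartbeat implements the split-brain guard spelled out in the comments above: an ACTIVE claim is honored only if it carries a transaction ID newer than the last accepted claim, and stale claims are logged and ignored. The arbitration in miniature (names are illustrative):

class ActiveClaimSketch {
  private String activeNn;               // currently acknowledged ACTIVE NN
  private long lastActiveClaimTxId = -1;

  void claimActive(String nn, long txid) {
    if (txid > lastActiveClaimTxId) {
      activeNn = nn;
      lastActiveClaimTxId = txid;
      System.out.println(activeNn + " acknowledged as ACTIVE at txid=" + txid);
    } else {
      // A different NN already claimed ACTIVE with a more recent txid.
      System.out.println("ignoring stale ACTIVE claim from " + nn + " at txid="
          + txid + " (already saw txid=" + lastActiveClaimTxId + ")");
    }
  }

  public static void main(String[] args) {
    ActiveClaimSketch s = new ActiveClaimSketch();
    s.claimActive("nn1", 100);   // accepted
    s.claimActive("nn2", 90);    // rejected: older claim
    s.claimActive("nn2", 120);   // accepted: newer claim
  }
}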
+ LOG.warn("NN " + actor + " tried to claim ACTIVE state at txid=" + + txid + " but there was already a more recent claim at txid=" + + lastActiveClaimTxId); + return; } else { - LOG.info("Namenode " + actor + " taking over ACTIVE state from " + - bpServiceToActive + " at higher txid=" + txid); + if (bpServiceToActive == null) { + LOG.info("Acknowledging ACTIVE Namenode " + actor); + } else { + LOG.info("Namenode " + actor + " taking over ACTIVE state from " + + bpServiceToActive + " at higher txid=" + txid); + } + bpServiceToActive = actor; } - bpServiceToActive = actor; + } else if (!nnClaimsActive && bposThinksActive) { + LOG.info("Namenode " + actor + " relinquishing ACTIVE state with " + + "txid=" + nnHaState.getTxId()); + bpServiceToActive = null; } - } else if (!nnClaimsActive && bposThinksActive) { - LOG.info("Namenode " + actor + " relinquishing ACTIVE state with " + - "txid=" + nnHaState.getTxId()); - bpServiceToActive = null; - } - - if (bpServiceToActive == actor) { - assert txid >= lastActiveClaimTxId; - lastActiveClaimTxId = txid; + + if (bpServiceToActive == actor) { + assert txid >= lastActiveClaimTxId; + lastActiveClaimTxId = txid; + } + } finally { + writeUnlock(); } } @@ -533,12 +603,15 @@ class BPOfferService { actor.reRegister(); return true; } - synchronized (this) { + writeLock(); + try { if (actor == bpServiceToActive) { return processCommandFromActive(cmd, actor); } else { return processCommandFromStandby(cmd, actor); } + } finally { + writeUnlock(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java index c8c3a7ec822..d065b5736e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java @@ -152,7 +152,7 @@ public class BlockPoolSliceStorage extends Storage { // During startup some of them can upgrade or roll back // while others could be up-to-date for the regular startup. 
for (int idx = 0; idx < getNumStorageDirs(); idx++) { - doTransition(getStorageDir(idx), nsInfo, startOpt); + doTransition(datanode, getStorageDir(idx), nsInfo, startOpt); assert getCTime() == nsInfo.getCTime() : "Data-node and name-node CTimes must be the same."; } @@ -242,7 +242,7 @@ public class BlockPoolSliceStorage extends Storage { * @param startOpt startup option * @throws IOException */ - private void doTransition(StorageDirectory sd, + private void doTransition(DataNode datanode, StorageDirectory sd, NamespaceInfo nsInfo, StartupOption startOpt) throws IOException { if (startOpt == StartupOption.ROLLBACK) { doRollback(sd, nsInfo); // rollback if applicable @@ -275,7 +275,7 @@ public class BlockPoolSliceStorage extends Storage { } if (this.layoutVersion > HdfsConstants.DATANODE_LAYOUT_VERSION || this.cTime < nsInfo.getCTime()) { - doUpgrade(sd, nsInfo); // upgrade + doUpgrade(datanode, sd, nsInfo); // upgrade return; } // layoutVersion == LAYOUT_VERSION && this.cTime > nsInfo.cTime @@ -304,7 +304,8 @@ public class BlockPoolSliceStorage extends Storage { * @param nsInfo Namespace Info from the namenode * @throws IOException on error */ - void doUpgrade(StorageDirectory bpSd, NamespaceInfo nsInfo) throws IOException { + void doUpgrade(DataNode datanode, StorageDirectory bpSd, NamespaceInfo nsInfo) + throws IOException { // Upgrading is applicable only to release with federation or after if (!DataNodeLayoutVersion.supports( LayoutVersion.Feature.FEDERATION, layoutVersion)) { @@ -312,7 +313,7 @@ public class BlockPoolSliceStorage extends Storage { } LOG.info("Upgrading block pool storage directory " + bpSd.getRoot() + ".\n old LV = " + this.getLayoutVersion() + "; old CTime = " - + this.getCTime() + ".\n new LV = " + nsInfo.getLayoutVersion() + + this.getCTime() + ".\n new LV = " + HdfsConstants.DATANODE_LAYOUT_VERSION + "; new CTime = " + nsInfo.getCTime()); // get /previous directory String dnRoot = getDataNodeStorageRoot(bpSd.getRoot().getCanonicalPath()); @@ -340,7 +341,7 @@ public class BlockPoolSliceStorage extends Storage { rename(bpCurDir, bpTmpDir); // 3. 
Create new /current with block files hardlinks and VERSION - linkAllBlocks(bpTmpDir, bpCurDir); + linkAllBlocks(datanode, bpTmpDir, bpCurDir); this.layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION; assert this.namespaceID == nsInfo.getNamespaceID() : "Data-node and name-node layout versions must be the same."; @@ -517,14 +518,15 @@ public class BlockPoolSliceStorage extends Storage { * @param toDir the current data directory * @throws IOException if error occurs during hardlink */ - private void linkAllBlocks(File fromDir, File toDir) throws IOException { + private void linkAllBlocks(DataNode datanode, File fromDir, File toDir) + throws IOException { // do the link int diskLayoutVersion = this.getLayoutVersion(); // hardlink finalized blocks in tmpDir HardLink hardLink = new HardLink(); - DataStorage.linkBlocks(new File(fromDir, DataStorage.STORAGE_DIR_FINALIZED), + DataStorage.linkBlocks(datanode, new File(fromDir, DataStorage.STORAGE_DIR_FINALIZED), new File(toDir,DataStorage.STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink); - DataStorage.linkBlocks(new File(fromDir, DataStorage.STORAGE_DIR_RBW), + DataStorage.linkBlocks(datanode, new File(fromDir, DataStorage.STORAGE_DIR_RBW), new File(toDir, DataStorage.STORAGE_DIR_RBW), diskLayoutVersion, hardLink); LOG.info( hardLink.linkStats.report() ); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java index 2f3909ba564..fb7ecd69e9a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java @@ -253,7 +253,7 @@ class BlockReceiver implements Closeable { if (cause != null) { // possible disk error ioe = cause; - datanode.checkDiskError(); + datanode.checkDiskErrorAsync(); } throw ioe; @@ -329,7 +329,7 @@ class BlockReceiver implements Closeable { } // disk check if(ioe != null) { - datanode.checkDiskError(); + datanode.checkDiskErrorAsync(); throw ioe; } } @@ -639,7 +639,7 @@ class BlockReceiver implements Closeable { manageWriterOsCache(offsetInBlock); } } catch (IOException iex) { - datanode.checkDiskError(); + datanode.checkDiskErrorAsync(); throw iex; } } @@ -1208,7 +1208,7 @@ class BlockReceiver implements Closeable { } catch (IOException e) { LOG.warn("IOException in BlockReceiver.run(): ", e); if (running) { - datanode.checkDiskError(); + datanode.checkDiskErrorAsync(); LOG.info(myString, e); running = false; if (!Thread.interrupted()) { // failure not caused by interruption diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index b55abed7e46..a3ace9b5125 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -1075,6 +1075,11 @@ public class DataNode extends Configured // In the case that this is the first block pool to connect, initialize // the dataset, block scanners, etc. initStorage(nsInfo); + + // Exclude failed disks before initializing the block pools to avoid startup + // failures. 
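Two related disk-error changes converge here: write-path failures in BlockReceiver now request an asynchronous check (checkDiskErrorAsync), while block-pool initialization runs a synchronous check first so that failed volumes are excluded before the pool comes up. A rough sketch of the flag-plus-background-thread shape of the async side:

class DiskCheckSketch {
  private final Object mutex = new Object();
  private boolean checkRequested = false;

  // Called from I/O paths; cheap, only flips a flag under a mutex.
  void checkDiskErrorAsync() {
    synchronized (mutex) {
      checkRequested = true;
    }
  }

  // Called synchronously, e.g. before bringing up a block pool.
  void checkDiskError() {
    System.out.println("scanning data dirs for failed volumes...");
  }

  // Body of the background checker thread; the DataNode's thread polls
  // the request flag roughly every 5 seconds.
  void backgroundLoop() throws InterruptedException {
    while (true) {
      boolean doCheck;
      synchronized (mutex) {
        doCheck = checkRequested;
        checkRequested = false;
      }
      if (doCheck) {
        checkDiskError();
      }
      Thread.sleep(5000);
    }
  }

  public static void main(String[] args) {
    DiskCheckSketch s = new DiskCheckSketch();
    s.checkDiskErrorAsync();    // request from an I/O path
    s.checkDiskError();         // synchronous check at block pool init
  }
}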
+ checkDiskError(); + initPeriodicScanners(conf); data.addBlockPool(nsInfo.getBlockPoolID(), conf); @@ -1510,9 +1515,9 @@ public class DataNode extends Configured /** - * Check if there is a disk failure and if so, handle the error + * Check if there is a disk failure asynchronously and if so, handle the error */ - public void checkDiskError() { + public void checkDiskErrorAsync() { synchronized(checkDiskErrorMutex) { checkDiskErrorFlag = true; if(checkDiskErrorThread == null) { @@ -1821,7 +1826,7 @@ public class DataNode extends Configured LOG.warn(bpReg + ":Failed to transfer " + b + " to " + targets[0] + " got ", ie); // check if there are any disk problem - checkDiskError(); + checkDiskErrorAsync(); } finally { xmitsInProgress.getAndDecrement(); IOUtils.closeStream(blockSender); @@ -2759,7 +2764,18 @@ public class DataNode extends Configured public ShortCircuitRegistry getShortCircuitRegistry() { return shortCircuitRegistry; } - + + /** + * Check the disk error + */ + private void checkDiskError() { + try { + data.checkDataDir(); + } catch (DiskErrorException de) { + handleDiskError(de.getMessage()); + } + } + /** * Starts a new thread which will check for disk error check request * every 5 sec @@ -2776,9 +2792,7 @@ public class DataNode extends Configured } if(tempFlag) { try { - data.checkDataDir(); - } catch (DiskErrorException de) { - handleDiskError(de.getMessage()); + checkDiskError(); } catch (Exception e) { LOG.warn("Unexpected exception occurred while checking disk error " + e); checkDiskErrorThread = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java index 26c7457645c..23e7cfe7184 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java @@ -62,7 +62,10 @@ public class DataNodeLayoutVersion { * */ public static enum Feature implements LayoutFeature { - FIRST_LAYOUT(-55, -53, "First datanode layout", false); + FIRST_LAYOUT(-55, -53, "First datanode layout", false), + BLOCKID_BASED_LAYOUT(-56, + "The block ID of a finalized block uniquely determines its position " + + "in the directory structure"); private final FeatureInfo info; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java index 5c5cecd58f0..5a55d094e11 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java @@ -18,13 +18,19 @@ package org.apache.hadoop.hdfs.server.datanode; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.Futures; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.*; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.HardLink; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import 
org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; @@ -35,13 +41,30 @@ import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.io.nativeio.NativeIO; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.DiskChecker; -import java.io.*; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.RandomAccessFile; import java.nio.channels.FileLock; -import java.util.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; /** * Data storage information file. @@ -261,6 +284,7 @@ public class DataStorage extends Storage { STORAGE_DIR_CURRENT)); bpDataDirs.add(bpRoot); } + // mkdir for the list of BlockPoolStorage makeBlockPoolDataDir(bpDataDirs, null); BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage( @@ -488,7 +512,7 @@ public class DataStorage extends Storage { // do upgrade if (this.layoutVersion > HdfsConstants.DATANODE_LAYOUT_VERSION) { - doUpgrade(sd, nsInfo); // upgrade + doUpgrade(datanode, sd, nsInfo); // upgrade return; } @@ -523,7 +547,8 @@ public class DataStorage extends Storage { * @param sd storage directory * @throws IOException on error */ - void doUpgrade(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException { + void doUpgrade(DataNode datanode, StorageDirectory sd, NamespaceInfo nsInfo) + throws IOException { // If the existing on-disk layout version supportes federation, simply // update its layout version. if (DataNodeLayoutVersion.supports( @@ -568,7 +593,8 @@ public class DataStorage extends Storage { BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(nsInfo.getNamespaceID(), nsInfo.getBlockPoolID(), nsInfo.getCTime(), nsInfo.getClusterID()); bpStorage.format(curDir, nsInfo); - linkAllBlocks(tmpDir, bbwDir, new File(curBpDir, STORAGE_DIR_CURRENT)); + linkAllBlocks(datanode, tmpDir, bbwDir, new File(curBpDir, + STORAGE_DIR_CURRENT)); // 4. 
Write version file under /current layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION; @@ -746,22 +772,22 @@ public class DataStorage extends Storage { * * @throws IOException If error occurs during hardlink */ - private void linkAllBlocks(File fromDir, File fromBbwDir, File toDir) - throws IOException { + private void linkAllBlocks(DataNode datanode, File fromDir, File fromBbwDir, + File toDir) throws IOException { HardLink hardLink = new HardLink(); // do the link int diskLayoutVersion = this.getLayoutVersion(); if (DataNodeLayoutVersion.supports( LayoutVersion.Feature.APPEND_RBW_DIR, diskLayoutVersion)) { // hardlink finalized blocks in tmpDir/finalized - linkBlocks(new File(fromDir, STORAGE_DIR_FINALIZED), + linkBlocks(datanode, new File(fromDir, STORAGE_DIR_FINALIZED), new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink); // hardlink rbw blocks in tmpDir/rbw - linkBlocks(new File(fromDir, STORAGE_DIR_RBW), + linkBlocks(datanode, new File(fromDir, STORAGE_DIR_RBW), new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink); } else { // pre-RBW version // hardlink finalized blocks in tmpDir - linkBlocks(fromDir, new File(toDir, STORAGE_DIR_FINALIZED), + linkBlocks(datanode, fromDir, new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink); if (fromBbwDir.exists()) { /* @@ -770,15 +796,67 @@ public class DataStorage extends Storage { * NOT underneath the 'current' directory in those releases. See * HDFS-3731 for details. */ - linkBlocks(fromBbwDir, + linkBlocks(datanode, fromBbwDir, new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink); } } LOG.info( hardLink.linkStats.report() ); } + + private static class LinkArgs { + public File src; + public File dst; + + public LinkArgs(File src, File dst) { + this.src = src; + this.dst = dst; + } + } + + static void linkBlocks(DataNode datanode, File from, File to, int oldLV, + HardLink hl) throws IOException { + boolean upgradeToIdBasedLayout = false; + // If we are upgrading from a version older than the one where we introduced + // block ID-based layout AND we're working with the finalized directory, + // we'll need to upgrade from the old flat layout to the block ID-based one + if (oldLV > DataNodeLayoutVersion.Feature.BLOCKID_BASED_LAYOUT.getInfo(). 
+ getLayoutVersion() && to.getName().equals(STORAGE_DIR_FINALIZED)) { + upgradeToIdBasedLayout = true; + } + + final List idBasedLayoutSingleLinks = Lists.newArrayList(); + linkBlocksHelper(from, to, oldLV, hl, upgradeToIdBasedLayout, to, + idBasedLayoutSingleLinks); + int numLinkWorkers = datanode.getConf().getInt( + DFSConfigKeys.DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS_KEY, + DFSConfigKeys.DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS); + ExecutorService linkWorkers = Executors.newFixedThreadPool(numLinkWorkers); + final int step = idBasedLayoutSingleLinks.size() / numLinkWorkers + 1; + List> futures = Lists.newArrayList(); + for (int i = 0; i < idBasedLayoutSingleLinks.size(); i += step) { + final int iCopy = i; + futures.add(linkWorkers.submit(new Callable() { + @Override + public Void call() throws IOException { + int upperBound = Math.min(iCopy + step, + idBasedLayoutSingleLinks.size()); + for (int j = iCopy; j < upperBound; j++) { + LinkArgs cur = idBasedLayoutSingleLinks.get(j); + NativeIO.link(cur.src, cur.dst); + } + return null; + } + })); + } + linkWorkers.shutdown(); + for (Future f : futures) { + Futures.get(f, IOException.class); + } + } - static void linkBlocks(File from, File to, int oldLV, HardLink hl) - throws IOException { + static void linkBlocksHelper(File from, File to, int oldLV, HardLink hl, + boolean upgradeToIdBasedLayout, File blockRoot, + List idBasedLayoutSingleLinks) throws IOException { if (!from.exists()) { return; } @@ -805,9 +883,6 @@ public class DataStorage extends Storage { // from is a directory hl.linkStats.countDirs++; - if (!to.mkdirs()) - throw new IOException("Cannot create directory " + to); - String[] blockNames = from.list(new java.io.FilenameFilter() { @Override public boolean accept(File dir, String name) { @@ -815,12 +890,36 @@ public class DataStorage extends Storage { } }); + // If we are upgrading to block ID-based layout, we don't want to recreate + // any subdirs from the source that contain blocks, since we have a new + // directory structure + if (!upgradeToIdBasedLayout || !to.getName().startsWith( + BLOCK_SUBDIR_PREFIX)) { + if (!to.mkdirs()) + throw new IOException("Cannot create directory " + to); + } + // Block files just need hard links with the same file names // but a different directory if (blockNames.length > 0) { - HardLink.createHardLinkMult(from, blockNames, to); - hl.linkStats.countMultLinks++; - hl.linkStats.countFilesMultLinks += blockNames.length; + if (upgradeToIdBasedLayout) { + for (String blockName : blockNames) { + long blockId = Block.getBlockId(blockName); + File blockLocation = DatanodeUtil.idToBlockDir(blockRoot, blockId); + if (!blockLocation.exists()) { + if (!blockLocation.mkdirs()) { + throw new IOException("Failed to mkdirs " + blockLocation); + } + } + idBasedLayoutSingleLinks.add(new LinkArgs(new File(from, blockName), + new File(blockLocation, blockName))); + hl.linkStats.countSingleLinks++; + } + } else { + HardLink.createHardLinkMult(from, blockNames, to); + hl.linkStats.countMultLinks++; + hl.linkStats.countFilesMultLinks += blockNames.length; + } } else { hl.linkStats.countEmptyDirs++; } @@ -834,8 +933,9 @@ public class DataStorage extends Storage { } }); for(int i = 0; i < otherNames.length; i++) - linkBlocks(new File(from, otherNames[i]), - new File(to, otherNames[i]), oldLV, hl); + linkBlocksHelper(new File(from, otherNames[i]), + new File(to, otherNames[i]), oldLV, hl, upgradeToIdBasedLayout, + blockRoot, idBasedLayoutSingleLinks); } /** diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java index 0a0d57bd6e3..bd1ba2f0908 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java @@ -30,6 +30,8 @@ public class DatanodeUtil { public static final String DISK_ERROR = "Possible disk error: "; + private static final String SEP = System.getProperty("file.separator"); + /** Get the cause of an I/O exception if caused by a possible disk error * @param ioe an I/O exception * @return cause if the I/O exception is caused by a possible disk error; @@ -78,4 +80,38 @@ public class DatanodeUtil { public static File getUnlinkTmpFile(File f) { return new File(f.getParentFile(), f.getName()+UNLINK_BLOCK_SUFFIX); } + + /** + * Checks whether there are any files anywhere in the directory tree rooted + * at dir (directories don't count as files). dir must exist + * @return true if there are no files + * @throws IOException if unable to list subdirectories + */ + public static boolean dirNoFilesRecursive(File dir) throws IOException { + File[] contents = dir.listFiles(); + if (contents == null) { + throw new IOException("Cannot list contents of " + dir); + } + for (File f : contents) { + if (!f.isDirectory() || (f.isDirectory() && !dirNoFilesRecursive(f))) { + return false; + } + } + return true; + } + + /** + * Get the directory where a finalized block with this ID should be stored. + * Do not attempt to create the directory. + * @param root the root directory where finalized blocks are stored + * @param blockId + * @return + */ + public static File idToBlockDir(File root, long blockId) { + int d1 = (int)((blockId >> 16) & 0xff); + int d2 = (int)((blockId >> 8) & 0xff); + String path = DataStorage.BLOCK_SUBDIR_PREFIX + d1 + SEP + + DataStorage.BLOCK_SUBDIR_PREFIX + d2; + return new File(root, path); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java index 738e16df78f..0dcdf0573e0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java @@ -54,10 +54,10 @@ abstract public class ReplicaInfo extends Block implements Replica { private File baseDir; /** - * Ints representing the sub directory path from base dir to the directory - * containing this replica. + * Whether or not this replica's parent directory includes subdirs, in which + * case we can generate them based on the replica's block ID */ - private int[] subDirs; + private boolean hasSubdirs; private static final Map internedBaseDirs = new HashMap(); @@ -151,18 +151,8 @@ abstract public class ReplicaInfo extends Block implements Replica { * @return the parent directory path where this replica is located */ File getDir() { - if (subDirs == null) { - return null; - } - - StringBuilder sb = new StringBuilder(); - for (int i : subDirs) { - sb.append(DataStorage.BLOCK_SUBDIR_PREFIX); - sb.append(i); - sb.append("/"); - } - File ret = new File(baseDir, sb.toString()); - return ret; + return hasSubdirs ? 
DatanodeUtil.idToBlockDir(baseDir, + getBlockId()) : baseDir; } /** @@ -175,54 +165,46 @@ abstract public class ReplicaInfo extends Block implements Replica { private void setDirInternal(File dir) { if (dir == null) { - subDirs = null; baseDir = null; return; } - ReplicaDirInfo replicaDirInfo = parseSubDirs(dir); - this.subDirs = replicaDirInfo.subDirs; + ReplicaDirInfo dirInfo = parseBaseDir(dir); + this.hasSubdirs = dirInfo.hasSubidrs; synchronized (internedBaseDirs) { - if (!internedBaseDirs.containsKey(replicaDirInfo.baseDirPath)) { + if (!internedBaseDirs.containsKey(dirInfo.baseDirPath)) { // Create a new String path of this file and make a brand new File object // to guarantee we drop the reference to the underlying char[] storage. - File baseDir = new File(replicaDirInfo.baseDirPath); - internedBaseDirs.put(replicaDirInfo.baseDirPath, baseDir); + File baseDir = new File(dirInfo.baseDirPath); + internedBaseDirs.put(dirInfo.baseDirPath, baseDir); } - this.baseDir = internedBaseDirs.get(replicaDirInfo.baseDirPath); + this.baseDir = internedBaseDirs.get(dirInfo.baseDirPath); } } - + @VisibleForTesting public static class ReplicaDirInfo { - @VisibleForTesting public String baseDirPath; - - @VisibleForTesting - public int[] subDirs; + public boolean hasSubidrs; + + public ReplicaDirInfo (String baseDirPath, boolean hasSubidrs) { + this.baseDirPath = baseDirPath; + this.hasSubidrs = hasSubidrs; + } } @VisibleForTesting - public static ReplicaDirInfo parseSubDirs(File dir) { - ReplicaDirInfo ret = new ReplicaDirInfo(); + public static ReplicaDirInfo parseBaseDir(File dir) { File currentDir = dir; - List subDirList = new ArrayList(); + boolean hasSubdirs = false; while (currentDir.getName().startsWith(DataStorage.BLOCK_SUBDIR_PREFIX)) { - // Prepend the integer into the list. 
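The ID-based layout introduced above derives a finalized replica's parent directory purely from its block ID (DatanodeUtil.idToBlockDir), which is why ReplicaInfo can drop the subDirs array and keep only a hasSubdirs flag. A minimal standalone sketch of that mapping, assuming the "subdir" prefix that DataStorage.BLOCK_SUBDIR_PREFIX denotes elsewhere in this patch; the class name and sample root path are made up for illustration:

import java.io.File;

public class BlockDirMappingSketch {
  // Stand-in for DataStorage.BLOCK_SUBDIR_PREFIX ("subdirN" naming as in the
  // old flat layout described in the removed LDir code).
  private static final String SUBDIR_PREFIX = "subdir";

  // Mirrors DatanodeUtil.idToBlockDir: two fixed 8-bit buckets taken from
  // bits 8..23 of the block ID, giving at most 256 x 256 leaf directories.
  static File idToBlockDir(File finalizedRoot, long blockId) {
    int d1 = (int) ((blockId >> 16) & 0xff);
    int d2 = (int) ((blockId >> 8) & 0xff);
    return new File(finalizedRoot,
        SUBDIR_PREFIX + d1 + File.separator + SUBDIR_PREFIX + d2);
  }

  public static void main(String[] args) {
    File root = new File("/data/dn1/current/finalized");  // illustrative path
    System.out.println(idToBlockDir(root, 1073741825L));
    System.out.println(idToBlockDir(root, 1073872899L));
  }
}

The point of the fixed mapping is that no per-replica bookkeeping (and no LDir tree) is needed: given only the block ID, both the writer and the directory scanner recompute the same location.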
- subDirList.add(0, Integer.parseInt(currentDir.getName().replaceFirst( - DataStorage.BLOCK_SUBDIR_PREFIX, ""))); + hasSubdirs = true; currentDir = currentDir.getParentFile(); } - ret.subDirs = new int[subDirList.size()]; - for (int i = 0; i < subDirList.size(); i++) { - ret.subDirs[i] = subDirList.get(i); - } - ret.baseDirPath = currentDir.getAbsolutePath(); - - return ret; + return new ReplicaDirInfo(currentDir.getAbsolutePath(), hasSubdirs); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java index 6093339bdcb..af467b93f09 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java @@ -59,7 +59,8 @@ class BlockPoolSlice { private final String bpid; private final FsVolumeImpl volume; // volume to which this BlockPool belongs to private final File currentDir; // StorageDirectory/current/bpid/current - private final LDir finalizedDir; // directory store Finalized replica + // directory where finalized replicas are stored + private final File finalizedDir; private final File rbwDir; // directory store RBW replica private final File tmpDir; // directory store Temporary replica private static final String DU_CACHE_FILE = "dfsUsed"; @@ -82,8 +83,13 @@ class BlockPoolSlice { this.bpid = bpid; this.volume = volume; this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); - final File finalizedDir = new File( + this.finalizedDir = new File( currentDir, DataStorage.STORAGE_DIR_FINALIZED); + if (!this.finalizedDir.exists()) { + if (!this.finalizedDir.mkdirs()) { + throw new IOException("Failed to mkdirs " + this.finalizedDir); + } + } // Files that were being written when the datanode was last shutdown // are now moved back to the data directory. 
It is possible that @@ -95,10 +101,6 @@ class BlockPoolSlice { FileUtil.fullyDelete(tmpDir); } this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW); - final int maxBlocksPerDir = conf.getInt( - DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_KEY, - DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_DEFAULT); - this.finalizedDir = new LDir(finalizedDir, maxBlocksPerDir); if (!rbwDir.mkdirs()) { // create rbw directory if not exist if (!rbwDir.isDirectory()) { throw new IOException("Mkdirs failed to create " + rbwDir.toString()); @@ -131,7 +133,7 @@ class BlockPoolSlice { } File getFinalizedDir() { - return finalizedDir.dir; + return finalizedDir; } File getRbwDir() { @@ -239,25 +241,56 @@ class BlockPoolSlice { } File addBlock(Block b, File f) throws IOException { - File blockFile = finalizedDir.addBlock(b, f); + File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId()); + if (!blockDir.exists()) { + if (!blockDir.mkdirs()) { + throw new IOException("Failed to mkdirs " + blockDir); + } + } + File blockFile = FsDatasetImpl.moveBlockFiles(b, f, blockDir); File metaFile = FsDatasetUtil.getMetaFile(blockFile, b.getGenerationStamp()); dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length()); return blockFile; } void checkDirs() throws DiskErrorException { - finalizedDir.checkDirTree(); + DiskChecker.checkDirs(finalizedDir); DiskChecker.checkDir(tmpDir); DiskChecker.checkDir(rbwDir); } void getVolumeMap(ReplicaMap volumeMap) throws IOException { // add finalized replicas - finalizedDir.getVolumeMap(bpid, volumeMap, volume); + addToReplicasMap(volumeMap, finalizedDir, true); // add rbw replicas addToReplicasMap(volumeMap, rbwDir, false); } + /** + * Recover an unlinked tmp file on datanode restart. If the original block + * does not exist, then the tmp file is renamed to be the + * original file name and the original name is returned; otherwise the tmp + * file is deleted and null is returned. + */ + File recoverTempUnlinkedBlock(File unlinkedTmp) throws IOException { + File blockFile = FsDatasetUtil.getOrigFile(unlinkedTmp); + if (blockFile.exists()) { + // If the original block file still exists, then no recovery is needed. 
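The DataStorage.linkBlocks change earlier in this patch collects every single-file hard link needed for the new layout and then fans them out over a thread pool sized by dfs.datanode.block.id.layout.upgrade.threads. A rough standalone sketch of that partitioning pattern only; the task list, runInRanges name and Runnable stand-in are illustrative, not the patch's own types:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ParallelLinkSketch {
  /** Split tasks into contiguous ranges and run each range on one worker. */
  static void runInRanges(final List<Runnable> tasks, int numWorkers)
      throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(numWorkers);
    // Same range computation as the patch: ceiling-style step per worker.
    final int step = tasks.size() / numWorkers + 1;
    List<Future<Void>> futures = new ArrayList<Future<Void>>();
    for (int i = 0; i < tasks.size(); i += step) {
      final int start = i;
      futures.add(pool.submit(new Callable<Void>() {
        @Override
        public Void call() {
          int end = Math.min(start + step, tasks.size());
          for (int j = start; j < end; j++) {
            tasks.get(j).run();   // stands in for NativeIO.link(src, dst)
          }
          return null;
        }
      }));
    }
    pool.shutdown();
    for (Future<Void> f : futures) {
      f.get();                    // surface any worker failure
    }
  }
}

Each worker receives one contiguous range of roughly size/numWorkers links, so the upgrade pays a per-range submission cost rather than a per-link one.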
+ if (!unlinkedTmp.delete()) { + throw new IOException("Unable to cleanup unlinked tmp file " + + unlinkedTmp); + } + return null; + } else { + if (!unlinkedTmp.renameTo(blockFile)) { + throw new IOException("Unable to rename unlinked tmp file " + + unlinkedTmp); + } + return blockFile; + } + } + + /** * Add replicas under the given directory to the volume map * @param volumeMap the replicas map @@ -267,23 +300,34 @@ class BlockPoolSlice { */ void addToReplicasMap(ReplicaMap volumeMap, File dir, boolean isFinalized ) throws IOException { - File blockFiles[] = FileUtil.listFiles(dir); - for (File blockFile : blockFiles) { - if (!Block.isBlockFilename(blockFile)) + File files[] = FileUtil.listFiles(dir); + for (File file : files) { + if (file.isDirectory()) { + addToReplicasMap(volumeMap, file, isFinalized); + } + + if (isFinalized && FsDatasetUtil.isUnlinkTmpFile(file)) { + file = recoverTempUnlinkedBlock(file); + if (file == null) { // the original block still exists, so we cover it + // in another iteration and can continue here + continue; + } + } + if (!Block.isBlockFilename(file)) continue; long genStamp = FsDatasetUtil.getGenerationStampFromFile( - blockFiles, blockFile); - long blockId = Block.filename2id(blockFile.getName()); + files, file); + long blockId = Block.filename2id(file.getName()); ReplicaInfo newReplica = null; if (isFinalized) { newReplica = new FinalizedReplica(blockId, - blockFile.length(), genStamp, volume, blockFile.getParentFile()); + file.length(), genStamp, volume, file.getParentFile()); } else { boolean loadRwr = true; - File restartMeta = new File(blockFile.getParent() + - File.pathSeparator + "." + blockFile.getName() + ".restart"); + File restartMeta = new File(file.getParent() + + File.pathSeparator + "." + file.getName() + ".restart"); Scanner sc = null; try { sc = new Scanner(restartMeta); @@ -291,8 +335,8 @@ class BlockPoolSlice { if (sc.hasNextLong() && (sc.nextLong() > Time.now())) { // It didn't expire. Load the replica as a RBW. newReplica = new ReplicaBeingWritten(blockId, - validateIntegrityAndSetLength(blockFile, genStamp), - genStamp, volume, blockFile.getParentFile(), null); + validateIntegrityAndSetLength(file, genStamp), + genStamp, volume, file.getParentFile(), null); loadRwr = false; } sc.close(); @@ -301,7 +345,7 @@ class BlockPoolSlice { restartMeta.getPath()); } } catch (FileNotFoundException fnfe) { - // nothing to do here + // nothing to do hereFile dir = } finally { if (sc != null) { sc.close(); @@ -310,15 +354,15 @@ class BlockPoolSlice { // Restart meta doesn't exist or expired. 
if (loadRwr) { newReplica = new ReplicaWaitingToBeRecovered(blockId, - validateIntegrityAndSetLength(blockFile, genStamp), - genStamp, volume, blockFile.getParentFile()); + validateIntegrityAndSetLength(file, genStamp), + genStamp, volume, file.getParentFile()); } } ReplicaInfo oldReplica = volumeMap.add(bpid, newReplica); if (oldReplica != null) { FsDatasetImpl.LOG.warn("Two block files with the same block id exist " + - "on disk: " + oldReplica.getBlockFile() + " and " + blockFile ); + "on disk: " + oldReplica.getBlockFile() + " and " + file ); } } } @@ -405,10 +449,6 @@ class BlockPoolSlice { } } - void clearPath(File f) { - finalizedDir.clearPath(f); - } - @Override public String toString() { return currentDir.getAbsolutePath(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index e8a06aec8ac..a43ef849202 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -1151,7 +1151,7 @@ class FsDatasetImpl implements FsDatasetSpi { return f; // if file is not null, but doesn't exist - possibly disk failed - datanode.checkDiskError(); + datanode.checkDiskErrorAsync(); } if (LOG.isDebugEnabled()) { @@ -1224,13 +1224,6 @@ class FsDatasetImpl implements FsDatasetSpi { + ". Parent not found for file " + f); continue; } - ReplicaState replicaState = info.getState(); - if (replicaState == ReplicaState.FINALIZED || - (replicaState == ReplicaState.RUR && - ((ReplicaUnderRecovery)info).getOriginalReplica().getState() == - ReplicaState.FINALIZED)) { - v.clearPath(bpid, parent); - } volumeMap.remove(bpid, invalidBlks[i]); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java index 795fab1f3a5..adfc896f7f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.datanode.DataStorage; +import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.util.DiskChecker.DiskErrorException; @@ -235,10 +236,6 @@ class FsVolumeImpl implements FsVolumeSpi { // dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length()); bp.addToReplicasMap(volumeMap, dir, isFinalized); } - - void clearPath(String bpid, File f) throws IOException { - getBlockPoolSlice(bpid).clearPath(f); - } @Override public String toString() { @@ -274,7 +271,8 @@ class FsVolumeImpl implements FsVolumeSpi { File finalizedDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_FINALIZED); File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW); - if (finalizedDir.exists() && FileUtil.list(finalizedDir).length != 0) { + if 
(finalizedDir.exists() && !DatanodeUtil.dirNoFilesRecursive( + finalizedDir)) { return false; } if (rbwDir.exists() && FileUtil.list(rbwDir).length != 0) { @@ -301,7 +299,8 @@ class FsVolumeImpl implements FsVolumeSpi { if (!rbwDir.delete()) { throw new IOException("Failed to delete " + rbwDir); } - if (!finalizedDir.delete()) { + if (!DatanodeUtil.dirNoFilesRecursive(finalizedDir) || + !FileUtil.fullyDelete(finalizedDir)) { throw new IOException("Failed to delete " + finalizedDir); } FileUtil.fullyDelete(tmpDir); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LDir.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LDir.java deleted file mode 100644 index 991b58b3ae2..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LDir.java +++ /dev/null @@ -1,228 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.datanode.DataStorage; -import org.apache.hadoop.util.DiskChecker; -import org.apache.hadoop.util.DiskChecker.DiskErrorException; - -/** - * A node type that can be built into a tree reflecting the - * hierarchy of replicas on the local disk. - */ -class LDir { - final File dir; - final int maxBlocksPerDir; - - private int numBlocks = 0; - private LDir[] children = null; - private int lastChildIdx = 0; - - LDir(File dir, int maxBlocksPerDir) throws IOException { - this.dir = dir; - this.maxBlocksPerDir = maxBlocksPerDir; - - if (!dir.exists()) { - if (!dir.mkdirs()) { - throw new IOException("Failed to mkdirs " + dir); - } - } else { - File[] files = FileUtil.listFiles(dir); - List dirList = new ArrayList(); - for (int idx = 0; idx < files.length; idx++) { - if (files[idx].isDirectory()) { - dirList.add(new LDir(files[idx], maxBlocksPerDir)); - } else if (Block.isBlockFilename(files[idx])) { - numBlocks++; - } - } - if (dirList.size() > 0) { - children = dirList.toArray(new LDir[dirList.size()]); - } - } - } - - File addBlock(Block b, File src) throws IOException { - //First try without creating subdirectories - File file = addBlock(b, src, false, false); - return (file != null) ? 
file : addBlock(b, src, true, true); - } - - private File addBlock(Block b, File src, boolean createOk, boolean resetIdx - ) throws IOException { - if (numBlocks < maxBlocksPerDir) { - final File dest = FsDatasetImpl.moveBlockFiles(b, src, dir); - numBlocks += 1; - return dest; - } - - if (lastChildIdx < 0 && resetIdx) { - //reset so that all children will be checked - lastChildIdx = DFSUtil.getRandom().nextInt(children.length); - } - - if (lastChildIdx >= 0 && children != null) { - //Check if any child-tree has room for a block. - for (int i=0; i < children.length; i++) { - int idx = (lastChildIdx + i)%children.length; - File file = children[idx].addBlock(b, src, false, resetIdx); - if (file != null) { - lastChildIdx = idx; - return file; - } - } - lastChildIdx = -1; - } - - if (!createOk) { - return null; - } - - if (children == null || children.length == 0) { - children = new LDir[maxBlocksPerDir]; - for (int idx = 0; idx < maxBlocksPerDir; idx++) { - final File sub = new File(dir, DataStorage.BLOCK_SUBDIR_PREFIX+idx); - children[idx] = new LDir(sub, maxBlocksPerDir); - } - } - - //now pick a child randomly for creating a new set of subdirs. - lastChildIdx = DFSUtil.getRandom().nextInt(children.length); - return children[ lastChildIdx ].addBlock(b, src, true, false); - } - - void getVolumeMap(String bpid, ReplicaMap volumeMap, FsVolumeImpl volume - ) throws IOException { - if (children != null) { - for (int i = 0; i < children.length; i++) { - children[i].getVolumeMap(bpid, volumeMap, volume); - } - } - - recoverTempUnlinkedBlock(); - volume.addToReplicasMap(bpid, volumeMap, dir, true); - } - - /** - * Recover unlinked tmp files on datanode restart. If the original block - * does not exist, then the tmp file is renamed to be the - * original file name; otherwise the tmp file is deleted. - */ - private void recoverTempUnlinkedBlock() throws IOException { - File files[] = FileUtil.listFiles(dir); - for (File file : files) { - if (!FsDatasetUtil.isUnlinkTmpFile(file)) { - continue; - } - File blockFile = FsDatasetUtil.getOrigFile(file); - if (blockFile.exists()) { - // If the original block file still exists, then no recovery is needed. - if (!file.delete()) { - throw new IOException("Unable to cleanup unlinked tmp file " + file); - } - } else { - if (!file.renameTo(blockFile)) { - throw new IOException("Unable to cleanup detached file " + file); - } - } - } - } - - /** - * check if a data diretory is healthy - * @throws DiskErrorException - */ - void checkDirTree() throws DiskErrorException { - DiskChecker.checkDir(dir); - - if (children != null) { - for (int i = 0; i < children.length; i++) { - children[i].checkDirTree(); - } - } - } - - void clearPath(File f) { - String root = dir.getAbsolutePath(); - String dir = f.getAbsolutePath(); - if (dir.startsWith(root)) { - String[] dirNames = dir.substring(root.length()). - split(File.separator + DataStorage.BLOCK_SUBDIR_PREFIX); - if (clearPath(f, dirNames, 1)) - return; - } - clearPath(f, null, -1); - } - - /** - * dirNames is an array of string integers derived from - * usual directory structure data/subdirN/subdirXY/subdirM ... - * If dirName array is non-null, we only check the child at - * the children[dirNames[idx]]. This avoids iterating over - * children in common case. If directory structure changes - * in later versions, we need to revisit this. 
- */ - private boolean clearPath(File f, String[] dirNames, int idx) { - if ((dirNames == null || idx == dirNames.length) && - dir.compareTo(f) == 0) { - numBlocks--; - return true; - } - - if (dirNames != null) { - //guess the child index from the directory name - if (idx > (dirNames.length - 1) || children == null) { - return false; - } - int childIdx; - try { - childIdx = Integer.parseInt(dirNames[idx]); - } catch (NumberFormatException ignored) { - // layout changed? we could print a warning. - return false; - } - return (childIdx >= 0 && childIdx < children.length) ? - children[childIdx].clearPath(f, dirNames, idx+1) : false; - } - - //guesses failed. back to blind iteration. - if (children != null) { - for(int i=0; i < children.length; i++) { - if (children[i].clearPath(f, null, -1)){ - return true; - } - } - } - return false; - } - - @Override - public String toString() { - return "FSDir{dir=" + dir + ", children=" - + (children == null ? null : Arrays.asList(children)) + "}"; - } -} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index bb687d7940d..1c992765065 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -1103,9 +1103,6 @@ public class FSDirectory implements Closeable { count++; } - // update inodeMap - removeFromInodeMap(Arrays.asList(allSrcInodes)); - trgInode.setModificationTime(timestamp, trgLatestSnapshot); trgParent.updateModificationTime(timestamp, trgLatestSnapshot); // update quota on the parent directory ('count' files removed, 0 space) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index da9dcfd1f9a..6de30adae7b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -4585,8 +4585,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats, // Otherwise fsck will report these blocks as MISSING, especially if the // blocksReceived from Datanodes take a long time to arrive. 
for (int i = 0; i < trimmedTargets.size(); i++) { - trimmedTargets.get(i).addBlock( - trimmedStorages.get(i), storedBlock); + DatanodeStorageInfo storageInfo = + trimmedTargets.get(i).getStorageInfo(trimmedStorages.get(i)); + if (storageInfo != null) { + storageInfo.addBlock(storedBlock); + } } } @@ -6066,7 +6069,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, } public void processIncrementalBlockReport(final DatanodeID nodeID, - final String poolId, final StorageReceivedDeletedBlocks srdb) + final StorageReceivedDeletedBlocks srdb) throws IOException { writeLock(); try { @@ -8824,6 +8827,29 @@ public class FSNamesystem implements Namesystem, FSClusterStats, } } + void checkAccess(String src, FsAction mode) throws AccessControlException, + FileNotFoundException, UnresolvedLinkException, IOException { + checkOperation(OperationCategory.READ); + byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); + readLock(); + try { + checkOperation(OperationCategory.READ); + src = FSDirectory.resolvePath(src, pathComponents, dir); + if (dir.getINode(src) == null) { + throw new FileNotFoundException("Path not found"); + } + if (isPermissionEnabled) { + FSPermissionChecker pc = getPermissionChecker(); + checkPathAccess(pc, src, mode); + } + } catch (AccessControlException e) { + logAuditEvent(false, "checkAccess", src); + throw e; + } finally { + readUnlock(); + } + } + /** * Default AuditLogger implementation; used when no access logger is * defined in the config file. It can also be explicitly listed in the diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 9fb0c33b6b7..9cbba2fac99 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -55,6 +55,7 @@ import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.ha.HAServiceStatus; import org.apache.hadoop.ha.HealthCheckFailedException; @@ -1067,7 +1068,7 @@ class NameNodeRpcServer implements NamenodeProtocols { // for the same node and storage, so the value returned by the last // call of this loop is the final updated value for noStaleStorage. 
// - noStaleStorages = bm.processReport(nodeReg, r.getStorage(), poolId, blocks); + noStaleStorages = bm.processReport(nodeReg, r.getStorage(), blocks); metrics.incrStorageBlockReportOps(); } @@ -1103,7 +1104,7 @@ class NameNodeRpcServer implements NamenodeProtocols { +" blocks."); } for(StorageReceivedDeletedBlocks r : receivedAndDeletedBlocks) { - namesystem.processIncrementalBlockReport(nodeReg, poolId, r); + namesystem.processIncrementalBlockReport(nodeReg, r); } } @@ -1458,5 +1459,10 @@ class NameNodeRpcServer implements NamenodeProtocols { public void removeXAttr(String src, XAttr xAttr) throws IOException { namesystem.removeXAttr(src, xAttr); } + + @Override + public void checkAccess(String path, FsAction mode) throws IOException { + namesystem.checkAccess(path, mode); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java index d7235b38727..991885b2e40 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java @@ -57,6 +57,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -112,6 +113,7 @@ import org.apache.hadoop.hdfs.web.resources.XAttrEncodingParam; import org.apache.hadoop.hdfs.web.resources.XAttrNameParam; import org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam; import org.apache.hadoop.hdfs.web.resources.XAttrValueParam; +import org.apache.hadoop.hdfs.web.resources.FsActionParam; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.RetriableException; import org.apache.hadoop.ipc.Server; @@ -755,10 +757,12 @@ public class NamenodeWebHdfsMethods { @QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT) final XAttrEncodingParam xattrEncoding, @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT) - final ExcludeDatanodesParam excludeDatanodes + final ExcludeDatanodesParam excludeDatanodes, + @QueryParam(FsActionParam.NAME) @DefaultValue(FsActionParam.DEFAULT) + final FsActionParam fsAction ) throws IOException, InterruptedException { return get(ugi, delegation, username, doAsUser, ROOT, op, offset, length, - renewer, bufferSize, xattrNames, xattrEncoding, excludeDatanodes); + renewer, bufferSize, xattrNames, xattrEncoding, excludeDatanodes, fsAction); } /** Handle HTTP GET request. 
*/ @@ -789,11 +793,13 @@ public class NamenodeWebHdfsMethods { @QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT) final XAttrEncodingParam xattrEncoding, @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT) - final ExcludeDatanodesParam excludeDatanodes + final ExcludeDatanodesParam excludeDatanodes, + @QueryParam(FsActionParam.NAME) @DefaultValue(FsActionParam.DEFAULT) + final FsActionParam fsAction ) throws IOException, InterruptedException { init(ugi, delegation, username, doAsUser, path, op, offset, length, - renewer, bufferSize, xattrEncoding, excludeDatanodes); + renewer, bufferSize, xattrEncoding, excludeDatanodes, fsAction); return ugi.doAs(new PrivilegedExceptionAction() { @Override @@ -801,7 +807,7 @@ public class NamenodeWebHdfsMethods { try { return get(ugi, delegation, username, doAsUser, path.getAbsolutePath(), op, offset, length, renewer, bufferSize, - xattrNames, xattrEncoding, excludeDatanodes); + xattrNames, xattrEncoding, excludeDatanodes, fsAction); } finally { reset(); } @@ -822,7 +828,8 @@ public class NamenodeWebHdfsMethods { final BufferSizeParam bufferSize, final List xattrNames, final XAttrEncodingParam xattrEncoding, - final ExcludeDatanodesParam excludeDatanodes + final ExcludeDatanodesParam excludeDatanodes, + final FsActionParam fsAction ) throws IOException, URISyntaxException { final NameNode namenode = (NameNode)context.getAttribute("name.node"); final NamenodeProtocols np = getRPCServer(namenode); @@ -919,6 +926,10 @@ public class NamenodeWebHdfsMethods { final String js = JsonUtil.toJsonString(xAttrs); return Response.ok(js).type(MediaType.APPLICATION_JSON).build(); } + case CHECKACCESS: { + np.checkAccess(fullpath, FsAction.getFsAction(fsAction.getValue())); + return Response.ok().build(); + } default: throw new UnsupportedOperationException(op + " is not supported"); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java index bc446ac7541..c907f3be5b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java @@ -17,10 +17,9 @@ */ package org.apache.hadoop.hdfs.server.protocol; -import java.util.Arrays; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.protocol.Block; /** @@ -39,12 +38,15 @@ public class BlocksWithLocations { final Block block; final String[] datanodeUuids; final String[] storageIDs; + final StorageType[] storageTypes; /** constructor */ - public BlockWithLocations(Block block, String[] datanodeUuids, String[] storageIDs) { + public BlockWithLocations(Block block, String[] datanodeUuids, + String[] storageIDs, StorageType[] storageTypes) { this.block = block; this.datanodeUuids = datanodeUuids; this.storageIDs = storageIDs; + this.storageTypes = storageTypes; } /** get the block */ @@ -61,7 +63,12 @@ public class BlocksWithLocations { public String[] getStorageIDs() { return storageIDs; } - + + /** @return the storage types */ + public StorageType[] getStorageTypes() { + return storageTypes; + } + @Override public String toString() { final StringBuilder b = new StringBuilder(); @@ 
-70,12 +77,18 @@ public class BlocksWithLocations { return b.append("[]").toString(); } - b.append(storageIDs[0]).append('@').append(datanodeUuids[0]); + appendString(0, b.append("[")); for(int i = 1; i < datanodeUuids.length; i++) { - b.append(", ").append(storageIDs[i]).append("@").append(datanodeUuids[i]); + appendString(i, b.append(",")); } return b.append("]").toString(); } + + private StringBuilder appendString(int i, StringBuilder b) { + return b.append("[").append(storageTypes[i]).append("]") + .append(storageIDs[i]) + .append("@").append(datanodeUuids[i]); + } } private final BlockWithLocations[] blocks; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java index b4fa791e741..7a39ba6072b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java @@ -29,8 +29,8 @@ import org.xml.sax.ContentHandler; import org.xml.sax.SAXException; import org.xml.sax.helpers.AttributesImpl; -import com.sun.org.apache.xml.internal.serialize.OutputFormat; -import com.sun.org.apache.xml.internal.serialize.XMLSerializer; +import org.apache.xml.serialize.OutputFormat; +import org.apache.xml.serialize.XMLSerializer; /** * An XmlEditsVisitor walks over an EditLog structure and writes out diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java index e3975f6d68a..8bdea1fd59e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java @@ -37,7 +37,7 @@ import com.google.common.base.Preconditions; public class EnumCounters> { /** The class of the enum. */ private final Class enumClass; - /** The counter array, counters[i] corresponds to the enumConstants[i]. */ + /** An array of longs corresponding to the enum type. */ private final long[] counters; /** @@ -75,6 +75,13 @@ public class EnumCounters> { } } + /** Reset all counters to zero. */ + public final void reset() { + for(int i = 0; i < counters.length; i++) { + this.counters[i] = 0L; + } + } + /** Add the given value to counter e. */ public final void add(final E e, final long value) { counters[e.ordinal()] += value; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java new file mode 100644 index 00000000000..126070aa016 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java @@ -0,0 +1,128 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.util; + +import java.util.Arrays; + +import com.google.common.base.Preconditions; + +/** + * Similar to {@link EnumCounters} except that the value type is double. + * + * @param the enum type + */ +public class EnumDoubles> { + /** The class of the enum. */ + private final Class enumClass; + /** An array of doubles corresponding to the enum type. */ + private final double[] doubles; + + /** + * Construct doubles for the given enum constants. + * @param enumClass the enum class. + */ + public EnumDoubles(final Class enumClass) { + final E[] enumConstants = enumClass.getEnumConstants(); + Preconditions.checkNotNull(enumConstants); + this.enumClass = enumClass; + this.doubles = new double[enumConstants.length]; + } + + /** @return the value corresponding to e. */ + public final double get(final E e) { + return doubles[e.ordinal()]; + } + + /** Negate all values. */ + public final void negation() { + for(int i = 0; i < doubles.length; i++) { + doubles[i] = -doubles[i]; + } + } + + /** Set e to the given value. */ + public final void set(final E e, final double value) { + doubles[e.ordinal()] = value; + } + + /** Set the values of this object to that object. */ + public final void set(final EnumDoubles that) { + for(int i = 0; i < doubles.length; i++) { + this.doubles[i] = that.doubles[i]; + } + } + + /** Reset all values to zero. */ + public final void reset() { + for(int i = 0; i < doubles.length; i++) { + this.doubles[i] = 0.0; + } + } + + /** Add the given value to e. */ + public final void add(final E e, final double value) { + doubles[e.ordinal()] += value; + } + + /** Add the values of that object to this. */ + public final void add(final EnumDoubles that) { + for(int i = 0; i < doubles.length; i++) { + this.doubles[i] += that.doubles[i]; + } + } + + /** Subtract the given value from e. */ + public final void subtract(final E e, final double value) { + doubles[e.ordinal()] -= value; + } + + /** Subtract the values of this object from that object. 
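A small usage sketch for the new EnumDoubles class, using a made-up enum (StorageCost is purely illustrative) to exercise the set/add/subtract/reset accessors defined here:

import org.apache.hadoop.hdfs.util.EnumDoubles;

// Hypothetical enum purely for illustration; not part of the patch.
enum StorageCost { DISK, SSD }

class EnumDoublesUsageSketch {
  public static void main(String[] args) {
    EnumDoubles<StorageCost> d =
        new EnumDoubles<StorageCost>(StorageCost.class);
    d.set(StorageCost.DISK, 1.5);
    d.add(StorageCost.DISK, 0.25);
    d.subtract(StorageCost.SSD, 0.5);
    System.out.println(d);   // e.g. DISK=1.75, SSD=-0.5
    d.reset();               // back to all zeros
  }
}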
*/ + public final void subtract(final EnumDoubles that) { + for(int i = 0; i < doubles.length; i++) { + this.doubles[i] -= that.doubles[i]; + } + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } else if (obj == null || !(obj instanceof EnumDoubles)) { + return false; + } + final EnumDoubles that = (EnumDoubles)obj; + return this.enumClass == that.enumClass + && Arrays.equals(this.doubles, that.doubles); + } + + @Override + public int hashCode() { + return Arrays.hashCode(doubles); + } + + @Override + public String toString() { + final E[] enumConstants = enumClass.getEnumConstants(); + final StringBuilder b = new StringBuilder(); + for(int i = 0; i < doubles.length; i++) { + final String name = enumConstants[i].name(); + b.append(name).append("=").append(doubles[i]).append(", "); + } + return b.substring(0, b.length() - 2); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index 78062ad0b5f..cf6233f5a35 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -54,6 +54,7 @@ import org.apache.hadoop.fs.XAttrCodec; import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; @@ -1356,6 +1357,12 @@ public class WebHdfsFileSystem extends FileSystem }.run(); } + @Override + public void access(final Path path, final FsAction mode) throws IOException { + final HttpOpParam.Op op = GetOpParam.Op.CHECKACCESS; + new FsPathRunner(op, path, new FsActionParam(mode)).run(); + } + @Override public ContentSummary getContentSummary(final Path p) throws IOException { statistics.incrementReadOps(1); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java new file mode 100644 index 00000000000..c8401960034 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java @@ -0,0 +1,58 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.web.resources; + +import org.apache.hadoop.fs.permission.FsAction; + +import java.util.regex.Pattern; + +/** {@link FsAction} Parameter */ +public class FsActionParam extends StringParam { + + /** Parameter name. */ + public static final String NAME = "fsaction"; + + /** Default parameter value. */ + public static final String DEFAULT = NULL; + + private static String FS_ACTION_PATTERN = "[rwx-]{3}"; + + private static final Domain DOMAIN = new Domain(NAME, + Pattern.compile(FS_ACTION_PATTERN)); + + /** + * Constructor. + * @param str a string representation of the parameter value. + */ + public FsActionParam(final String str) { + super(DOMAIN, str == null || str.equals(DEFAULT)? null: str); + } + + /** + * Constructor. + * @param value the parameter value. + */ + public FsActionParam(final FsAction value) { + super(DOMAIN, value == null? null: value.SYMBOL); + } + + @Override + public String getName() { + return NAME; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java index bf5a6a23e57..f63ed443924 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java @@ -39,7 +39,9 @@ public class GetOpParam extends HttpOpParam { GETXATTRS(false, HttpURLConnection.HTTP_OK), LISTXATTRS(false, HttpURLConnection.HTTP_OK), - NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED); + NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED), + + CHECKACCESS(false, HttpURLConnection.HTTP_OK); final boolean redirect; final int expectedHttpResponseCode; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto index 9a98372d536..8eb662d21bf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto @@ -656,6 +656,14 @@ message DeleteSnapshotRequestProto { message DeleteSnapshotResponseProto { // void response } +message CheckAccessRequestProto { + required string path = 1; + required AclEntryProto.FsActionProto mode = 2; +} + +message CheckAccessResponseProto { // void response +} + service ClientNamenodeProtocol { rpc getBlockLocations(GetBlockLocationsRequestProto) returns(GetBlockLocationsResponseProto); @@ -785,6 +793,8 @@ service ClientNamenodeProtocol { returns(ListXAttrsResponseProto); rpc removeXAttr(RemoveXAttrRequestProto) returns(RemoveXAttrResponseProto); + rpc checkAccess(CheckAccessRequestProto) + returns(CheckAccessResponseProto); rpc createEncryptionZone(CreateEncryptionZoneRequestProto) returns(CreateEncryptionZoneResponseProto); rpc listEncryptionZones(ListEncryptionZonesRequestProto) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto index 1b69dcab15b..a410224f02e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto @@ -424,6 +424,7 @@ message BlockWithLocationsProto { required BlockProto block = 1; // Block repeated string datanodeUuids = 2; // Datanodes with replicas of the block repeated string storageUuids = 3; // Storages with replicas of the block + repeated 
StorageTypeProto storageTypes = 4; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 3606180de32..0b0657b2666 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -2052,6 +2052,14 @@ + + dfs.datanode.block.id.layout.upgrade.threads + 12 + The number of threads to use when creating hard links from + current to previous blocks during upgrade of a DataNode to block ID-based + block layout (see HDFS-6482 for details on the layout). + + dfs.namenode.list.encryption.zones.num.responses 100 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm index 863ba39a739..4375895649b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm @@ -47,18 +47,21 @@ HDFS NFS Gateway The NFS-gateway uses proxy user to proxy all the users accessing the NFS mounts. In non-secure mode, the user running the gateway is the proxy user, while in secure mode the user in Kerberos keytab is the proxy user. Suppose the proxy user is 'nfsserver' - and users belonging to the groups 'nfs-users1' - and 'nfs-users2' use the NFS mounts, then in core-site.xml of the NameNode, the following + and users belonging to the groups 'users-group1' + and 'users-group2' use the NFS mounts, then in core-site.xml of the NameNode, the following two properities must be set and only NameNode needs restart after the configuration change (NOTE: replace the string 'nfsserver' with the proxy user name in your cluster): ---- hadoop.proxyuser.nfsserver.groups - nfs-users1,nfs-users2 + root,users-group1,users-group2 - The 'nfsserver' user is allowed to proxy all members of the 'nfs-users1' and - 'nfs-users2' groups. Set this to '*' to allow nfsserver user to proxy any group. + The 'nfsserver' user is allowed to proxy all members of the 'users-group1' and + 'users-group2' groups. Note that in most cases you will need to include the + group "root" because the user "root" (which usually belonges to "root" group) will + generally be the user that initially executes the mount on the NFS client system. + Set this to '*' to allow nfsserver user to proxy any group. ---- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm index 51bc574095d..c3f6a6b813b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm @@ -82,6 +82,9 @@ WebHDFS REST API * {{{List all XAttrs}<<>>}} (see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.listXAttrs) + * {{{Check access}<<>>}} + (see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.access) + * HTTP PUT * {{{Create and Write to a File}<<>>}} @@ -927,6 +930,28 @@ Transfer-Encoding: chunked {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getAclStatus +** {Check access} + + * Submit a HTTP GET request. 
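Besides the raw REST request shown next, the same check is exposed through FileSystem.access, which this patch wires into WebHdfsFileSystem via the new CHECKACCESS op and fsaction parameter. A hedged client-side sketch; the URI, port and path are made up for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;

public class CheckAccessSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Illustrative endpoint; any FileSystem that implements access() behaves
    // the same way, including webhdfs:// once this patch is applied.
    FileSystem fs = FileSystem.get(
        new java.net.URI("webhdfs://localhost:50070"), conf);
    Path p = new Path("/user/alice/data.txt");   // made-up path
    try {
      fs.access(p, FsAction.READ);               // throws if access is denied
      System.out.println("read access allowed");
    } catch (AccessControlException ace) {
      System.out.println("read access denied: " + ace.getMessage());
    }
  }
}

A successful call returns normally (the HTTP layer answers 200 with zero content length), while a denial surfaces as AccessControlException, matching the FSNamesystem.checkAccess behavior added above.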
+ ++--------------------------------- +curl -i -X PUT "http://:/webhdfs/v1/?op=CHECKACCESS + &fsaction= ++--------------------------------- + + The client receives a response with zero content length: + ++--------------------------------- +HTTP/1.1 200 OK +Content-Length: 0 ++--------------------------------- + + [] + + See also: + {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.access + + * {Extended Attributes(XAttrs) Operations} ** {Set XAttr} @@ -2166,6 +2191,25 @@ var tokenProperties = {{Proxy Users}} +** {Fs Action} + +*----------------+-------------------------------------------------------------------+ +|| Name | <<>> | +*----------------+-------------------------------------------------------------------+ +|| Description | File system operation read/write/execute | +*----------------+-------------------------------------------------------------------+ +|| Type | String | +*----------------+-------------------------------------------------------------------+ +|| Default Value | null (an invalid value) | +*----------------+-------------------------------------------------------------------+ +|| Valid Values | Strings matching regex pattern \"[rwx-]\{3\}\" | +*----------------+-------------------------------------------------------------------+ +|| Syntax | \"[rwx-]\{3\}\" | +*----------------+-------------------------------------------------------------------+ + + See also: + {{{Check access}<<>>}}, + ** {Group} *----------------+-------------------------------------------------------------------+ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java index 664a478f27a..3c73c28c2a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java @@ -47,7 +47,6 @@ import org.mockito.Mockito; public class TestGenericRefresh { private static MiniDFSCluster cluster; private static Configuration config; - private static final int NNPort = 54222; private static RefreshHandler firstHandler; private static RefreshHandler secondHandler; @@ -57,8 +56,8 @@ public class TestGenericRefresh { config = new Configuration(); config.set("hadoop.security.authorization", "true"); - FileSystem.setDefaultUri(config, "hdfs://localhost:" + NNPort); - cluster = new MiniDFSCluster.Builder(config).nameNodePort(NNPort).build(); + FileSystem.setDefaultUri(config, "hdfs://localhost:0"); + cluster = new MiniDFSCluster.Builder(config).build(); cluster.waitActive(); } @@ -103,7 +102,8 @@ public class TestGenericRefresh { @Test public void testInvalidIdentifier() throws Exception { DFSAdmin admin = new DFSAdmin(config); - String [] args = new String[]{"-refresh", "localhost:" + NNPort, "unregisteredIdentity"}; + String [] args = new String[]{"-refresh", "localhost:" + + cluster.getNameNodePort(), "unregisteredIdentity"}; int exitCode = admin.run(args); assertEquals("DFSAdmin should fail due to no handler registered", -1, exitCode); } @@ -111,7 +111,8 @@ public class TestGenericRefresh { @Test public void testValidIdentifier() throws Exception { DFSAdmin admin = new DFSAdmin(config); - String[] args = new String[]{"-refresh", "localhost:" + NNPort, "firstHandler"}; + String[] args = new String[]{"-refresh", + "localhost:" + cluster.getNameNodePort(), "firstHandler"}; int exitCode = admin.run(args); assertEquals("DFSAdmin should succeed", 0, 
exitCode); @@ -124,11 +125,13 @@ public class TestGenericRefresh { @Test public void testVariableArgs() throws Exception { DFSAdmin admin = new DFSAdmin(config); - String[] args = new String[]{"-refresh", "localhost:" + NNPort, "secondHandler", "one"}; + String[] args = new String[]{"-refresh", "localhost:" + + cluster.getNameNodePort(), "secondHandler", "one"}; int exitCode = admin.run(args); assertEquals("DFSAdmin should return 2", 2, exitCode); - exitCode = admin.run(new String[]{"-refresh", "localhost:" + NNPort, "secondHandler", "one", "two"}); + exitCode = admin.run(new String[]{"-refresh", "localhost:" + + cluster.getNameNodePort(), "secondHandler", "one", "two"}); assertEquals("DFSAdmin should now return 3", 3, exitCode); Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one"}); @@ -141,7 +144,8 @@ public class TestGenericRefresh { // And now this should fail DFSAdmin admin = new DFSAdmin(config); - String[] args = new String[]{"-refresh", "localhost:" + NNPort, "firstHandler"}; + String[] args = new String[]{"-refresh", "localhost:" + + cluster.getNameNodePort(), "firstHandler"}; int exitCode = admin.run(args); assertEquals("DFSAdmin should return -1", -1, exitCode); } @@ -161,7 +165,8 @@ public class TestGenericRefresh { // this should trigger both DFSAdmin admin = new DFSAdmin(config); - String[] args = new String[]{"-refresh", "localhost:" + NNPort, "sharedId", "one"}; + String[] args = new String[]{"-refresh", "localhost:" + + cluster.getNameNodePort(), "sharedId", "one"}; int exitCode = admin.run(args); assertEquals(-1, exitCode); // -1 because one of the responses is unregistered @@ -189,7 +194,8 @@ public class TestGenericRefresh { // We refresh both DFSAdmin admin = new DFSAdmin(config); - String[] args = new String[]{"-refresh", "localhost:" + NNPort, "shared"}; + String[] args = new String[]{"-refresh", "localhost:" + + cluster.getNameNodePort(), "shared"}; int exitCode = admin.run(args); assertEquals(-1, exitCode); // We get -1 because of our logic for melding non-zero return codes @@ -215,7 +221,8 @@ public class TestGenericRefresh { RefreshRegistry.defaultRegistry().register("exceptional", otherExceptionalHandler); DFSAdmin admin = new DFSAdmin(config); - String[] args = new String[]{"-refresh", "localhost:" + NNPort, "exceptional"}; + String[] args = new String[]{"-refresh", "localhost:" + + cluster.getNameNodePort(), "exceptional"}; int exitCode = admin.run(args); assertEquals(-1, exitCode); // Exceptions result in a -1 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java index 9b0acbc8ca6..f66f9b6105f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java @@ -24,6 +24,8 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; +import java.net.BindException; +import java.util.Random; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; @@ -42,24 +44,42 @@ public class TestRefreshCallQueue { private FileSystem fs; static int mockQueueConstructions; static int mockQueuePuts; - private static final int NNPort = 54222; - private static String CALLQUEUE_CONFIG_KEY = "ipc." 
+ NNPort + ".callqueue.impl"; + private String callQueueConfigKey = ""; + private final Random rand = new Random(); @Before public void setUp() throws Exception { // We want to count additional events, so we reset here mockQueueConstructions = 0; mockQueuePuts = 0; + int portRetries = 5; + int nnPort; - config = new Configuration(); - config.setClass(CALLQUEUE_CONFIG_KEY, - MockCallQueue.class, BlockingQueue.class); - config.set("hadoop.security.authorization", "true"); + for (; portRetries > 0; --portRetries) { + // Pick a random port in the range [30000,60000). + nnPort = 30000 + rand.nextInt(30000); + config = new Configuration(); + callQueueConfigKey = "ipc." + nnPort + ".callqueue.impl"; + config.setClass(callQueueConfigKey, + MockCallQueue.class, BlockingQueue.class); + config.set("hadoop.security.authorization", "true"); - FileSystem.setDefaultUri(config, "hdfs://localhost:" + NNPort); - fs = FileSystem.get(config); - cluster = new MiniDFSCluster.Builder(config).nameNodePort(NNPort).build(); - cluster.waitActive(); + FileSystem.setDefaultUri(config, "hdfs://localhost:" + nnPort); + fs = FileSystem.get(config); + + try { + cluster = new MiniDFSCluster.Builder(config).nameNodePort(nnPort).build(); + cluster.waitActive(); + break; + } catch (BindException be) { + // Retry with a different port number. + } + } + + if (portRetries == 0) { + // Bail if we get very unlucky with our choice of ports. + fail("Failed to pick an ephemeral port for the NameNode RPC server."); + } } @After diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index c316684138b..fe298d33118 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -2353,8 +2353,8 @@ public class MiniDFSCluster { * @return data file corresponding to the block */ public static File getBlockFile(File storageDir, ExtendedBlock blk) { - return new File(getFinalizedDir(storageDir, blk.getBlockPoolId()), - blk.getBlockName()); + return new File(DatanodeUtil.idToBlockDir(getFinalizedDir(storageDir, + blk.getBlockPoolId()), blk.getBlockId()), blk.getBlockName()); } /** @@ -2364,10 +2364,32 @@ public class MiniDFSCluster { * @return metadata file corresponding to the block */ public static File getBlockMetadataFile(File storageDir, ExtendedBlock blk) { - return new File(getFinalizedDir(storageDir, blk.getBlockPoolId()), - blk.getBlockName() + "_" + blk.getGenerationStamp() + - Block.METADATA_EXTENSION); - + return new File(DatanodeUtil.idToBlockDir(getFinalizedDir(storageDir, + blk.getBlockPoolId()), blk.getBlockId()), blk.getBlockName() + "_" + + blk.getGenerationStamp() + Block.METADATA_EXTENSION); + } + + /** + * Return all block metadata files in given directory (recursive search) + */ + public static List getAllBlockMetadataFiles(File storageDir) { + List results = new ArrayList(); + File[] files = storageDir.listFiles(); + if (files == null) { + return null; + } + for (File f : files) { + if (f.getName().startsWith("blk_") && f.getName().endsWith( + Block.METADATA_EXTENSION)) { + results.add(f); + } else if (f.isDirectory()) { + List subdirResults = getAllBlockMetadataFiles(f); + if (subdirResults != null) { + results.addAll(subdirResults); + } + } + } + return results; } /** diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java index d2a03d698d7..d33d7562cf6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java @@ -52,6 +52,7 @@ import org.apache.hadoop.io.retry.DefaultFailoverProxyProvider; import org.apache.hadoop.io.retry.FailoverProxyProvider; import org.apache.hadoop.net.ConnectTimeoutException; import org.apache.hadoop.net.StandardSocketFactory; +import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; @@ -89,6 +90,11 @@ public class TestDFSClientFailover { cluster.shutdown(); } + @After + public void clearConfig() { + SecurityUtil.setTokenServiceUseIp(true); + } + /** * Make sure that client failover works when an active NN dies and the standby * takes over. @@ -323,6 +329,7 @@ public class TestDFSClientFailover { /** * Test to verify legacy proxy providers are correctly wrapped. */ + @Test public void testWrappedFailoverProxyProvider() throws Exception { // setup the config with the dummy provider class Configuration config = new HdfsConfiguration(conf); @@ -332,6 +339,9 @@ public class TestDFSClientFailover { DummyLegacyFailoverProxyProvider.class.getName()); Path p = new Path("hdfs://" + logicalName + "/"); + // not to use IP address for token service + SecurityUtil.setTokenServiceUseIp(false); + // Logical URI should be used. assertTrue("Legacy proxy providers should use logical URI.", HAUtil.useLogicalUri(config, p.toUri())); @@ -340,6 +350,7 @@ public class TestDFSClientFailover { /** * Test to verify IPFailoverProxyProvider is not requiring logical URI. 
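For reference alongside this test change: an HA client reaches the cluster through a logical URI that a failover proxy provider resolves to the active NameNode, which is why the test toggles token-service resolution rather than any host name. Below is a minimal, hedged sketch of the client-side configuration for such a logical URI; the nameservice name "mycluster" and the host names are placeholders, not values from this patch.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HaClientConfigSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical nameservice and NameNode addresses.
    conf.set("dfs.nameservices", "mycluster");
    conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2");
    conf.set("dfs.namenode.rpc-address.mycluster.nn1", "nn1.example.com:8020");
    conf.set("dfs.namenode.rpc-address.mycluster.nn2", "nn2.example.com:8020");
    // The proxy provider maps the logical URI to whichever NameNode is active.
    conf.set("dfs.client.failover.proxy.provider.mycluster",
        "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
    conf.set("fs.defaultFS", "hdfs://mycluster");

    // The caller never names a single host; failover stays transparent.
    FileSystem fs = FileSystem.get(new URI("hdfs://mycluster"), conf);
    System.out.println(fs.exists(new Path("/")));
    fs.close();
  }
}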
*/ + @Test public void testIPFailoverProxyProviderLogicalUri() throws Exception { // setup the config with the IP failover proxy provider class Configuration config = new HdfsConfiguration(conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java index 6a994494c6d..01bfb0d2fef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java @@ -79,8 +79,8 @@ public class TestDFSFinalize { File dnCurDirs[] = new File[dataNodeDirs.length]; for (int i = 0; i < dataNodeDirs.length; i++) { dnCurDirs[i] = new File(dataNodeDirs[i],"current"); - assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, dnCurDirs[i]), - UpgradeUtilities.checksumMasterDataNodeContents()); + assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, dnCurDirs[i], + false), UpgradeUtilities.checksumMasterDataNodeContents()); } for (int i = 0; i < nameNodeDirs.length; i++) { assertFalse(new File(nameNodeDirs[i],"previous").isDirectory()); @@ -96,8 +96,9 @@ public class TestDFSFinalize { assertFalse(new File(bpRoot,"previous").isDirectory()); File bpCurFinalizeDir = new File(bpRoot,"current/"+DataStorage.STORAGE_DIR_FINALIZED); - assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurFinalizeDir), - UpgradeUtilities.checksumMasterBlockPoolFinalizedContents()); + assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, + bpCurFinalizeDir, true), + UpgradeUtilities.checksumMasterBlockPoolFinalizedContents()); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java index 7d2b0ff7040..68349a2ac67 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java @@ -20,8 +20,11 @@ package org.apache.hadoop.hdfs; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import java.io.FileNotFoundException; import java.io.IOException; +import java.security.PrivilegedExceptionAction; import java.util.HashMap; import java.util.Map; import java.util.Random; @@ -36,6 +39,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; @@ -421,6 +425,79 @@ public class TestDFSPermission { } } + @Test + public void testAccessOwner() throws IOException, InterruptedException { + FileSystem rootFs = FileSystem.get(conf); + Path p1 = new Path("/p1"); + rootFs.mkdirs(p1); + rootFs.setOwner(p1, USER1_NAME, GROUP1_NAME); + fs = USER1.doAs(new PrivilegedExceptionAction() { + @Override + public FileSystem run() throws Exception { + return FileSystem.get(conf); + } + }); + fs.setPermission(p1, new FsPermission((short) 0444)); + fs.access(p1, FsAction.READ); + try { + fs.access(p1, FsAction.WRITE); + fail("The access call should have failed."); + } catch 
(AccessControlException e) { + // expected + } + + Path badPath = new Path("/bad/bad"); + try { + fs.access(badPath, FsAction.READ); + fail("The access call should have failed"); + } catch (FileNotFoundException e) { + // expected + } + } + + @Test + public void testAccessGroupMember() throws IOException, InterruptedException { + FileSystem rootFs = FileSystem.get(conf); + Path p2 = new Path("/p2"); + rootFs.mkdirs(p2); + rootFs.setOwner(p2, UserGroupInformation.getCurrentUser().getShortUserName(), GROUP1_NAME); + rootFs.setPermission(p2, new FsPermission((short) 0740)); + fs = USER1.doAs(new PrivilegedExceptionAction() { + @Override + public FileSystem run() throws Exception { + return FileSystem.get(conf); + } + }); + fs.access(p2, FsAction.READ); + try { + fs.access(p2, FsAction.EXECUTE); + fail("The access call should have failed."); + } catch (AccessControlException e) { + // expected + } + } + + @Test + public void testAccessOthers() throws IOException, InterruptedException { + FileSystem rootFs = FileSystem.get(conf); + Path p3 = new Path("/p3"); + rootFs.mkdirs(p3); + rootFs.setPermission(p3, new FsPermission((short) 0774)); + fs = USER1.doAs(new PrivilegedExceptionAction() { + @Override + public FileSystem run() throws Exception { + return FileSystem.get(conf); + } + }); + fs.access(p3, FsAction.READ); + try { + fs.access(p3, FsAction.READ_WRITE); + fail("The access call should have failed."); + } catch (AccessControlException e) { + // expected + } + } + /* Check if namenode performs permission checking correctly * for the given user for operations mkdir, open, setReplication, * getFileInfo, isDirectory, exists, getContentLength, list, rename, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java index 7a541e6622c..68687edea1b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java @@ -81,7 +81,7 @@ public class TestDFSRollback { break; case DATA_NODE: assertEquals( - UpgradeUtilities.checksumContents(nodeType, curDir), + UpgradeUtilities.checksumContents(nodeType, curDir, false), UpgradeUtilities.checksumMasterDataNodeContents()); break; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java index e4d22cdd76a..176e9cc26cd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java @@ -239,7 +239,7 @@ public class TestDFSStorageStateRecovery { assertTrue(new File(baseDirs[i],"previous").isDirectory()); assertEquals( UpgradeUtilities.checksumContents( - NAME_NODE, new File(baseDirs[i],"previous")), + NAME_NODE, new File(baseDirs[i],"previous"), false), UpgradeUtilities.checksumMasterNameNodeContents()); } } @@ -259,7 +259,8 @@ public class TestDFSStorageStateRecovery { if (currentShouldExist) { for (int i = 0; i < baseDirs.length; i++) { assertEquals( - UpgradeUtilities.checksumContents(DATA_NODE, new File(baseDirs[i],"current")), + UpgradeUtilities.checksumContents(DATA_NODE, + new File(baseDirs[i],"current"), false), UpgradeUtilities.checksumMasterDataNodeContents()); } } @@ 
-267,7 +268,8 @@ public class TestDFSStorageStateRecovery { for (int i = 0; i < baseDirs.length; i++) { assertTrue(new File(baseDirs[i],"previous").isDirectory()); assertEquals( - UpgradeUtilities.checksumContents(DATA_NODE, new File(baseDirs[i],"previous")), + UpgradeUtilities.checksumContents(DATA_NODE, + new File(baseDirs[i],"previous"), false), UpgradeUtilities.checksumMasterDataNodeContents()); } } @@ -290,8 +292,8 @@ public class TestDFSStorageStateRecovery { if (currentShouldExist) { for (int i = 0; i < baseDirs.length; i++) { File bpCurDir = new File(baseDirs[i], Storage.STORAGE_DIR_CURRENT); - assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurDir), - UpgradeUtilities.checksumMasterBlockPoolContents()); + assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurDir, + false), UpgradeUtilities.checksumMasterBlockPoolContents()); } } if (previousShouldExist) { @@ -299,8 +301,8 @@ public class TestDFSStorageStateRecovery { File bpPrevDir = new File(baseDirs[i], Storage.STORAGE_DIR_PREVIOUS); assertTrue(bpPrevDir.isDirectory()); assertEquals( - UpgradeUtilities.checksumContents(DATA_NODE, bpPrevDir), - UpgradeUtilities.checksumMasterBlockPoolContents()); + UpgradeUtilities.checksumContents(DATA_NODE, bpPrevDir, + false), UpgradeUtilities.checksumMasterBlockPoolContents()); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java index ee9e91dc733..104b043f1d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java @@ -100,7 +100,7 @@ public class TestDFSUpgrade { File previous = new File(baseDir, "previous"); assertExists(previous); - assertEquals(UpgradeUtilities.checksumContents(NAME_NODE, previous), + assertEquals(UpgradeUtilities.checksumContents(NAME_NODE, previous, false), UpgradeUtilities.checksumMasterNameNodeContents()); } } @@ -114,23 +114,25 @@ public class TestDFSUpgrade { void checkDataNode(String[] baseDirs, String bpid) throws IOException { for (int i = 0; i < baseDirs.length; i++) { File current = new File(baseDirs[i], "current/" + bpid + "/current"); - assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, current), + assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, current, false), UpgradeUtilities.checksumMasterDataNodeContents()); // block files are placed under /current//current/finalized File currentFinalized = MiniDFSCluster.getFinalizedDir(new File(baseDirs[i]), bpid); - assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, currentFinalized), + assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, + currentFinalized, true), UpgradeUtilities.checksumMasterBlockPoolFinalizedContents()); File previous = new File(baseDirs[i], "current/" + bpid + "/previous"); assertTrue(previous.isDirectory()); - assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, previous), + assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, previous, false), UpgradeUtilities.checksumMasterDataNodeContents()); File previousFinalized = new File(baseDirs[i], "current/" + bpid + "/previous"+"/finalized"); - assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, previousFinalized), + assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, + previousFinalized, true), UpgradeUtilities.checksumMasterBlockPoolFinalizedContents()); } diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java index f5dbdceaa17..88ad0cc2dac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java @@ -24,6 +24,7 @@ import static org.junit.Assert.fail; import java.io.BufferedReader; import java.io.File; +import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.FileReader; import java.io.IOException; @@ -80,7 +81,7 @@ public class TestDFSUpgradeFromImage { long checksum; } - private static final Configuration upgradeConf; + static final Configuration upgradeConf; static { upgradeConf = new HdfsConfiguration(); @@ -95,7 +96,7 @@ public class TestDFSUpgradeFromImage { boolean printChecksum = false; - private void unpackStorage(String tarFileName) + void unpackStorage(String tarFileName, String referenceName) throws IOException { String tarFile = System.getProperty("test.cache.data", "build/test/cache") + "/" + tarFileName; @@ -110,7 +111,7 @@ public class TestDFSUpgradeFromImage { BufferedReader reader = new BufferedReader(new FileReader( System.getProperty("test.cache.data", "build/test/cache") - + "/" + HADOOP_DFS_DIR_TXT)); + + "/" + referenceName)); String line; while ( (line = reader.readLine()) != null ) { @@ -285,7 +286,7 @@ public class TestDFSUpgradeFromImage { */ @Test public void testUpgradeFromRel22Image() throws IOException { - unpackStorage(HADOOP22_IMAGE); + unpackStorage(HADOOP22_IMAGE, HADOOP_DFS_DIR_TXT); upgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf). numDataNodes(4)); } @@ -296,7 +297,7 @@ public class TestDFSUpgradeFromImage { */ @Test public void testUpgradeFromCorruptRel22Image() throws IOException { - unpackStorage(HADOOP22_IMAGE); + unpackStorage(HADOOP22_IMAGE, HADOOP_DFS_DIR_TXT); // Overwrite the md5 stored in the VERSION files File baseDir = new File(MiniDFSCluster.getBaseDirectory()); @@ -333,7 +334,7 @@ public class TestDFSUpgradeFromImage { */ @Test public void testUpgradeFromRel1ReservedImage() throws Exception { - unpackStorage(HADOOP1_RESERVED_IMAGE); + unpackStorage(HADOOP1_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT); MiniDFSCluster cluster = null; // Try it once without setting the upgrade flag to ensure it fails final Configuration conf = new Configuration(); @@ -403,7 +404,7 @@ public class TestDFSUpgradeFromImage { */ @Test public void testUpgradeFromRel023ReservedImage() throws Exception { - unpackStorage(HADOOP023_RESERVED_IMAGE); + unpackStorage(HADOOP023_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT); MiniDFSCluster cluster = null; // Try it once without setting the upgrade flag to ensure it fails final Configuration conf = new Configuration(); @@ -468,7 +469,7 @@ public class TestDFSUpgradeFromImage { */ @Test public void testUpgradeFromRel2ReservedImage() throws Exception { - unpackStorage(HADOOP2_RESERVED_IMAGE); + unpackStorage(HADOOP2_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT); MiniDFSCluster cluster = null; // Try it once without setting the upgrade flag to ensure it fails final Configuration conf = new Configuration(); @@ -572,7 +573,7 @@ public class TestDFSUpgradeFromImage { } while (dirList.hasMore()); } - private void upgradeAndVerify(MiniDFSCluster.Builder bld) + void upgradeAndVerify(MiniDFSCluster.Builder bld) throws IOException { MiniDFSCluster cluster = null; try { @@ -601,7 +602,7 
@@ public class TestDFSUpgradeFromImage { */ @Test public void testUpgradeFromRel1BBWImage() throws IOException { - unpackStorage(HADOOP1_BBW_IMAGE); + unpackStorage(HADOOP1_BBW_IMAGE, HADOOP_DFS_DIR_TXT); Configuration conf = new Configuration(upgradeConf); conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, System.getProperty("test.build.data") + File.separator + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java index a2899eec9c7..1b4b3172394 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java @@ -445,19 +445,14 @@ public class TestDatanodeBlockScanner { @Test public void testReplicaInfoParsing() throws Exception { - testReplicaInfoParsingSingle(BASE_PATH, new int[0]); - testReplicaInfoParsingSingle(BASE_PATH + "/subdir1", new int[]{1}); - testReplicaInfoParsingSingle(BASE_PATH + "/subdir43", new int[]{43}); - testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir2/subdir3", new int[]{1, 2, 3}); - testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir2/subdir43", new int[]{1, 2, 43}); - testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir23/subdir3", new int[]{1, 23, 3}); - testReplicaInfoParsingSingle(BASE_PATH + "/subdir13/subdir2/subdir3", new int[]{13, 2, 3}); + testReplicaInfoParsingSingle(BASE_PATH); + testReplicaInfoParsingSingle(BASE_PATH + "/subdir1"); + testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir2/subdir3"); } - private static void testReplicaInfoParsingSingle(String subDirPath, int[] expectedSubDirs) { + private static void testReplicaInfoParsingSingle(String subDirPath) { File testFile = new File(subDirPath); - assertArrayEquals(expectedSubDirs, ReplicaInfo.parseSubDirs(testFile).subDirs); - assertEquals(BASE_PATH, ReplicaInfo.parseSubDirs(testFile).baseDirPath); + assertEquals(BASE_PATH, ReplicaInfo.parseBaseDir(testFile).baseDirPath); } @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java new file mode 100644 index 00000000000..0966301cb4e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdfs; + +import org.apache.hadoop.conf.Configuration; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; + +public class TestDatanodeLayoutUpgrade { + private static final String HADOOP_DATANODE_DIR_TXT = + "hadoop-datanode-dir.txt"; + private static final String HADOOP24_DATANODE = "hadoop-24-datanode-dir.tgz"; + + @Test + // Upgrade from LDir-based layout to block ID-based layout -- change described + // in HDFS-6482 + public void testUpgradeToIdBasedLayout() throws IOException { + TestDFSUpgradeFromImage upgrade = new TestDFSUpgradeFromImage(); + upgrade.unpackStorage(HADOOP24_DATANODE, HADOOP_DATANODE_DIR_TXT); + Configuration conf = new Configuration(TestDFSUpgradeFromImage.upgradeConf); + conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, + System.getProperty("test.build.data") + File.separator + + "dfs" + File.separator + "data"); + conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, + System.getProperty("test.build.data") + File.separator + + "dfs" + File.separator + "name"); + upgrade.upgradeAndVerify(new MiniDFSCluster.Builder(conf).numDataNodes(1) + .manageDataDfsDirs(false).manageNameDfsDirs(false)); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java index 4ea534acdd6..d8400778ec9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.File; import java.io.FileNotFoundException; @@ -32,6 +33,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.HardLink; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; @@ -39,6 +41,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.RemoteException; import org.junit.Assert; import org.junit.Test; @@ -169,6 +172,7 @@ public class TestFileAppend{ } } finally { + client.close(); fs.close(); cluster.shutdown(); } @@ -380,4 +384,57 @@ public class TestFileAppend{ } } + /** + * Old replica of the block should not be accepted as valid for append/read + */ + @Test + public void testFailedAppendBlockRejection() throws Exception { + Configuration conf = new HdfsConfiguration(); + conf.set("dfs.client.block.write.replace-datanode-on-failure.enable", + "false"); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3) + .build(); + DistributedFileSystem fs = null; + try { + fs = cluster.getFileSystem(); + Path path = new Path("/test"); + FSDataOutputStream out = fs.create(path); + out.writeBytes("hello\n"); + out.close(); + + // stop one datanode + DataNodeProperties dnProp = cluster.stopDataNode(0); + String dnAddress = dnProp.datanode.getXferAddress().toString(); 
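A side note on the fixed Thread.sleep(2000) used below to wait for the restarted datanode's block report: a polling wait is usually more robust on slow machines. The sketch assumes GenericTestUtils.waitFor and a Guava Supplier are available on the test classpath; the helper name and timeout values are illustrative only, not part of this patch.

import java.util.concurrent.TimeoutException;

import com.google.common.base.Supplier;

import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.GenericTestUtils;

class BlockReportWaitSketch {
  /** Poll until the first block of the file reports at least "expected" locations. */
  static void waitForLocations(final FileSystem fs, final Path path,
      final int expected) throws TimeoutException, InterruptedException {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        try {
          BlockLocation[] locs = fs.getFileBlockLocations(path, 0, Long.MAX_VALUE);
          return locs.length > 0 && locs[0].getNames().length >= expected;
        } catch (Exception e) {
          return false; // keep polling on transient errors
        }
      }
    }, 500, 30000); // check every 500 ms, give up after 30 s
  }
}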
+ if (dnAddress.startsWith("/")) { + dnAddress = dnAddress.substring(1); + } + + // append again to bump genstamps + for (int i = 0; i < 2; i++) { + out = fs.append(path); + out.writeBytes("helloagain\n"); + out.close(); + } + + // re-open and make the block state as underconstruction + out = fs.append(path); + cluster.restartDataNode(dnProp, true); + // wait till the block report comes + Thread.sleep(2000); + // check the block locations, this should not contain restarted datanode + BlockLocation[] locations = fs.getFileBlockLocations(path, 0, + Long.MAX_VALUE); + String[] names = locations[0].getNames(); + for (String node : names) { + if (node.equals(dnAddress)) { + fail("Failed append should not be present in latest block locations."); + } + } + out.close(); + } finally { + IOUtils.closeStream(fs); + cluster.shutdown(); + } + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java index 81077c5fd8b..a7c6a69ac9b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java @@ -27,6 +27,7 @@ import java.io.DataOutputStream; import java.io.File; import java.io.FileOutputStream; import java.util.ArrayList; +import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -35,6 +36,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.common.GenerationStamp; @@ -137,13 +139,15 @@ public class TestFileCorruption { final String bpid = cluster.getNamesystem().getBlockPoolId(); File storageDir = cluster.getInstanceStorageDir(0, 0); File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid); + assertTrue("Data directory does not exist", dataDir.exists()); ExtendedBlock blk = getBlock(bpid, dataDir); if (blk == null) { storageDir = cluster.getInstanceStorageDir(0, 1); dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid); blk = getBlock(bpid, dataDir); } - assertFalse(blk==null); + assertFalse("Data directory does not contain any blocks or there was an " + + "IO error", blk==null); // start a third datanode cluster.startDataNodes(conf, 1, true, null, null); @@ -174,33 +178,15 @@ public class TestFileCorruption { } - private ExtendedBlock getBlock(String bpid, File dataDir) { - assertTrue("data directory does not exist", dataDir.exists()); - File[] blocks = dataDir.listFiles(); - assertTrue("Blocks do not exist in dataDir", (blocks != null) && (blocks.length > 0)); - - int idx = 0; - String blockFileName = null; - for (; idx < blocks.length; idx++) { - blockFileName = blocks[idx].getName(); - if (blockFileName.startsWith("blk_") && !blockFileName.endsWith(".meta")) { - break; - } - } - if (blockFileName == null) { + public static ExtendedBlock getBlock(String bpid, File dataDir) { + List metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(dataDir); + if (metadataFiles == null || metadataFiles.isEmpty()) { return null; } - long blockId = Long.parseLong(blockFileName.substring("blk_".length())); - long blockTimeStamp = 
GenerationStamp.GRANDFATHER_GENERATION_STAMP; - for (idx=0; idx < blocks.length; idx++) { - String fileName = blocks[idx].getName(); - if (fileName.startsWith(blockFileName) && fileName.endsWith(".meta")) { - int startIndex = blockFileName.length()+1; - int endIndex = fileName.length() - ".meta".length(); - blockTimeStamp = Long.parseLong(fileName.substring(startIndex, endIndex)); - break; - } - } - return new ExtendedBlock(bpid, blockId, blocks[idx].length(), blockTimeStamp); + File metadataFile = metadataFiles.get(0); + File blockFile = Block.metaToBlockFile(metadataFile); + return new ExtendedBlock(bpid, Block.getBlockId(blockFile.getName()), + blockFile.length(), Block.getGenerationStamp(metadataFile.getName())); } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java index 25ec8c9eb0e..bda95c07525 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java @@ -26,6 +26,7 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; +import java.security.PrivilegedExceptionAction; import java.util.List; import org.apache.commons.logging.Log; @@ -36,6 +37,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; @@ -47,6 +49,8 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.namenode.SafeModeException; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; import org.junit.Before; @@ -297,7 +301,8 @@ public class TestSafeMode { * assert that they are either allowed or fail as expected. 
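The access calls added by these tests use the new FileSystem.access(Path, FsAction) API: it returns normally when the caller holds the requested permission and otherwise throws AccessControlException, or FileNotFoundException for a missing path, exactly as the tests assert. A small, hedged usage sketch follows; the path and action are placeholders.

import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;

class AccessCheckSketch {
  /** Return true if the current user may write to the given path. */
  static boolean canWrite(Configuration conf, Path p) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    try {
      fs.access(p, FsAction.WRITE); // throws if WRITE is not permitted
      return true;
    } catch (AccessControlException e) {
      return false;
    } catch (FileNotFoundException e) {
      return false; // path does not exist
    }
  }
}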
*/ @Test - public void testOperationsWhileInSafeMode() throws IOException { + public void testOperationsWhileInSafeMode() throws IOException, + InterruptedException { final Path file1 = new Path("/file1"); assertFalse(dfs.setSafeMode(SafeModeAction.SAFEMODE_GET)); @@ -407,6 +412,22 @@ public class TestSafeMode { fail("getAclStatus failed while in SM"); } + // Test access + UserGroupInformation ugiX = UserGroupInformation.createRemoteUser("userX"); + FileSystem myfs = ugiX.doAs(new PrivilegedExceptionAction() { + @Override + public FileSystem run() throws IOException { + return FileSystem.get(conf); + } + }); + myfs.access(file1, FsAction.READ); + try { + myfs.access(file1, FsAction.WRITE); + fail("The access call should have failed."); + } catch (AccessControlException e) { + // expected + } + assertFalse("Could not leave SM", dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE)); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java index 4f26e087cc8..bbaf3ed0e3e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java @@ -158,21 +158,23 @@ public class UpgradeUtilities { FileUtil.fullyDelete(new File(datanodeStorage,"in_use.lock")); } namenodeStorageChecksum = checksumContents(NAME_NODE, - new File(namenodeStorage, "current")); + new File(namenodeStorage, "current"), false); File dnCurDir = new File(datanodeStorage, "current"); - datanodeStorageChecksum = checksumContents(DATA_NODE, dnCurDir); + datanodeStorageChecksum = checksumContents(DATA_NODE, dnCurDir, false); File bpCurDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir), "current"); - blockPoolStorageChecksum = checksumContents(DATA_NODE, bpCurDir); + blockPoolStorageChecksum = checksumContents(DATA_NODE, bpCurDir, false); File bpCurFinalizeDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir), "current/"+DataStorage.STORAGE_DIR_FINALIZED); - blockPoolFinalizedStorageChecksum = checksumContents(DATA_NODE, bpCurFinalizeDir); + blockPoolFinalizedStorageChecksum = checksumContents(DATA_NODE, + bpCurFinalizeDir, true); File bpCurRbwDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir), "current/"+DataStorage.STORAGE_DIR_RBW); - blockPoolRbwStorageChecksum = checksumContents(DATA_NODE, bpCurRbwDir); + blockPoolRbwStorageChecksum = checksumContents(DATA_NODE, bpCurRbwDir, + false); } // Private helper method that writes a file to the given file system. @@ -266,36 +268,47 @@ public class UpgradeUtilities { /** * Compute the checksum of all the files in the specified directory. - * The contents of subdirectories are not included. This method provides - * an easy way to ensure equality between the contents of two directories. + * This method provides an easy way to ensure equality between the contents + * of two directories. * * @param nodeType if DATA_NODE then any file named "VERSION" is ignored. * This is because this file file is changed every time * the Datanode is started. - * @param dir must be a directory. Subdirectories are ignored. 
+ * @param dir must be a directory + * @param recursive whether or not to consider subdirectories * * @throws IllegalArgumentException if specified directory is not a directory * @throws IOException if an IOException occurs while reading the files * @return the computed checksum value */ - public static long checksumContents(NodeType nodeType, File dir) throws IOException { + public static long checksumContents(NodeType nodeType, File dir, + boolean recursive) throws IOException { + CRC32 checksum = new CRC32(); + checksumContentsHelper(nodeType, dir, checksum, recursive); + return checksum.getValue(); + } + + public static void checksumContentsHelper(NodeType nodeType, File dir, + CRC32 checksum, boolean recursive) throws IOException { if (!dir.isDirectory()) { throw new IllegalArgumentException( - "Given argument is not a directory:" + dir); + "Given argument is not a directory:" + dir); } File[] list = dir.listFiles(); Arrays.sort(list); - CRC32 checksum = new CRC32(); for (int i = 0; i < list.length; i++) { if (!list[i].isFile()) { + if (recursive) { + checksumContentsHelper(nodeType, list[i], checksum, recursive); + } continue; } // skip VERSION and dfsUsed file for DataNodes - if (nodeType == DATA_NODE && - (list[i].getName().equals("VERSION") || - list[i].getName().equals("dfsUsed"))) { - continue; + if (nodeType == DATA_NODE && + (list[i].getName().equals("VERSION") || + list[i].getName().equals("dfsUsed"))) { + continue; } FileInputStream fis = null; @@ -312,7 +325,6 @@ public class UpgradeUtilities { } } } - return checksum.getValue(); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java index 440b4f3a6cc..cb85c7deb61 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java @@ -184,8 +184,10 @@ public class TestPBHelper { private static BlockWithLocations getBlockWithLocations(int bid) { final String[] datanodeUuids = {"dn1", "dn2", "dn3"}; final String[] storageIDs = {"s1", "s2", "s3"}; + final StorageType[] storageTypes = { + StorageType.DISK, StorageType.DISK, StorageType.DISK}; return new BlockWithLocations(new Block(bid, 0, 1), - datanodeUuids, storageIDs); + datanodeUuids, storageIDs, storageTypes); } private void compare(BlockWithLocations locs1, BlockWithLocations locs2) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index fe774aaac9f..5da3cd177a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -18,17 +18,23 @@ package org.apache.hadoop.hdfs.server.balancer; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import java.io.File; import java.io.IOException; +import java.io.PrintWriter; import java.net.URI; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.HashSet; import java.util.List; import java.util.Random; +import java.util.Set; import java.util.concurrent.TimeoutException; 
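The checksumContents change above folds files in subdirectories into one running CRC32 when the new recursive flag is set. For readers outside the Hadoop test tree, here is a self-contained sketch of the same pattern; the sorting and buffer size are illustrative choices, not taken from UpgradeUtilities.

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.zip.CRC32;

class DirChecksumSketch {
  /** Checksum every regular file under dir, descending into subdirectories. */
  static long checksum(File dir) throws IOException {
    CRC32 crc = new CRC32();
    update(dir, crc);
    return crc.getValue();
  }

  private static void update(File dir, CRC32 crc) throws IOException {
    File[] entries = dir.listFiles();
    if (entries == null) {
      return; // not a directory, or an I/O error listing it
    }
    Arrays.sort(entries); // stable order so equal trees give equal checksums
    for (File entry : entries) {
      if (entry.isDirectory()) {
        update(entry, crc);
        continue;
      }
      byte[] buf = new byte[4096];
      FileInputStream in = new FileInputStream(entry);
      try {
        int n;
        while ((n = in.read(buf)) > 0) {
          crc.update(buf, 0, n);
        }
      } finally {
        in.close();
      }
    }
  }
}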
+import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; @@ -48,6 +54,8 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.balancer.Balancer.Cli; +import org.apache.hadoop.hdfs.server.balancer.Balancer.Parameters; +import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Tool; @@ -255,6 +263,18 @@ public class TestBalancer { } } } + + /** + * Wait until balanced: each datanode gives utilization within + * BALANCE_ALLOWED_VARIANCE of average + * @throws IOException + * @throws TimeoutException + */ + static void waitForBalancer(long totalUsedSpace, long totalCapacity, + ClientProtocol client, MiniDFSCluster cluster, Balancer.Parameters p) + throws IOException, TimeoutException { + waitForBalancer(totalUsedSpace, totalCapacity, client, cluster, p, 0); + } /** * Wait until balanced: each datanode gives utilization within @@ -263,11 +283,17 @@ public class TestBalancer { * @throws TimeoutException */ static void waitForBalancer(long totalUsedSpace, long totalCapacity, - ClientProtocol client, MiniDFSCluster cluster) - throws IOException, TimeoutException { + ClientProtocol client, MiniDFSCluster cluster, Balancer.Parameters p, + int expectedExcludedNodes) throws IOException, TimeoutException { long timeout = TIMEOUT; long failtime = (timeout <= 0L) ? Long.MAX_VALUE : Time.now() + timeout; + if (!p.nodesToBeIncluded.isEmpty()) { + totalCapacity = p.nodesToBeIncluded.size() * CAPACITY; + } + if (!p.nodesToBeExcluded.isEmpty()) { + totalCapacity -= p.nodesToBeExcluded.size() * CAPACITY; + } final double avgUtilization = ((double)totalUsedSpace) / totalCapacity; boolean balanced; do { @@ -275,9 +301,20 @@ public class TestBalancer { client.getDatanodeReport(DatanodeReportType.ALL); assertEquals(datanodeReport.length, cluster.getDataNodes().size()); balanced = true; + int actualExcludedNodeCount = 0; for (DatanodeInfo datanode : datanodeReport) { double nodeUtilization = ((double)datanode.getDfsUsed()) / datanode.getCapacity(); + if (Balancer.Util.shouldBeExcluded(p.nodesToBeExcluded, datanode)) { + assertTrue(nodeUtilization == 0); + actualExcludedNodeCount++; + continue; + } + if (!Balancer.Util.shouldBeIncluded(p.nodesToBeIncluded, datanode)) { + assertTrue(nodeUtilization == 0); + actualExcludedNodeCount++; + continue; + } if (Math.abs(avgUtilization - nodeUtilization) > BALANCE_ALLOWED_VARIANCE) { balanced = false; if (Time.now() > failtime) { @@ -294,6 +331,7 @@ public class TestBalancer { break; } } + assertEquals(expectedExcludedNodes,actualExcludedNodeCount); } while (!balanced); } @@ -307,22 +345,118 @@ public class TestBalancer { } return b.append("]").toString(); } - /** This test start a cluster with specified number of nodes, + /** + * Class which contains information about the + * new nodes to be added to the cluster for balancing. 
+ */ + static abstract class NewNodeInfo { + + Set nodesToBeExcluded = new HashSet(); + Set nodesToBeIncluded = new HashSet(); + + abstract String[] getNames(); + abstract int getNumberofNewNodes(); + abstract int getNumberofIncludeNodes(); + abstract int getNumberofExcludeNodes(); + + public Set getNodesToBeIncluded() { + return nodesToBeIncluded; + } + public Set getNodesToBeExcluded() { + return nodesToBeExcluded; + } + } + + /** + * The host names of new nodes are specified + */ + static class HostNameBasedNodes extends NewNodeInfo { + String[] hostnames; + + public HostNameBasedNodes(String[] hostnames, + Set nodesToBeExcluded, Set nodesToBeIncluded) { + this.hostnames = hostnames; + this.nodesToBeExcluded = nodesToBeExcluded; + this.nodesToBeIncluded = nodesToBeIncluded; + } + + @Override + String[] getNames() { + return hostnames; + } + @Override + int getNumberofNewNodes() { + return hostnames.length; + } + @Override + int getNumberofIncludeNodes() { + return nodesToBeIncluded.size(); + } + @Override + int getNumberofExcludeNodes() { + return nodesToBeExcluded.size(); + } + } + + /** + * The number of data nodes to be started are specified. + * The data nodes will have same host name, but different port numbers. + * + */ + static class PortNumberBasedNodes extends NewNodeInfo { + int newNodes; + int excludeNodes; + int includeNodes; + + public PortNumberBasedNodes(int newNodes, int excludeNodes, int includeNodes) { + this.newNodes = newNodes; + this.excludeNodes = excludeNodes; + this.includeNodes = includeNodes; + } + + @Override + String[] getNames() { + return null; + } + @Override + int getNumberofNewNodes() { + return newNodes; + } + @Override + int getNumberofIncludeNodes() { + return includeNodes; + } + @Override + int getNumberofExcludeNodes() { + return excludeNodes; + } + } + + private void doTest(Configuration conf, long[] capacities, String[] racks, + long newCapacity, String newRack, boolean useTool) throws Exception { + doTest(conf, capacities, racks, newCapacity, newRack, null, useTool, false); + } + + /** This test start a cluster with specified number of nodes, * and fills it to be 30% full (with a single file replicated identically * to all datanodes); * It then adds one new empty node and starts balancing. - * + * * @param conf - configuration * @param capacities - array of capacities of original nodes in cluster * @param racks - array of racks for original nodes in cluster * @param newCapacity - new node's capacity * @param newRack - new node's rack + * @param nodes - information about new nodes to be started. * @param useTool - if true run test via Cli with command-line argument * parsing, etc. Otherwise invoke balancer API directly. + * @param useFile - if true, the hosts to included or excluded will be stored in a + * file and then later read from the file. 
* @throws Exception */ - private void doTest(Configuration conf, long[] capacities, String[] racks, - long newCapacity, String newRack, boolean useTool) throws Exception { + private void doTest(Configuration conf, long[] capacities, + String[] racks, long newCapacity, String newRack, NewNodeInfo nodes, + boolean useTool, boolean useFile) throws Exception { LOG.info("capacities = " + long2String(capacities)); LOG.info("racks = " + Arrays.asList(racks)); LOG.info("newCapacity= " + newCapacity); @@ -346,17 +480,75 @@ public class TestBalancer { long totalUsedSpace = totalCapacity*3/10; createFile(cluster, filePath, totalUsedSpace / numOfDatanodes, (short) numOfDatanodes, 0); - // start up an empty node with the same capacity and on the same rack - cluster.startDataNodes(conf, 1, true, null, - new String[]{newRack}, new long[]{newCapacity}); - totalCapacity += newCapacity; + if (nodes == null) { // there is no specification of new nodes. + // start up an empty node with the same capacity and on the same rack + cluster.startDataNodes(conf, 1, true, null, + new String[]{newRack}, null,new long[]{newCapacity}); + totalCapacity += newCapacity; + } else { + //if running a test with "include list", include original nodes as well + if (nodes.getNumberofIncludeNodes()>0) { + for (DataNode dn: cluster.getDataNodes()) + nodes.getNodesToBeIncluded().add(dn.getDatanodeId().getHostName()); + } + String[] newRacks = new String[nodes.getNumberofNewNodes()]; + long[] newCapacities = new long[nodes.getNumberofNewNodes()]; + for (int i=0; i < nodes.getNumberofNewNodes(); i++) { + newRacks[i] = newRack; + newCapacities[i] = newCapacity; + } + // if host names are specified for the new nodes to be created. + if (nodes.getNames() != null) { + cluster.startDataNodes(conf, nodes.getNumberofNewNodes(), true, null, + newRacks, nodes.getNames(), newCapacities); + totalCapacity += newCapacity*nodes.getNumberofNewNodes(); + } else { // host names are not specified + cluster.startDataNodes(conf, nodes.getNumberofNewNodes(), true, null, + newRacks, null, newCapacities); + totalCapacity += newCapacity*nodes.getNumberofNewNodes(); + //populate the include nodes + if (nodes.getNumberofIncludeNodes() > 0) { + int totalNodes = cluster.getDataNodes().size(); + for (int i=0; i < nodes.getNumberofIncludeNodes(); i++) { + nodes.getNodesToBeIncluded().add (cluster.getDataNodes().get( + totalNodes-1-i).getDatanodeId().getXferAddr()); + } + } + //polulate the exclude nodes + if (nodes.getNumberofExcludeNodes() > 0) { + int totalNodes = cluster.getDataNodes().size(); + for (int i=0; i < nodes.getNumberofExcludeNodes(); i++) { + nodes.getNodesToBeExcluded().add (cluster.getDataNodes().get( + totalNodes-1-i).getDatanodeId().getXferAddr()); + } + } + } + } + // run balancer and validate results + Balancer.Parameters p = Balancer.Parameters.DEFAULT; + if (nodes != null) { + p = new Balancer.Parameters( + Balancer.Parameters.DEFAULT.policy, + Balancer.Parameters.DEFAULT.threshold, + nodes.getNodesToBeExcluded(), nodes.getNodesToBeIncluded()); + } + + int expectedExcludedNodes = 0; + if (nodes != null) { + if (!nodes.getNodesToBeExcluded().isEmpty()) { + expectedExcludedNodes = nodes.getNodesToBeExcluded().size(); + } else if (!nodes.getNodesToBeIncluded().isEmpty()) { + expectedExcludedNodes = + cluster.getDataNodes().size() - nodes.getNodesToBeIncluded().size(); + } + } // run balancer and validate results if (useTool) { - runBalancerCli(conf, totalUsedSpace, totalCapacity); + runBalancerCli(conf, totalUsedSpace, totalCapacity, p, useFile, 
expectedExcludedNodes); } else { - runBalancer(conf, totalUsedSpace, totalCapacity); + runBalancer(conf, totalUsedSpace, totalCapacity, p, expectedExcludedNodes); } } finally { cluster.shutdown(); @@ -365,11 +557,17 @@ public class TestBalancer { private void runBalancer(Configuration conf, long totalUsedSpace, long totalCapacity) throws Exception { + runBalancer(conf, totalUsedSpace, totalCapacity, Balancer.Parameters.DEFAULT, 0); + } + + private void runBalancer(Configuration conf, + long totalUsedSpace, long totalCapacity, Balancer.Parameters p, + int excludedNodes) throws Exception { waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster); // start rebalancing Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); - final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf); + final int r = Balancer.run(namenodes, p, conf); if (conf.getInt(DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT) ==0) { assertEquals(Balancer.ReturnStatus.NO_MOVE_PROGRESS.code, r); @@ -379,22 +577,66 @@ public class TestBalancer { } waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster); LOG.info("Rebalancing with default ctor."); - waitForBalancer(totalUsedSpace, totalCapacity, client, cluster); + waitForBalancer(totalUsedSpace, totalCapacity, client, cluster, p, excludedNodes); } - - private void runBalancerCli(Configuration conf, - long totalUsedSpace, long totalCapacity) throws Exception { - waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster); - final String[] args = { "-policy", "datanode" }; + private void runBalancerCli(Configuration conf, + long totalUsedSpace, long totalCapacity, + Balancer.Parameters p, boolean useFile, int expectedExcludedNodes) throws Exception { + waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster); + List args = new ArrayList(); + args.add("-policy"); + args.add("datanode"); + + File excludeHostsFile = null; + if (!p.nodesToBeExcluded.isEmpty()) { + args.add("-exclude"); + if (useFile) { + excludeHostsFile = new File ("exclude-hosts-file"); + PrintWriter pw = new PrintWriter(excludeHostsFile); + for (String host: p.nodesToBeExcluded) { + pw.write( host + "\n"); + } + pw.close(); + args.add("-f"); + args.add("exclude-hosts-file"); + } else { + args.add(StringUtils.join(p.nodesToBeExcluded, ',')); + } + } + + File includeHostsFile = null; + if (!p.nodesToBeIncluded.isEmpty()) { + args.add("-include"); + if (useFile) { + includeHostsFile = new File ("include-hosts-file"); + PrintWriter pw = new PrintWriter(includeHostsFile); + for (String host: p.nodesToBeIncluded){ + pw.write( host + "\n"); + } + pw.close(); + args.add("-f"); + args.add("include-hosts-file"); + } else { + args.add(StringUtils.join(p.nodesToBeIncluded, ',')); + } + } + final Tool tool = new Cli(); tool.setConf(conf); - final int r = tool.run(args); // start rebalancing + final int r = tool.run(args.toArray(new String[0])); // start rebalancing assertEquals("Tools should exit 0 on success", 0, r); waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster); LOG.info("Rebalancing with default ctor."); - waitForBalancer(totalUsedSpace, totalCapacity, client, cluster); + waitForBalancer(totalUsedSpace, totalCapacity, client, cluster, p, expectedExcludedNodes); + + if (excludeHostsFile != null && excludeHostsFile.exists()) { + excludeHostsFile.delete(); + } + if (includeHostsFile != null && includeHostsFile.exists()) { + includeHostsFile.delete(); + } } /** one-node cluster 
test*/ @@ -416,6 +658,71 @@ public class TestBalancer { oneNodeTest(conf, false); } + /* we first start a cluster and fill the cluster up to a certain size. + * then redistribute blocks according the required distribution. + * Then we start an empty datanode. + * Afterwards a balancer is run to balance the cluster. + * A partially filled datanode is excluded during balancing. + * This triggers a situation where one of the block's location is unknown. + */ + @Test(timeout=100000) + public void testUnknownDatanode() throws Exception { + Configuration conf = new HdfsConfiguration(); + initConf(conf); + long distribution[] = new long[] {50*CAPACITY/100, 70*CAPACITY/100, 0*CAPACITY/100}; + long capacities[] = new long[]{CAPACITY, CAPACITY, CAPACITY}; + String racks[] = new String[] {RACK0, RACK1, RACK1}; + + int numDatanodes = distribution.length; + if (capacities.length != numDatanodes || racks.length != numDatanodes) { + throw new IllegalArgumentException("Array length is not the same"); + } + + // calculate total space that need to be filled + final long totalUsedSpace = sum(distribution); + + // fill the cluster + ExtendedBlock[] blocks = generateBlocks(conf, totalUsedSpace, + (short) numDatanodes); + + // redistribute blocks + Block[][] blocksDN = distributeBlocks( + blocks, (short)(numDatanodes-1), distribution); + + // restart the cluster: do NOT format the cluster + conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f"); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes) + .format(false) + .racks(racks) + .simulatedCapacities(capacities) + .build(); + try { + cluster.waitActive(); + client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(), + ClientProtocol.class).getProxy(); + + for(int i = 0; i < 3; i++) { + cluster.injectBlocks(i, Arrays.asList(blocksDN[i]), null); + } + + cluster.startDataNodes(conf, 1, true, null, + new String[]{RACK0}, null,new long[]{CAPACITY}); + cluster.triggerHeartbeats(); + + Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + Set datanodes = new HashSet(); + datanodes.add(cluster.getDataNodes().get(0).getDatanodeId().getHostName()); + Balancer.Parameters p = new Balancer.Parameters( + Balancer.Parameters.DEFAULT.policy, + Balancer.Parameters.DEFAULT.threshold, + datanodes, Balancer.Parameters.DEFAULT.nodesToBeIncluded); + final int r = Balancer.run(namenodes, p, conf); + assertEquals(Balancer.ReturnStatus.SUCCESS.code, r); + } finally { + cluster.shutdown(); + } + } + /** * Test parse method in Balancer#Cli class with threshold value out of * boundaries. 
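As testUnknownDatanode above illustrates, the balancer can be driven programmatically with a Parameters object carrying the exclude and include sets. A condensed sketch of that call pattern follows; it assumes the caller sits in the org.apache.hadoop.hdfs.server.balancer package, as these tests do, and the excluded host name is a placeholder.

package org.apache.hadoop.hdfs.server.balancer;

import java.net.URI;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;

class RunBalancerSketch {
  static int rebalanceExcluding(String excludedHost) throws Exception {
    Configuration conf = new HdfsConfiguration();
    Set<String> excluded = new HashSet<String>();
    excluded.add(excludedHost); // placeholder host name

    // Keep the default policy and threshold, but skip the excluded datanodes.
    Balancer.Parameters p = new Balancer.Parameters(
        Balancer.Parameters.DEFAULT.policy,
        Balancer.Parameters.DEFAULT.threshold,
        excluded, Balancer.Parameters.DEFAULT.nodesToBeIncluded);

    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    return Balancer.run(namenodes, p, conf); // Balancer.ReturnStatus code
  }
}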
@@ -440,7 +747,7 @@ public class TestBalancer { } } - /** Test a cluster with even distribution, + /** Test a cluster with even distribution, * then a new empty node is added to the cluster*/ @Test(timeout=100000) public void testBalancer0() throws Exception { @@ -547,7 +854,35 @@ public class TestBalancer { } catch (IllegalArgumentException e) { } - parameters = new String[] { "-threshold 1 -policy" }; + parameters = new String[] {"-threshold", "1", "-policy"}; + try { + Balancer.Cli.parse(parameters); + fail(reason); + } catch (IllegalArgumentException e) { + + } + parameters = new String[] {"-threshold", "1", "-include"}; + try { + Balancer.Cli.parse(parameters); + fail(reason); + } catch (IllegalArgumentException e) { + + } + parameters = new String[] {"-threshold", "1", "-exclude"}; + try { + Balancer.Cli.parse(parameters); + fail(reason); + } catch (IllegalArgumentException e) { + + } + parameters = new String[] {"-include", "-f"}; + try { + Balancer.Cli.parse(parameters); + fail(reason); + } catch (IllegalArgumentException e) { + + } + parameters = new String[] {"-exclude", "-f"}; try { Balancer.Cli.parse(parameters); fail(reason); @@ -555,6 +890,13 @@ public class TestBalancer { } + parameters = new String[] {"-include", "testnode1", "-exclude", "testnode2"}; + try { + Balancer.Cli.parse(parameters); + fail("IllegalArgumentException is expected when both -exclude and -include are specified"); + } catch (IllegalArgumentException e) { + + } } /** @@ -569,6 +911,183 @@ public class TestBalancer { oneNodeTest(conf, true); } + /** + * Test a cluster with even distribution, + * then three nodes are added to the cluster, + * runs balancer with two of the nodes in the exclude list + */ + @Test(timeout=100000) + public void testBalancerWithExcludeList() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + Set excludeHosts = new HashSet(); + excludeHosts.add( "datanodeY"); + excludeHosts.add( "datanodeZ"); + doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2, + new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"}, + excludeHosts, Parameters.DEFAULT.nodesToBeIncluded), false, false); + } + + /** + * Test a cluster with even distribution, + * then three nodes are added to the cluster, + * runs balancer with two of the nodes in the exclude list + */ + @Test(timeout=100000) + public void testBalancerWithExcludeListWithPorts() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, + CAPACITY, RACK2, new PortNumberBasedNodes(3, 2, 0), false, false); + } + + /** + * Test a cluster with even distribution, + * then three nodes are added to the cluster, + * runs balancer with two of the nodes in the exclude list + */ + @Test(timeout=100000) + public void testBalancerCliWithExcludeList() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + Set excludeHosts = new HashSet(); + excludeHosts.add( "datanodeY"); + excludeHosts.add( "datanodeZ"); + doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2, + new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"}, excludeHosts, + Parameters.DEFAULT.nodesToBeIncluded), true, false); + } + + /** + * Test a cluster with even distribution, + * then three nodes are added to the cluster, + * runs balancer with two of the nodes in the exclude list + */ + 
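The same exclusion can also be expressed through the command-line Cli tool, which is what the following tests exercise with a hosts file. Below is a hedged sketch mirroring runBalancerCli; the file name is a placeholder, and the class again assumes the balancer package so that Cli is accessible.

package org.apache.hadoop.hdfs.server.balancer;

import java.io.File;
import java.io.PrintWriter;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.balancer.Balancer.Cli;
import org.apache.hadoop.util.Tool;

class BalancerCliSketch {
  static int runWithExcludeFile(String... excludedHosts) throws Exception {
    // One host per line, the same format runBalancerCli writes.
    File excludeFile = new File("exclude-hosts-file"); // hypothetical path
    PrintWriter pw = new PrintWriter(excludeFile);
    for (String host : excludedHosts) {
      pw.println(host);
    }
    pw.close();

    Configuration conf = new HdfsConfiguration();
    Tool tool = new Cli();
    tool.setConf(conf);
    // Equivalent command line: -policy datanode -exclude -f exclude-hosts-file
    return tool.run(new String[] {
        "-policy", "datanode", "-exclude", "-f", excludeFile.getPath()});
  }
}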
@Test(timeout=100000) + public void testBalancerCliWithExcludeListWithPorts() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, + CAPACITY, RACK2, new PortNumberBasedNodes(3, 2, 0), true, false); + } + + /** + * Test a cluster with even distribution, + * then three nodes are added to the cluster, + * runs balancer with two of the nodes in the exclude list in a file + */ + @Test(timeout=100000) + public void testBalancerCliWithExcludeListInAFile() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + Set<String> excludeHosts = new HashSet<String>(); + excludeHosts.add( "datanodeY"); + excludeHosts.add( "datanodeZ"); + doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2, + new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"}, + excludeHosts, Parameters.DEFAULT.nodesToBeIncluded), true, true); + } + + /** + * Test a cluster with even distribution, + * then three nodes are added to the cluster, + * runs balancer with two of the nodes in the exclude list + */ + @Test(timeout=100000) + public void testBalancerCliWithExcludeListWithPortsInAFile() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, + CAPACITY, RACK2, new PortNumberBasedNodes(3, 2, 0), true, true); + } + + /** + * Test a cluster with even distribution, + * then three nodes are added to the cluster, + * runs balancer with two of the nodes in the include list + */ + @Test(timeout=100000) + public void testBalancerWithIncludeList() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + Set<String> includeHosts = new HashSet<String>(); + includeHosts.add( "datanodeY"); + doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2, + new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"}, + Parameters.DEFAULT.nodesToBeExcluded, includeHosts), false, false); + } + + /** + * Test a cluster with even distribution, + * then three nodes are added to the cluster, + * runs balancer with two of the nodes in the include list + */ + @Test(timeout=100000) + public void testBalancerWithIncludeListWithPorts() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, + CAPACITY, RACK2, new PortNumberBasedNodes(3, 0, 1), false, false); + } + + /** + * Test a cluster with even distribution, + * then three nodes are added to the cluster, + * runs balancer with two of the nodes in the include list + */ + @Test(timeout=100000) + public void testBalancerCliWithIncludeList() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + Set<String> includeHosts = new HashSet<String>(); + includeHosts.add( "datanodeY"); + doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2, + new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"}, + Parameters.DEFAULT.nodesToBeExcluded, includeHosts), true, false); + } + + /** + * Test a cluster with even distribution, + * then three nodes are added to the cluster, + * runs balancer with two of the nodes in the include list + */ + @Test(timeout=100000) + public void testBalancerCliWithIncludeListWithPorts() throws Exception { + final Configuration conf = new
HdfsConfiguration(); + initConf(conf); + doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, + CAPACITY, RACK2, new PortNumberBasedNodes(3, 0, 1), true, false); + } + + /** + * Test a cluster with even distribution, + * then three nodes are added to the cluster, + * runs balancer with two of the nodes in the include list + */ + @Test(timeout=100000) + public void testBalancerCliWithIncludeListInAFile() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + Set includeHosts = new HashSet(); + includeHosts.add( "datanodeY"); + doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2, + new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"}, + Parameters.DEFAULT.nodesToBeExcluded, includeHosts), true, true); + } + + /** + * Test a cluster with even distribution, + * then three nodes are added to the cluster, + * runs balancer with two of the nodes in the include list + */ + @Test(timeout=100000) + public void testBalancerCliWithIncludeListWithPortsInAFile() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, + CAPACITY, RACK2, new PortNumberBasedNodes(3, 0, 1), true, true); + } + /** * @param args */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java index 1a309910eb9..9652f8636a0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java @@ -97,10 +97,10 @@ public class TestBalancerWithHANameNodes { Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); assertEquals(1, namenodes.size()); assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster))); - final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf); + final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf); assertEquals(Balancer.ReturnStatus.SUCCESS.code, r); TestBalancer.waitForBalancer(totalUsedSpace, totalCapacity, client, - cluster); + cluster, Balancer.Parameters.DEFAULT); } finally { cluster.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java index f5848041bcf..1a7ddd331b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java @@ -159,7 +159,7 @@ public class TestBalancerWithMultipleNameNodes { // start rebalancing final Collection namenodes = DFSUtil.getNsServiceRpcUris(s.conf); - final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, s.conf); + final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, s.conf); Assert.assertEquals(Balancer.ReturnStatus.SUCCESS.code, r); LOG.info("BALANCER 2"); @@ -195,7 +195,7 @@ public class TestBalancerWithMultipleNameNodes { balanced = true; for(int d = 0; d < used.length; d++) { final 
double p = used[d]*100.0/cap[d]; - balanced = p <= avg + Balancer.Parameters.DEFALUT.threshold; + balanced = p <= avg + Balancer.Parameters.DEFAULT.threshold; if (!balanced) { if (i % 100 == 0) { LOG.warn("datanodes " + d + " is not yet balanced: " diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java index 667204c0c9b..153ced3a243 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java @@ -175,7 +175,7 @@ public class TestBalancerWithNodeGroup { // start rebalancing Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); - final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf); + final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf); assertEquals(Balancer.ReturnStatus.SUCCESS.code, r); waitForHeartBeat(totalUsedSpace, totalCapacity); @@ -189,7 +189,7 @@ public class TestBalancerWithNodeGroup { // start rebalancing Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); - final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf); + final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf); Assert.assertTrue(r == Balancer.ReturnStatus.SUCCESS.code || (r == Balancer.ReturnStatus.NO_MOVE_PROGRESS.code)); waitForHeartBeat(totalUsedSpace, totalCapacity); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index e632ed1ca97..41af2370d14 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -368,7 +368,7 @@ public class TestBlockManager { DatanodeStorageInfo[] pipeline) throws IOException { for (int i = 1; i < pipeline.length; i++) { DatanodeStorageInfo storage = pipeline[i]; - bm.addBlock(storage.getDatanodeDescriptor(), storage.getStorageID(), blockInfo, null); + bm.addBlock(storage, blockInfo, null); blockInfo.addStorage(storage); } } @@ -549,12 +549,12 @@ public class TestBlockManager { // send block report, should be processed reset(node); - bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool", + bm.processReport(node, new DatanodeStorage(ds.getStorageID()), new BlockListAsLongs(null, null)); assertEquals(1, ds.getBlockReportCount()); // send block report again, should NOT be processed reset(node); - bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool", + bm.processReport(node, new DatanodeStorage(ds.getStorageID()), new BlockListAsLongs(null, null)); assertEquals(1, ds.getBlockReportCount()); @@ -566,7 +566,7 @@ public class TestBlockManager { assertEquals(0, ds.getBlockReportCount()); // ready for report again // send block report, should be processed after restart reset(node); - bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool", + bm.processReport(node, new DatanodeStorage(ds.getStorageID()), new BlockListAsLongs(null, null)); assertEquals(1, ds.getBlockReportCount()); } @@ -595,7 +595,7 @@ public class 
TestBlockManager { // send block report while pretending to already have blocks reset(node); doReturn(1).when(node).numBlocks(); - bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool", + bm.processReport(node, new DatanodeStorage(ds.getStorageID()), new BlockListAsLongs(null, null)); assertEquals(1, ds.getBlockReportCount()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java index 12674eb318a..2d7eaf3dcfe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java @@ -63,16 +63,16 @@ public class TestDatanodeDescriptor { assertTrue(storages.length > 0); final String storageID = storages[0].getStorageID(); // add first block - assertTrue(dd.addBlock(storageID, blk)); + assertTrue(storages[0].addBlock(blk)); assertEquals(1, dd.numBlocks()); // remove a non-existent block assertFalse(dd.removeBlock(blk1)); assertEquals(1, dd.numBlocks()); // add an existent block - assertFalse(dd.addBlock(storageID, blk)); + assertFalse(storages[0].addBlock(blk)); assertEquals(1, dd.numBlocks()); // add second block - assertTrue(dd.addBlock(storageID, blk1)); + assertTrue(storages[0].addBlock(blk1)); assertEquals(2, dd.numBlocks()); // remove first block assertTrue(dd.removeBlock(blk)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java index a17d32e6672..981ae76a10a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.blockmanagement.PendingDataNodeMessages.ReportedBlockInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.junit.Test; import com.google.common.base.Joiner; @@ -43,8 +44,10 @@ public class TestPendingDataNodeMessages { @Test public void testQueues() { DatanodeDescriptor fakeDN = DFSTestUtil.getLocalDatanodeDescriptor(); - msgs.enqueueReportedBlock(fakeDN, "STORAGE_ID", block1Gs1, ReplicaState.FINALIZED); - msgs.enqueueReportedBlock(fakeDN, "STORAGE_ID", block1Gs2, ReplicaState.FINALIZED); + DatanodeStorage storage = new DatanodeStorage("STORAGE_ID"); + DatanodeStorageInfo storageInfo = new DatanodeStorageInfo(fakeDN, storage); + msgs.enqueueReportedBlock(storageInfo, block1Gs1, ReplicaState.FINALIZED); + msgs.enqueueReportedBlock(storageInfo, block1Gs2, ReplicaState.FINALIZED); assertEquals(2, msgs.count()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index 73c3ec86498..e575ceeb7ab 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -82,7 +82,7 @@ public class TestReplicationPolicy { private static NameNode namenode; private static BlockPlacementPolicy replicator; private static final String filename = "/dummyfile.txt"; - private static DatanodeDescriptor dataNodes[]; + private static DatanodeDescriptor[] dataNodes; private static DatanodeStorageInfo[] storages; // The interval for marking a datanode as stale, private static final long staleInterval = @@ -1118,8 +1118,7 @@ public class TestReplicationPolicy { // Adding this block will increase its current replication, and that will // remove it from the queue. bm.addStoredBlockUnderConstruction(new StatefulBlockInfo(info, info, - ReplicaState.FINALIZED), TestReplicationPolicy.dataNodes[0], - "STORAGE"); + ReplicaState.FINALIZED), TestReplicationPolicy.storages[0]); // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block // from QUEUE_VERY_UNDER_REPLICATED. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java index a3622a465ad..67805c08f63 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java @@ -590,7 +590,6 @@ public class TestBlockRecovery { Configuration conf = new HdfsConfiguration(); conf.set(DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY, "1000"); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) - .nnTopology(MiniDFSNNTopology.simpleSingleNN(8020, 50070)) .numDataNodes(1).build(); try { cluster.waitClusterUp(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java index 38403eb8258..b1172a0806f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java @@ -25,6 +25,7 @@ import java.io.FilenameFilter; import java.io.IOException; import java.net.InetSocketAddress; import java.net.Socket; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -384,7 +385,7 @@ public class TestDataNodeVolumeFailure { continue; } - String [] res = metaFilesInDir(dir); + List res = MiniDFSCluster.getAllBlockMetadataFiles(dir); if(res == null) { System.out.println("res is null for dir = " + dir + " i=" + i + " and j=" + j); continue; @@ -392,7 +393,8 @@ public class TestDataNodeVolumeFailure { //System.out.println("for dn" + i + "." 
+ j + ": " + dir + "=" + res.length+ " files"); //int ii = 0; - for(String s: res) { + for(File f: res) { + String s = f.getName(); // cut off "blk_-" at the beginning and ".meta" at the end assertNotNull("Block file name should not be null", s); String bid = s.substring(s.indexOf("_")+1, s.lastIndexOf("_")); @@ -408,25 +410,9 @@ public class TestDataNodeVolumeFailure { //System.out.println("dir1="+dir.getPath() + "blocks=" + res.length); //System.out.println("dir2="+dir2.getPath() + "blocks=" + res2.length); - total += res.length; + total += res.size(); } } return total; } - - /* - * count how many files *.meta are in the dir - */ - private String [] metaFilesInDir(File dir) { - String [] res = dir.list( - new FilenameFilter() { - @Override - public boolean accept(File dir, String name) { - return name.startsWith("blk_") && - name.endsWith(Block.METADATA_EXTENSION); - } - } - ); - return res; - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java index d16a4bb9e06..755d49922c1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java @@ -103,9 +103,10 @@ public class TestDeleteBlockPool { fs1.delete(new Path("/alpha"), true); // Wait till all blocks are deleted from the dn2 for bpid1. - while ((MiniDFSCluster.getFinalizedDir(dn2StorageDir1, - bpid1).list().length != 0) || (MiniDFSCluster.getFinalizedDir( - dn2StorageDir2, bpid1).list().length != 0)) { + File finalDir1 = MiniDFSCluster.getFinalizedDir(dn2StorageDir1, bpid1); + File finalDir2 = MiniDFSCluster.getFinalizedDir(dn2StorageDir1, bpid2); + while ((!DatanodeUtil.dirNoFilesRecursive(finalDir1)) || + (!DatanodeUtil.dirNoFilesRecursive(finalDir2))) { try { Thread.sleep(3000); } catch (Exception ignored) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java index 798b7b7c705..4b5b6e1ec4f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java @@ -201,7 +201,7 @@ public class TestDiskError { } /** - * Checks whether {@link DataNode#checkDiskError()} is being called or not. + * Checks whether {@link DataNode#checkDiskErrorAsync()} is being called or not. 
* Before refactoring the code the above function was not getting called * @throws IOException, InterruptedException */ @@ -214,7 +214,7 @@ public class TestDiskError { DataNode dataNode = cluster.getDataNodes().get(0); long slackTime = dataNode.checkDiskErrorInterval/2; //checking for disk error - dataNode.checkDiskError(); + dataNode.checkDiskErrorAsync(); Thread.sleep(dataNode.checkDiskErrorInterval); long lastDiskErrorCheck = dataNode.getLastDiskErrorCheck(); assertTrue("Disk Error check is not performed within " + dataNode.checkDiskErrorInterval + " ms", ((Time.monotonicNow()-lastDiskErrorCheck) < (dataNode.checkDiskErrorInterval + slackTime))); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java index f36483e642d..1ddc774c842 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java @@ -34,6 +34,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -1256,6 +1257,33 @@ public abstract class FSAclBaseTest { fsAsDiana.getAclStatus(bruceFile); } + @Test + public void testAccess() throws IOException, InterruptedException { + Path p1 = new Path("/p1"); + fs.mkdirs(p1); + fs.setOwner(p1, BRUCE.getShortUserName(), "groupX"); + fsAsBruce.setAcl(p1, Lists.newArrayList( + aclEntry(ACCESS, USER, READ), + aclEntry(ACCESS, USER, "bruce", READ), + aclEntry(ACCESS, GROUP, NONE), + aclEntry(ACCESS, OTHER, NONE))); + fsAsBruce.access(p1, FsAction.READ); + try { + fsAsBruce.access(p1, FsAction.WRITE); + fail("The access call should have failed."); + } catch (AccessControlException e) { + // expected + } + + Path badPath = new Path("/bad/bad"); + try { + fsAsBruce.access(badPath, FsAction.READ); + fail("The access call should have failed"); + } catch (FileNotFoundException e) { + // expected + } + } + /** * Creates a FileSystem for the super-user. 
* diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index 61fa4310129..4cddd60f3f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -41,6 +41,7 @@ import java.net.InetSocketAddress; import java.nio.channels.FileChannel; import java.security.PrivilegedExceptionAction; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Random; import java.util.Set; @@ -63,6 +64,7 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; @@ -750,15 +752,14 @@ public class TestFsck { for (int j=0; j<=1; j++) { File storageDir = cluster.getInstanceStorageDir(i, j); File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid); - File[] blocks = data_dir.listFiles(); - if (blocks == null) + List metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles( + data_dir); + if (metadataFiles == null) continue; - - for (int idx = 0; idx < blocks.length; idx++) { - if (!blocks[idx].getName().startsWith("blk_")) { - continue; - } - assertTrue("Cannot remove file.", blocks[idx].delete()); + for (File metadataFile : metadataFiles) { + File blockFile = Block.metaToBlockFile(metadataFile); + assertTrue("Cannot remove file.", blockFile.delete()); + assertTrue("Cannot remove file.", metadataFile.delete()); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java index 704bc1669d0..a739b7aa6ed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java @@ -45,6 +45,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIsNotDirectoryException; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSClient; @@ -581,6 +582,7 @@ public class TestINodeFile { fs.getAclStatus(testFileInodePath); fs.getXAttrs(testFileInodePath); fs.listXAttrs(testFileInodePath); + fs.access(testFileInodePath, FsAction.READ_WRITE); } // symbolic link related tests diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java index 18f83ef8691..7e36acb48c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java @@ 
-25,6 +25,7 @@ import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.util.Collection; +import java.util.List; import java.util.Random; import org.apache.commons.logging.Log; @@ -39,7 +40,11 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.TestFileCorruption; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; import org.apache.hadoop.util.StringUtils; import org.junit.Test; @@ -87,36 +92,29 @@ public class TestListCorruptFileBlocks { File storageDir = cluster.getInstanceStorageDir(0, 1); File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid); assertTrue("data directory does not exist", data_dir.exists()); - File[] blocks = data_dir.listFiles(); - assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0)); - for (int idx = 0; idx < blocks.length; idx++) { - if (blocks[idx].getName().startsWith("blk_") && - blocks[idx].getName().endsWith(".meta")) { - // - // shorten .meta file - // - RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw"); - FileChannel channel = file.getChannel(); - long position = channel.size() - 2; - int length = 2; - byte[] buffer = new byte[length]; - random.nextBytes(buffer); - channel.write(ByteBuffer.wrap(buffer), position); - file.close(); - LOG.info("Deliberately corrupting file " + blocks[idx].getName() + - " at offset " + position + " length " + length); + List metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir); + assertTrue("Data directory does not contain any blocks or there was an " + + "IO error", metaFiles != null && !metaFiles.isEmpty()); + File metaFile = metaFiles.get(0); + RandomAccessFile file = new RandomAccessFile(metaFile, "rw"); + FileChannel channel = file.getChannel(); + long position = channel.size() - 2; + int length = 2; + byte[] buffer = new byte[length]; + random.nextBytes(buffer); + channel.write(ByteBuffer.wrap(buffer), position); + file.close(); + LOG.info("Deliberately corrupting file " + metaFile.getName() + + " at offset " + position + " length " + length); - // read all files to trigger detection of corrupted replica - try { - util.checkFiles(fs, "/srcdat10"); - } catch (BlockMissingException e) { - System.out.println("Received BlockMissingException as expected."); - } catch (IOException e) { - assertTrue("Corrupted replicas not handled properly. Expecting BlockMissingException " + - " but received IOException " + e, false); - } - break; - } + // read all files to trigger detection of corrupted replica + try { + util.checkFiles(fs, "/srcdat10"); + } catch (BlockMissingException e) { + System.out.println("Received BlockMissingException as expected."); + } catch (IOException e) { + assertTrue("Corrupted replicas not handled properly. Expecting BlockMissingException " + + " but received IOException " + e, false); } // fetch bad file list from namenode. There should be one file. 
@@ -174,38 +172,30 @@ public class TestListCorruptFileBlocks { File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, cluster.getNamesystem().getBlockPoolId()); assertTrue("data directory does not exist", data_dir.exists()); - File[] blocks = data_dir.listFiles(); - assertTrue("Blocks do not exist in data-dir", (blocks != null) && - (blocks.length > 0)); - for (int idx = 0; idx < blocks.length; idx++) { - if (blocks[idx].getName().startsWith("blk_") && - blocks[idx].getName().endsWith(".meta")) { - // - // shorten .meta file - // - RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw"); - FileChannel channel = file.getChannel(); - long position = channel.size() - 2; - int length = 2; - byte[] buffer = new byte[length]; - random.nextBytes(buffer); - channel.write(ByteBuffer.wrap(buffer), position); - file.close(); - LOG.info("Deliberately corrupting file " + blocks[idx].getName() + - " at offset " + position + " length " + length); + List metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir); + assertTrue("Data directory does not contain any blocks or there was an " + + "IO error", metaFiles != null && !metaFiles.isEmpty()); + File metaFile = metaFiles.get(0); + RandomAccessFile file = new RandomAccessFile(metaFile, "rw"); + FileChannel channel = file.getChannel(); + long position = channel.size() - 2; + int length = 2; + byte[] buffer = new byte[length]; + random.nextBytes(buffer); + channel.write(ByteBuffer.wrap(buffer), position); + file.close(); + LOG.info("Deliberately corrupting file " + metaFile.getName() + + " at offset " + position + " length " + length); - // read all files to trigger detection of corrupted replica - try { - util.checkFiles(fs, "/srcdat10"); - } catch (BlockMissingException e) { - System.out.println("Received BlockMissingException as expected."); - } catch (IOException e) { - assertTrue("Corrupted replicas not handled properly. " + - "Expecting BlockMissingException " + - " but received IOException " + e, false); - } - break; - } + // read all files to trigger detection of corrupted replica + try { + util.checkFiles(fs, "/srcdat10"); + } catch (BlockMissingException e) { + System.out.println("Received BlockMissingException as expected."); + } catch (IOException e) { + assertTrue("Corrupted replicas not handled properly. " + + "Expecting BlockMissingException " + + " but received IOException " + e, false); } // fetch bad file list from namenode. There should be one file. 
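(A sketch, not part of the patch.) The two TestListCorruptFileBlocks hunks above repeat the same "shorten the first .meta file" logic after switching to MiniDFSCluster.getAllBlockMetadataFiles(). A small helper along these lines could factor that out; the method name is hypothetical, and only the calls already visible in the hunks are assumed:

    // Corrupt the first block metadata file under dataDir by overwriting its
    // last two bytes with random data, so the replica is detected as corrupt.
    private static void corruptFirstMetaFile(File dataDir, Random random)
        throws IOException {
      List<File> metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(dataDir);
      assertTrue("Data directory does not contain any blocks or there was an "
          + "IO error", metaFiles != null && !metaFiles.isEmpty());
      File metaFile = metaFiles.get(0);
      RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
      try {
        FileChannel channel = file.getChannel();
        long position = channel.size() - 2;
        byte[] buffer = new byte[2];
        random.nextBytes(buffer);
        channel.write(ByteBuffer.wrap(buffer), position);
      } finally {
        file.close();
      }
    }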
@@ -295,17 +285,18 @@ public class TestListCorruptFileBlocks { for (int j = 0; j <= 1; j++) { File storageDir = cluster.getInstanceStorageDir(i, j); File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid); - File[] blocks = data_dir.listFiles(); - if (blocks == null) + List metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles( + data_dir); + if (metadataFiles == null) continue; // assertTrue("Blocks do not exist in data-dir", (blocks != null) && // (blocks.length > 0)); - for (int idx = 0; idx < blocks.length; idx++) { - if (!blocks[idx].getName().startsWith("blk_")) { - continue; - } - LOG.info("Deliberately removing file " + blocks[idx].getName()); - assertTrue("Cannot remove file.", blocks[idx].delete()); + for (File metadataFile : metadataFiles) { + File blockFile = Block.metaToBlockFile(metadataFile); + LOG.info("Deliberately removing file " + blockFile.getName()); + assertTrue("Cannot remove file.", blockFile.delete()); + LOG.info("Deliberately removing file " + metadataFile.getName()); + assertTrue("Cannot remove file.", metadataFile.delete()); // break; } } @@ -405,17 +396,18 @@ public class TestListCorruptFileBlocks { for (int i = 0; i < 2; i++) { File storageDir = cluster.getInstanceStorageDir(0, i); File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid); - File[] blocks = data_dir.listFiles(); - if (blocks == null) + List metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles( + data_dir); + if (metadataFiles == null) continue; // assertTrue("Blocks do not exist in data-dir", (blocks != null) && // (blocks.length > 0)); - for (int idx = 0; idx < blocks.length; idx++) { - if (!blocks[idx].getName().startsWith("blk_")) { - continue; - } - LOG.info("Deliberately removing file " + blocks[idx].getName()); - assertTrue("Cannot remove file.", blocks[idx].delete()); + for (File metadataFile : metadataFiles) { + File blockFile = Block.metaToBlockFile(metadataFile); + LOG.info("Deliberately removing file " + blockFile.getName()); + assertTrue("Cannot remove file.", blockFile.delete()); + LOG.info("Deliberately removing file " + metadataFile.getName()); + assertTrue("Cannot remove file.", metadataFile.delete()); // break; } } @@ -482,15 +474,14 @@ public class TestListCorruptFileBlocks { File storageDir = cluster.getInstanceStorageDir(i, j); File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid); LOG.info("Removing files from " + data_dir); - File[] blocks = data_dir.listFiles(); - if (blocks == null) + List metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles( + data_dir); + if (metadataFiles == null) continue; - - for (int idx = 0; idx < blocks.length; idx++) { - if (!blocks[idx].getName().startsWith("blk_")) { - continue; - } - assertTrue("Cannot remove file.", blocks[idx].delete()); + for (File metadataFile : metadataFiles) { + File blockFile = Block.metaToBlockFile(metadataFile); + assertTrue("Cannot remove file.", blockFile.delete()); + assertTrue("Cannot remove file.", metadataFile.delete()); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java index 785877a30ea..899b888f442 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java @@ -50,6 +50,7 @@ import 
org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSOutputStream; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; @@ -725,7 +726,12 @@ public class TestRetryCacheWithHA { client.getNamenode().updatePipeline(client.getClientName(), oldBlock, newBlock, newNodes, storageIDs); - out.close(); + // close() can fail if out.close() commits the block after the namenode has + // already received block RECEIVED notifications from the datanodes. + // Since the datanodes and the output stream still carry the old genstamps, + // these blocks will be marked as corrupt (after HDFS-5723) if the RECEIVED + // notifications reach the namenode first, and close() will then fail. + DFSTestUtil.abortStream((DFSOutputStream) out.getWrappedStream()); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java index 3deb47ff3af..3be1d36ca51 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; @@ -674,6 +675,13 @@ public class TestAclWithSnapshot { } catch (AccessControlException e) { // expected } + + try { + fs.access(pathToCheck, FsAction.READ); + fail("The access call should have failed for "+pathToCheck); + } catch (AccessControlException e) { + // expected + } } /** @@ -689,6 +697,7 @@ public class TestAclWithSnapshot { UserGroupInformation user, Path pathToCheck) throws Exception { try { fs.listStatus(pathToCheck); + fs.access(pathToCheck, FsAction.READ); } catch (AccessControlException e) { fail("expected permission granted for user " + user + ", path = " + pathToCheck); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java index 09f025c65ac..46e433d6df8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java @@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystemContractBaseTest; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.AppendTestUtil; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -49,6 +50,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.junit.Assert; +import
org.junit.Test; public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest { private static final Configuration conf = new Configuration(); @@ -530,4 +532,35 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest { } } } + + @Test + public void testAccess() throws IOException, InterruptedException { + Path p1 = new Path("/pathX"); + try { + UserGroupInformation ugi = UserGroupInformation.createUserForTesting("alpha", + new String[]{"beta"}); + WebHdfsFileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, + WebHdfsFileSystem.SCHEME); + + fs.mkdirs(p1); + fs.setPermission(p1, new FsPermission((short) 0444)); + fs.access(p1, FsAction.READ); + try { + fs.access(p1, FsAction.WRITE); + fail("The access call should have failed."); + } catch (AccessControlException e) { + // expected + } + + Path badPath = new Path("/bad"); + try { + fs.access(badPath, FsAction.READ); + fail("The access call should have failed"); + } catch (FileNotFoundException e) { + // expected + } + } finally { + fs.delete(p1, true); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java index a84918e13d1..45cd8fe3afb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java @@ -31,6 +31,7 @@ import java.util.Arrays; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; @@ -40,6 +41,7 @@ import org.apache.hadoop.hdfs.web.resources.GetOpParam; import org.apache.hadoop.hdfs.web.resources.PutOpParam; import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam; import org.apache.hadoop.hdfs.web.resources.UserParam; +import org.apache.hadoop.hdfs.web.resources.FsActionParam; import org.apache.hadoop.io.Text; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.SecurityUtil; @@ -283,6 +285,28 @@ public class TestWebHdfsUrl { }, fileStatusUrl); } + + @Test(timeout=60000) + public void testCheckAccessUrl() throws IOException { + Configuration conf = new Configuration(); + + UserGroupInformation ugi = + UserGroupInformation.createRemoteUser("test-user"); + UserGroupInformation.setLoginUser(ugi); + + WebHdfsFileSystem webhdfs = getWebHdfsFileSystem(ugi, conf); + Path fsPath = new Path("/p1"); + + URL checkAccessUrl = webhdfs.toUrl(GetOpParam.Op.CHECKACCESS, + fsPath, new FsActionParam(FsAction.READ_WRITE)); + checkQueryParams( + new String[]{ + GetOpParam.Op.CHECKACCESS.toQueryString(), + new UserParam(ugi.getShortUserName()).toString(), + FsActionParam.NAME + "=" + FsAction.READ_WRITE.SYMBOL + }, + checkAccessUrl); + } private void checkQueryParams(String[] expected, URL url) { Arrays.sort(expected); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java index 13a9610a346..bc41edc1107 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java @@ -27,6 +27,7 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; +import java.io.FileNotFoundException; import java.security.PrivilegedExceptionAction; import java.util.Arrays; @@ -39,6 +40,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystemTestWrapper; import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; @@ -393,4 +395,37 @@ public class TestPermissionSymlinks { GenericTestUtils.assertExceptionContains("Permission denied", e); } } + + @Test + public void testAccess() throws Exception { + fs.setPermission(target, new FsPermission((short) 0002)); + fs.setAcl(target, Arrays.asList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, GROUP, NONE), + aclEntry(ACCESS, USER, user.getShortUserName(), WRITE), + aclEntry(ACCESS, OTHER, WRITE))); + FileContext myfc = user.doAs(new PrivilegedExceptionAction() { + @Override + public FileContext run() throws IOException { + return FileContext.getFileContext(conf); + } + }); + + // Path to targetChild via symlink + myfc.access(link, FsAction.WRITE); + try { + myfc.access(link, FsAction.ALL); + fail("The access call should have failed."); + } catch (AccessControlException e) { + // expected + } + + Path badPath = new Path(link, "bad"); + try { + myfc.access(badPath, FsAction.READ); + fail("The access call should have failed"); + } catch (FileNotFoundException e) { + // expected + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-24-datanode-dir.tgz b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-24-datanode-dir.tgz new file mode 100644 index 00000000000..9f666fed090 Binary files /dev/null and b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-24-datanode-dir.tgz differ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-datanode-dir.txt b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-datanode-dir.txt new file mode 100644 index 00000000000..e5890ccda1e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-datanode-dir.txt @@ -0,0 +1,23 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Similar to hadoop-dfs-dir.txt, except this is used for a datanode layout +# upgrade test. +# Uncomment the following line to produce checksum info for a new DFS image. 
+#printChecksums + +/small 2976363016 +overallCRC 4099869518 + diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 5760cef3060..86ae7139b8c 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -83,6 +83,9 @@ Trunk (Unreleased) MAPREDUCE-5912. Task.calculateOutputSize does not handle Windows files after MAPREDUCE-5196. (Remus Rusanu via cnauroth) + MAPREDUCE-6019. MapReduce changes for exposing YARN/MR endpoints on multiple + interfaces. (Craig Welch, Milan Potocnik, Arpit Agarwal via xgong) + BUG FIXES MAPREDUCE-5714. Removed forceful JVM exit in shutDownJob. diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java index 074e3f0fbb8..5f39edd72e8 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java @@ -141,7 +141,9 @@ public class TaskAttemptListenerImpl extends CompositeService } server.start(); - this.address = NetUtils.getConnectAddress(server); + this.address = NetUtils.createSocketAddrForHost( + context.getNMHostname(), + server.getListenerAddress().getPort()); } catch (IOException e) { throw new YarnRuntimeException(e); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java index 6f036c4a74a..31e282a63e9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java @@ -66,4 +66,5 @@ public interface AppContext { boolean hasSuccessfullyUnregistered(); + String getNMHostname(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java index 8c1892af392..59e72490496 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java @@ -1018,6 +1018,11 @@ public class MRAppMaster extends CompositeService { public void resetIsLastAMRetry() { isLastAMRetry = false; } + + @Override + public String getNMHostname() { + return nmHost; + } } @SuppressWarnings("unchecked") diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java index 3c0e100b5cc..11235322f6a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java @@ -131,7 +131,8 @@ public class MRClientService extends AbstractService implements ClientService { } server.start(); - this.bindAddress = NetUtils.getConnectAddress(server); + this.bindAddress = NetUtils.createSocketAddrForHost(appContext.getNMHostname(), + server.getListenerAddress().getPort()); LOG.info("Instantiated MRClientService at " + this.bindAddress); try { // Explicitly disabling SSL for map reduce task as we can't allow MR users diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java index 256f0b7bb7a..2c81cf06d2d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java @@ -61,6 +61,13 @@ public class TestTaskAttemptListenerImpl { public static class MockTaskAttemptListenerImpl extends TaskAttemptListenerImpl { + public MockTaskAttemptListenerImpl(AppContext context, + JobTokenSecretManager jobTokenSecretManager, + RMHeartbeatHandler rmHeartbeatHandler, AMPreemptionPolicy policy) { + + super(context, jobTokenSecretManager, rmHeartbeatHandler, policy); + } + public MockTaskAttemptListenerImpl(AppContext context, JobTokenSecretManager jobTokenSecretManager, RMHeartbeatHandler rmHeartbeatHandler, @@ -210,7 +217,7 @@ public class TestTaskAttemptListenerImpl { when(appCtx.getEventHandler()).thenReturn(ea); CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy(); policy.init(appCtx); - TaskAttemptListenerImpl listener = new TaskAttemptListenerImpl( + TaskAttemptListenerImpl listener = new MockTaskAttemptListenerImpl( appCtx, secret, rmHeartbeatHandler, policy) { @Override protected void registerHeartbeatHandler(Configuration conf) { @@ -271,7 +278,7 @@ public class TestTaskAttemptListenerImpl { when(appCtx.getEventHandler()).thenReturn(ea); CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy(); policy.init(appCtx); - TaskAttemptListenerImpl listener = new TaskAttemptListenerImpl( + TaskAttemptListenerImpl listener = new MockTaskAttemptListenerImpl( appCtx, secret, rmHeartbeatHandler, policy) { @Override protected void registerHeartbeatHandler(Configuration conf) { @@ -326,7 +333,7 @@ public class TestTaskAttemptListenerImpl { when(appCtx.getEventHandler()).thenReturn(ea); CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy(); policy.init(appCtx); - TaskAttemptListenerImpl listener = new TaskAttemptListenerImpl( + TaskAttemptListenerImpl listener = new MockTaskAttemptListenerImpl( appCtx, secret, rmHeartbeatHandler, policy) { @Override protected void registerHeartbeatHandler(Configuration conf) { diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java index 511731a9ba1..dae0aa77fc0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java @@ -143,4 +143,9 @@ public class MockAppContext implements AppContext { return true; } +@Override + public String getNMHostname() { + // bogus - Not Required + return null; + } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java index 6fadf350f82..c25cf5060e9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java @@ -879,5 +879,10 @@ public class TestRuntimeEstimators { return true; } + @Override + public String getNMHostname() { + // bogus - Not Required + return null; + } } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java index 2e1a22e4310..9fa8a090a4b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java @@ -38,7 +38,9 @@ public class JHAdminConfig { public static final int DEFAULT_MR_HISTORY_PORT = 10020; public static final String DEFAULT_MR_HISTORY_ADDRESS = "0.0.0.0:" + DEFAULT_MR_HISTORY_PORT; - + public static final String MR_HISTORY_BIND_HOST = MR_HISTORY_PREFIX + + "bind-host"; + /** The address of the History server admin interface. 
*/ public static final String JHS_ADMIN_ADDRESS = MR_HISTORY_PREFIX + "admin.address"; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java index 2d453f1d308..cac01191fcd 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java @@ -29,6 +29,7 @@ import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.ipc.RPCUtil; import java.net.InetAddress; import java.net.InetSocketAddress; @@ -105,11 +106,15 @@ public class MRWebAppUtil { public static InetSocketAddress getJHSWebBindAddress(Configuration conf) { if (httpPolicyInJHS == Policy.HTTPS_ONLY) { - return conf.getSocketAddr(JHAdminConfig.MR_HISTORY_WEBAPP_HTTPS_ADDRESS, + return conf.getSocketAddr( + JHAdminConfig.MR_HISTORY_BIND_HOST, + JHAdminConfig.MR_HISTORY_WEBAPP_HTTPS_ADDRESS, JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_HTTPS_ADDRESS, JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_HTTPS_PORT); } else { - return conf.getSocketAddr(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS, + return conf.getSocketAddr( + JHAdminConfig.MR_HISTORY_BIND_HOST, + JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS, JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS, JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_PORT); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java index 96b81054460..001608b2596 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java @@ -83,6 +83,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.webapp.WebApp; @@ -119,6 +120,7 @@ public class HistoryClientService extends AbstractService { YarnRPC rpc = YarnRPC.create(conf); initializeWebApp(conf); InetSocketAddress address = conf.getSocketAddr( + JHAdminConfig.MR_HISTORY_BIND_HOST, JHAdminConfig.MR_HISTORY_ADDRESS, JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS, JHAdminConfig.DEFAULT_MR_HISTORY_PORT); @@ -137,9 +139,11 @@ public class HistoryClientService extends AbstractService { } server.start(); - this.bindAddress = conf.updateConnectAddr(JHAdminConfig.MR_HISTORY_ADDRESS, + this.bindAddress = conf.updateConnectAddr(JHAdminConfig.MR_HISTORY_BIND_HOST, + JHAdminConfig.MR_HISTORY_ADDRESS, 
+ JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS, server.getListenerAddress()); - LOG.info("Instantiated MRClientService at " + this.bindAddress); + LOG.info("Instantiated HistoryClientService at " + this.bindAddress); super.serviceStart(); } @@ -158,8 +162,9 @@ public class HistoryClientService extends AbstractService { JHAdminConfig.MR_WEBAPP_SPNEGO_USER_NAME_KEY) .at(NetUtils.getHostPortString(bindAddress)).start(webApp); + String connectHost = MRWebAppUtil.getJHSWebappURLWithoutScheme(conf).split(":")[0]; MRWebAppUtil.setJHSWebappURLWithoutScheme(conf, - NetUtils.getHostPortString(webApp.getListenerAddress())); + connectHost + ":" + webApp.getListenerAddress().getPort()); } @Override diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java index b7823a0c50d..194b85a5a29 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java @@ -394,4 +394,9 @@ public class JobHistory extends AbstractService implements HistoryContext { return true; } + @Override + public String getNMHostname() { + // bogus - Not Required + return null; + } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/HSAdminServer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/HSAdminServer.java index 23a34a47b94..858d945dfe1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/HSAdminServer.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/HSAdminServer.java @@ -34,6 +34,7 @@ import org.apache.hadoop.security.Groups; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.logaggregation.AggregatedLogDeletionService; import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos.RefreshUserMappingsProtocolService; import org.apache.hadoop.security.protocolPB.RefreshUserMappingsProtocolPB; @@ -94,7 +95,9 @@ public class HSAdminServer extends AbstractService implements HSAdminProtocol { WritableRpcEngine.ensureInitialized(); - clientRpcAddress = conf.getSocketAddr(JHAdminConfig.JHS_ADMIN_ADDRESS, + clientRpcAddress = conf.getSocketAddr( + JHAdminConfig.MR_HISTORY_BIND_HOST, + JHAdminConfig.JHS_ADMIN_ADDRESS, JHAdminConfig.DEFAULT_JHS_ADMIN_ADDRESS, JHAdminConfig.DEFAULT_JHS_ADMIN_PORT); clientRpcServer = new RPC.Builder(conf) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java index 1d781be1451..33220c6cbdb 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java @@ -82,13 +82,13 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Shell; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.NMDBSchemaVersionProto; +import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto; import org.apache.hadoop.yarn.server.api.ApplicationInitializationContext; import org.apache.hadoop.yarn.server.api.ApplicationTerminationContext; import org.apache.hadoop.yarn.server.api.AuxiliaryService; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; -import org.apache.hadoop.yarn.server.nodemanager.recovery.records.NMDBSchemaVersion; -import org.apache.hadoop.yarn.server.nodemanager.recovery.records.impl.pb.NMDBSchemaVersionPBImpl; +import org.apache.hadoop.yarn.server.records.Version; +import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl; import org.apache.hadoop.yarn.server.utils.LeveldbIterator; import org.apache.hadoop.yarn.util.ConverterUtils; import org.fusesource.leveldbjni.JniDBFactory; @@ -151,8 +151,8 @@ public class ShuffleHandler extends AuxiliaryService { private static final String STATE_DB_NAME = "mapreduce_shuffle_state"; private static final String STATE_DB_SCHEMA_VERSION_KEY = "shuffle-schema-version"; - protected static final NMDBSchemaVersion CURRENT_VERSION_INFO = - NMDBSchemaVersion.newInstance(1, 0); + protected static final Version CURRENT_VERSION_INFO = + Version.newInstance(1, 0); private int port; private ChannelFactory selector; @@ -491,21 +491,21 @@ public class ShuffleHandler extends AuxiliaryService { } @VisibleForTesting - NMDBSchemaVersion loadVersion() throws IOException { + Version loadVersion() throws IOException { byte[] data = stateDb.get(bytes(STATE_DB_SCHEMA_VERSION_KEY)); // if version is not stored previously, treat it as 1.0. if (data == null || data.length == 0) { - return NMDBSchemaVersion.newInstance(1, 0); + return Version.newInstance(1, 0); } - NMDBSchemaVersion version = - new NMDBSchemaVersionPBImpl(NMDBSchemaVersionProto.parseFrom(data)); + Version version = + new VersionPBImpl(VersionProto.parseFrom(data)); return version; } - private void storeSchemaVersion(NMDBSchemaVersion version) throws IOException { + private void storeSchemaVersion(Version version) throws IOException { String key = STATE_DB_SCHEMA_VERSION_KEY; byte[] data = - ((NMDBSchemaVersionPBImpl) version).getProto().toByteArray(); + ((VersionPBImpl) version).getProto().toByteArray(); try { stateDb.put(bytes(key), data); } catch (DBException e) { @@ -519,11 +519,11 @@ public class ShuffleHandler extends AuxiliaryService { // Only used for test @VisibleForTesting - void storeVersion(NMDBSchemaVersion version) throws IOException { + void storeVersion(Version version) throws IOException { storeSchemaVersion(version); } - protected NMDBSchemaVersion getCurrentVersion() { + protected Version getCurrentVersion() { return CURRENT_VERSION_INFO; } @@ -538,7 +538,7 @@ public class ShuffleHandler extends AuxiliaryService { * upgrade shuffle info or remove incompatible old state. 
*/ private void checkVersion() throws IOException { - NMDBSchemaVersion loadedVersion = loadVersion(); + Version loadedVersion = loadVersion(); LOG.info("Loaded state DB schema version info " + loadedVersion); if (loadedVersion.equals(getCurrentVersion())) { return; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java index 0974cc6ab1f..70536536617 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java @@ -75,7 +75,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.api.ApplicationInitializationContext; import org.apache.hadoop.yarn.server.api.ApplicationTerminationContext; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; -import org.apache.hadoop.yarn.server.nodemanager.recovery.records.NMDBSchemaVersion; +import org.apache.hadoop.yarn.server.records.Version; import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.ChannelFuture; import org.jboss.netty.channel.ChannelHandlerContext; @@ -764,11 +764,11 @@ public class TestShuffleHandler { // verify we are still authorized to shuffle to the old application rc = getShuffleResponseCode(shuffle, jt); Assert.assertEquals(HttpURLConnection.HTTP_OK, rc); - NMDBSchemaVersion version = NMDBSchemaVersion.newInstance(1, 0); + Version version = Version.newInstance(1, 0); Assert.assertEquals(version, shuffle.getCurrentVersion()); // emulate shuffle handler restart with compatible version - NMDBSchemaVersion version11 = NMDBSchemaVersion.newInstance(1, 1); + Version version11 = Version.newInstance(1, 1); // update version info before close shuffle shuffle.storeVersion(version11); Assert.assertEquals(version11, shuffle.loadVersion()); @@ -785,7 +785,7 @@ public class TestShuffleHandler { Assert.assertEquals(HttpURLConnection.HTTP_OK, rc); // emulate shuffle handler restart with incompatible version - NMDBSchemaVersion version21 = NMDBSchemaVersion.newInstance(2, 1); + Version version21 = Version.newInstance(2, 1); shuffle.storeVersion(version21); Assert.assertEquals(version21, shuffle.loadVersion()); shuffle.close(); diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml index e6dc75d3ca0..33f5a04c577 100644 --- a/hadoop-project-dist/pom.xml +++ b/hadoop-project-dist/pom.xml @@ -102,6 +102,8 @@ findbugs-maven-plugin ${basedir}/dev-support/findbugsExcludeFile.xml + true + 2048 diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index f996479ee68..7f0c179f13a 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -789,7 +789,13 @@ com.microsoft.windowsazure.storage microsoft-windowsazure-storage-sdk 0.6.0 - + + + + xerces + xercesImpl + 2.9.1 + diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index ad368cc95e8..eed76f7581f 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -35,6 +35,9 @@ Release 2.6.0 - UNRELEASED YARN-2181. Added preemption info to logs and RM web UI. (Wangda Tan via jianhe) + YARN-1354. Recover applications upon nodemanager restart. 
(Jason Lowe via + junping_du) + IMPROVEMENTS YARN-2242. Improve exception information on AM launch crashes. (Li Lu @@ -74,6 +77,17 @@ Release 2.6.0 - UNRELEASED YARN-2328. FairScheduler: Verify update and continuous scheduling threads are stopped when the scheduler is stopped. (kasha) + YARN-2347. Consolidated RMStateVersion and NMDBSchemaVersion into Version in + yarn-server-common. (Junping Du via zjshen) + + YARN-1994. Expose YARN/MR endpoints on multiple interfaces. (Craig Welch, + Milan Potocnik, Arpit Agarwal via xgong) + + YARN-2343. Improve NMToken expire exception message. (Li Lu via jianhe) + + YARN-2370. Fix comment in o.a.h.y.server.resourcemanager.schedulerAppSchedulingInfo + (Wenwu Peng via junping_du) + OPTIMIZATIONS BUG FIXES @@ -114,6 +128,9 @@ Release 2.6.0 - UNRELEASED YARN-2354. DistributedShell may allocate more containers than client specified after AM restarts. (Li Lu via jianhe) + YARN-2051. Fix bug in PBimpls and add more unit tests with reflection. + (Binglin Chang via junping_du) + Release 2.5.0 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java index 4cc0b70e4af..7fc58d67aef 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java @@ -305,6 +305,15 @@ public abstract class GetApplicationsRequest { @Unstable public abstract LongRange getStartRange(); + /** + * Set the range of start times to filter applications on + * + * @param range + */ + @Private + @Unstable + public abstract void setStartRange(LongRange range); + /** * Set the range of start times to filter applications on * @@ -326,6 +335,15 @@ public abstract class GetApplicationsRequest { @Unstable public abstract LongRange getFinishRange(); + /** + * Set the range of finish times to filter applications on + * + * @param range + */ + @Private + @Unstable + public abstract void setFinishRange(LongRange range); + /** * Set the range of finish times to filter applications on * diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java index d6393505f81..380f38d74a1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java @@ -31,6 +31,8 @@ public abstract class ResourceOption { int overCommitTimeout){ ResourceOption resourceOption = Records.newRecord(ResourceOption.class); resourceOption.setResource(resource); + resourceOption.setOverCommitTimeout(overCommitTimeout); + resourceOption.build(); return resourceOption; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index ab6b20e574e..9e08ef52008 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -126,6 +126,10 @@ public class YarnConfiguration extends Configuration { public static final String DEFAULT_RM_ADDRESS = "0.0.0.0:" + DEFAULT_RM_PORT; + /** The actual bind address for the RM.*/ + public static final String RM_BIND_HOST = + RM_PREFIX + "bind-host"; + /** The number of threads used to handle applications manager requests.*/ public static final String RM_CLIENT_THREAD_COUNT = RM_PREFIX + "client.thread-count"; @@ -545,6 +549,10 @@ public class YarnConfiguration extends Configuration { public static final String DEFAULT_NM_ADDRESS = "0.0.0.0:" + DEFAULT_NM_PORT; + /** The actual bind address of the NM.*/ + public static final String NM_BIND_HOST = + NM_PREFIX + "bind-host"; + /** who will execute(launch) the containers.*/ public static final String NM_CONTAINER_EXECUTOR = NM_PREFIX + "container-executor.class"; @@ -1132,6 +1140,10 @@ public class YarnConfiguration extends Configuration { public static final String DEFAULT_TIMELINE_SERVICE_ADDRESS = "0.0.0.0:" + DEFAULT_TIMELINE_SERVICE_PORT; + /** The listening endpoint for the timeline service application.*/ + public static final String TIMELINE_SERVICE_BIND_HOST = + TIMELINE_SERVICE_PREFIX + "bind-host"; + /** The number of threads to handle client RPC API requests. */ public static final String TIMELINE_SERVICE_HANDLER_THREAD_COUNT = TIMELINE_SERVICE_PREFIX + "handler-thread-count"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto index 2eb61487504..08c937f68d0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto @@ -130,11 +130,6 @@ message ApplicationAttemptStateDataProto { optional int32 am_container_exit_status = 9 [default = -1000]; } -message RMStateVersionProto { - optional int32 major_version = 1; - optional int32 minor_version = 2; -} - message EpochProto { optional int64 epoch = 1; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java index 4fd49bcee8d..a8996f0298a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java @@ -326,6 +326,11 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest { return this.start; } + @Override + public void setStartRange(LongRange range) { + this.start = range; + } + @Override public void setStartRange(long begin, long end) throws IllegalArgumentException { @@ -349,6 +354,11 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest { return this.finish; } + @Override + public void 
setFinishRange(LongRange range) { + this.finish = range; + } + @Override public void setFinishRange(long begin, long end) { if (begin > end) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java index 7e19d8fa2f8..dd3e2bc2136 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java @@ -479,6 +479,7 @@ public class ApplicationReportPBImpl extends ApplicationReport { builder.setAmRmToken(convertToProtoFormat(this.amRmToken)); } if (this.applicationTags != null && !this.applicationTags.isEmpty()) { + builder.clearApplicationTags(); builder.addAllApplicationTags(this.applicationTags); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java index c4a3a721990..c2f3268073e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java @@ -107,6 +107,7 @@ extends ApplicationSubmissionContext { builder.setResource(convertToProtoFormat(this.resource)); } if (this.applicationTags != null && !this.applicationTags.isEmpty()) { + builder.clearApplicationTags(); builder.addAllApplicationTags(this.applicationTags); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceBlacklistRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceBlacklistRequestPBImpl.java index 743e5d12c3f..45d89488ac2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceBlacklistRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceBlacklistRequestPBImpl.java @@ -90,7 +90,7 @@ public class ResourceBlacklistRequestPBImpl extends ResourceBlacklistRequest { private void addBlacklistRemovalsToProto() { maybeInitBuilder(); - builder.clearBlacklistAdditions(); + builder.clearBlacklistRemovals(); if (this.blacklistRemovals == null) { return; } @@ -159,5 +159,14 @@ public class ResourceBlacklistRequestPBImpl extends ResourceBlacklistRequest { public int hashCode() { return getProto().hashCode(); } - + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceOptionPBImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceOptionPBImpl.java index 5440a8491e7..79f479ee99d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceOptionPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceOptionPBImpl.java @@ -86,4 +86,19 @@ public class ResourceOptionPBImpl extends ResourceOption { builder = null; } + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/TokenPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/TokenPBImpl.java index 2835cbb65f6..7aeb460d525 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/TokenPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/TokenPBImpl.java @@ -48,7 +48,7 @@ public class TokenPBImpl extends Token { } public synchronized TokenProto getProto() { - mergeLocalToProto(); + mergeLocalToProto(); proto = viaProto ? proto : builder.build(); viaProto = true; return proto; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateNodeResourceRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateNodeResourceRequestPBImpl.java index 413e4a00c15..d44599664a2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateNodeResourceRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateNodeResourceRequestPBImpl.java @@ -162,5 +162,19 @@ public class UpdateNodeResourceRequestPBImpl extends UpdateNodeResourceRequest { }; this.builder.addAllNodeResourceMap(values); } - + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java index 29fd8c14e6d..6cbe6f94d6f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java @@ -34,6 +34,7 @@ import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.yarn.conf.YarnConfiguration; import 
org.apache.hadoop.yarn.conf.HAUtil; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.util.RMHAUtils; @Private @@ -170,6 +171,37 @@ public class WebAppUtils { return sb.toString(); } + /** + * Get the URL to use for binding where bind hostname can be specified + * to override the hostname in the webAppURLWithoutScheme. Port specified in the + * webAppURLWithoutScheme will be used. + * + * @param conf the configuration + * @param hostProperty bind host property name + * @param webAppURLWithoutScheme web app URL without scheme String + * @return String representing bind URL + */ + public static String getWebAppBindURL( + Configuration conf, + String hostProperty, + String webAppURLWithoutScheme) { + + // If the bind-host setting exists then it overrides the hostname + // portion of the corresponding webAppURLWithoutScheme + String host = conf.getTrimmed(hostProperty); + if (host != null && !host.isEmpty()) { + if (webAppURLWithoutScheme.contains(":")) { + webAppURLWithoutScheme = host + ":" + webAppURLWithoutScheme.split(":")[1]; + } + else { + throw new YarnRuntimeException("webAppURLWithoutScheme must include port specification but doesn't: " + + webAppURLWithoutScheme); + } + } + + return webAppURLWithoutScheme; + } + public static String getNMWebAppURLWithoutScheme(Configuration conf) { if (YarnConfiguration.useHttps(conf)) { return conf.get(YarnConfiguration.NM_WEBAPP_HTTPS_ADDRESS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index edc2f8cab61..94b4d7f20ec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -70,6 +70,17 @@ ${yarn.resourcemanager.hostname}:8032 + + + The actual address the server will bind to. If this optional address is + set, the RPC and webapp servers will bind to this address and the port specified in + yarn.resourcemanager.address and yarn.resourcemanager.webapp.address, respectively. This + is most useful for making RM listen to all interfaces by setting to 0.0.0.0. + + yarn.resourcemanager.bind-host + + + The number of threads used to handle applications manager requests. yarn.resourcemanager.client.thread-count @@ -635,6 +646,17 @@ ${yarn.nodemanager.hostname}:0 + + + The actual address the server will bind to. If this optional address is + set, the RPC and webapp servers will bind to this address and the port specified in + yarn.nodemanager.address and yarn.nodemanager.webapp.address, respectively. This is + most useful for making NM listen to all interfaces by setting to 0.0.0.0. + + yarn.nodemanager.bind-host + + + Environment variables that should be forwarded from the NodeManager's environment to the container's. yarn.nodemanager.admin-env @@ -1172,6 +1194,18 @@ ${yarn.timeline-service.hostname}:8190 + + + The actual address the server will bind to. If this optional address is + set, the RPC and webapp servers will bind to this address and the port specified in + yarn.timeline-service.address and yarn.timeline-service.webapp.address, respectively. + This is most useful for making the service listen to all interfaces by setting to + 0.0.0.0. + + yarn.timeline-service.bind-host + + + Store class name for timeline store. 
yarn.timeline-service.store-class diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java new file mode 100644 index 00000000000..c6572e9f387 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java @@ -0,0 +1,895 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.api; +import java.io.IOException; +import java.lang.reflect.Array; +import java.lang.reflect.Constructor; +import java.lang.reflect.Method; +import java.lang.reflect.Modifier; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.nio.ByteBuffer; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Random; +import java.util.Set; + +import org.apache.commons.lang.math.LongRange; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.security.proto.SecurityProtos.*; +import org.apache.hadoop.yarn.api.protocolrecords.*; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.*; +import org.apache.hadoop.yarn.api.records.*; +import org.apache.hadoop.yarn.api.records.impl.pb.*; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.*; +import org.apache.hadoop.yarn.proto.YarnProtos.*; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.*; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.*; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; + +public class TestPBImplRecords { + static final Log LOG = LogFactory.getLog(TestPBImplRecords.class); + + private static HashMap typeValueCache = new HashMap(); + private static Random rand = new Random(); + private static byte [] bytes = new byte[] {'1', '2', '3', '4'}; + + @SuppressWarnings({"rawtypes", "unchecked"}) + private static Object genTypeValue(Type type) { + Object ret = typeValueCache.get(type); + if (ret != null) { + return ret; + } + // only use positive primitive values + if (type.equals(boolean.class)) { + return rand.nextBoolean(); + } else if (type.equals(byte.class)) { + return bytes[rand.nextInt(4)]; + } else if (type.equals(int.class)) { + return rand.nextInt(1000000); + } else if (type.equals(long.class)) { + return Long.valueOf(rand.nextInt(1000000)); + } else if (type.equals(float.class)) { + 
return rand.nextFloat(); + } else if (type.equals(double.class)) { + return rand.nextDouble(); + } else if (type.equals(String.class)) { + return String.format("%c%c%c", + 'a' + rand.nextInt(26), + 'a' + rand.nextInt(26), + 'a' + rand.nextInt(26)); + } else if (type instanceof Class) { + Class clazz = (Class)type; + if (clazz.isArray()) { + Class compClass = clazz.getComponentType(); + if (compClass != null) { + ret = Array.newInstance(compClass, 2); + Array.set(ret, 0, genTypeValue(compClass)); + Array.set(ret, 1, genTypeValue(compClass)); + } + } else if (clazz.isEnum()) { + Object [] values = clazz.getEnumConstants(); + ret = values[rand.nextInt(values.length)]; + } else if (clazz.equals(ByteBuffer.class)) { + // return new ByteBuffer every time + // to prevent potential side effects + ByteBuffer buff = ByteBuffer.allocate(4); + rand.nextBytes(buff.array()); + return buff; + } + } else if (type instanceof ParameterizedType) { + ParameterizedType pt = (ParameterizedType)type; + Type rawType = pt.getRawType(); + Type [] params = pt.getActualTypeArguments(); + // only support EnumSet, List, Set, Map + if (rawType.equals(EnumSet.class)) { + if (params[0] instanceof Class) { + Class c = (Class)(params[0]); + return EnumSet.allOf(c); + } + } if (rawType.equals(List.class)) { + ret = Lists.newArrayList(genTypeValue(params[0])); + } else if (rawType.equals(Set.class)) { + ret = Sets.newHashSet(genTypeValue(params[0])); + } else if (rawType.equals(Map.class)) { + Map map = Maps.newHashMap(); + map.put(genTypeValue(params[0]), genTypeValue(params[1])); + ret = map; + } + } + if (ret == null) { + throw new IllegalArgumentException("type " + type + " is not supported"); + } + typeValueCache.put(type, ret); + return ret; + } + + /** + * this method generates a record instance by calling newInstance + * using reflection, and registers the generated value in typeValueCache + */ + @SuppressWarnings("rawtypes") + private static Object generateByNewInstance(Class clazz) throws Exception { + Object ret = typeValueCache.get(clazz); + if (ret != null) { + return ret; + } + Method newInstance = null; + Type [] paramTypes = new Type[0]; + // get newInstance method with most parameters + for (Method m : clazz.getMethods()) { + int mod = m.getModifiers(); + if (m.getDeclaringClass().equals(clazz) && + Modifier.isPublic(mod) && + Modifier.isStatic(mod) && + m.getName().equals("newInstance")) { + Type [] pts = m.getGenericParameterTypes(); + if (newInstance == null + || (pts.length > paramTypes.length)) { + newInstance = m; + paramTypes = pts; + } + } + } + if (newInstance == null) { + throw new IllegalArgumentException("type " + clazz.getName() + + " does not have newInstance method"); + } + Object [] args = new Object[paramTypes.length]; + for (int i=0;i Map getGetSetPairs(Class recordClass) + throws Exception { + Map ret = new HashMap(); + Method [] methods = recordClass.getDeclaredMethods(); + // get all get methods + for (int i = 0; i < methods.length; i++) { + Method m = methods[i]; + int mod = m.getModifiers(); + if (m.getDeclaringClass().equals(recordClass) && + Modifier.isPublic(mod) && + (!Modifier.isStatic(mod))) { + String name = m.getName(); + if (name.equals("getProto")) { + continue; + } + if ((name.length() > 3) && name.startsWith("get") && + (m.getParameterTypes().length == 0)) { + String propertyName = name.substring(3); + Type valueType = m.getGenericReturnType(); + GetSetPair p = ret.get(propertyName); + if (p == null) { + p = new GetSetPair(); + p.propertyName = propertyName; + p.type = valueType;
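// Editorial note (descriptive comment, not part of the original patch): each
// GetSetPair collects a property's getter, its declared type, the matching
// setter found in the second pass below, and a randomly generated test value,
// so that validatePBImplRecord() can populate every settable field of a
// PBImpl record through reflection before round-tripping it via its proto form.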
+ p.getMethod = m; + ret.put(propertyName, p); + } else { + Assert.fail("Multiple get method with same name: " + recordClass + + p.propertyName); + } + } + } + // match get methods with set methods + for (int i = 0; i < methods.length; i++) { + Method m = methods[i]; + int mod = m.getModifiers(); + if (m.getDeclaringClass().equals(recordClass) && + Modifier.isPublic(mod) && + (!Modifier.isStatic(mod))) { + String name = m.getName(); + if (name.startsWith("set") && (m.getParameterTypes().length == 1)) { + String propertyName = name.substring(3); + Type valueType = m.getGenericParameterTypes()[0]; + GetSetPair p = ret.get(propertyName); + if (p != null && p.type.equals(valueType)) { + p.setMethod = m; + } + } + } + } + // exclude incomplete get/set pair, and generate test value + Iterator> itr = ret.entrySet().iterator(); + while (itr.hasNext()) { + Entry cur = itr.next(); + GetSetPair gsp = cur.getValue(); + if ((gsp.getMethod == null) || + (gsp.setMethod == null)) { + LOG.info(String.format("Exclude potential property: %s\n", gsp.propertyName)); + itr.remove(); + } else { + LOG.info(String.format("New property: %s type: %s", gsp.toString(), gsp.type)); + gsp.testValue = genTypeValue(gsp.type); + LOG.info(String.format(" testValue: %s\n", gsp.testValue)); + } + } + return ret; + } + + private void validatePBImplRecord(Class recordClass, + Class

protoClass) + throws Exception { + LOG.info(String.format("Validate %s %s\n", recordClass.getName(), + protoClass.getName())); + Constructor emptyConstructor = recordClass.getConstructor(); + Constructor pbConstructor = recordClass.getConstructor(protoClass); + Method getProto = recordClass.getDeclaredMethod("getProto"); + Map getSetPairs = getGetSetPairs(recordClass); + R origRecord = emptyConstructor.newInstance(); + for (GetSetPair gsp : getSetPairs.values()) { + gsp.setMethod.invoke(origRecord, gsp.testValue); + } + Object ret = getProto.invoke(origRecord); + Assert.assertNotNull(recordClass.getName() + "#getProto returns null", ret); + if (!(protoClass.isAssignableFrom(ret.getClass()))) { + Assert.fail("Illegal getProto method return type: " + ret.getClass()); + } + R deserRecord = pbConstructor.newInstance(ret); + Assert.assertEquals("whole " + recordClass + " records should be equal", + origRecord, deserRecord); + for (GetSetPair gsp : getSetPairs.values()) { + Object origValue = gsp.getMethod.invoke(origRecord); + Object deserValue = gsp.getMethod.invoke(deserRecord); + Assert.assertEquals("property " + recordClass.getName() + "#" + + gsp.propertyName + " should be equal", origValue, deserValue); + } + } + + @Test + public void testAllocateRequestPBImpl() throws Exception { + validatePBImplRecord(AllocateRequestPBImpl.class, AllocateRequestProto.class); + } + + @Test + public void testAllocateResponsePBImpl() throws Exception { + validatePBImplRecord(AllocateResponsePBImpl.class, AllocateResponseProto.class); + } + + @Test + public void testCancelDelegationTokenRequestPBImpl() throws Exception { + validatePBImplRecord(CancelDelegationTokenRequestPBImpl.class, + CancelDelegationTokenRequestProto.class); + } + + @Test + public void testCancelDelegationTokenResponsePBImpl() throws Exception { + validatePBImplRecord(CancelDelegationTokenResponsePBImpl.class, + CancelDelegationTokenResponseProto.class); + } + + @Test + public void testFinishApplicationMasterRequestPBImpl() throws Exception { + validatePBImplRecord(FinishApplicationMasterRequestPBImpl.class, + FinishApplicationMasterRequestProto.class); + } + + @Test + public void testFinishApplicationMasterResponsePBImpl() throws Exception { + validatePBImplRecord(FinishApplicationMasterResponsePBImpl.class, + FinishApplicationMasterResponseProto.class); + } + + @Test + public void testGetApplicationAttemptReportRequestPBImpl() throws Exception { + validatePBImplRecord(GetApplicationAttemptReportRequestPBImpl.class, + GetApplicationAttemptReportRequestProto.class); + } + + @Test + public void testGetApplicationAttemptReportResponsePBImpl() throws Exception { + validatePBImplRecord(GetApplicationAttemptReportResponsePBImpl.class, + GetApplicationAttemptReportResponseProto.class); + } + + @Test + public void testGetApplicationAttemptsRequestPBImpl() throws Exception { + validatePBImplRecord(GetApplicationAttemptsRequestPBImpl.class, + GetApplicationAttemptsRequestProto.class); + } + + @Test + public void testGetApplicationAttemptsResponsePBImpl() throws Exception { + validatePBImplRecord(GetApplicationAttemptsResponsePBImpl.class, + GetApplicationAttemptsResponseProto.class); + } + + @Test + public void testGetApplicationReportRequestPBImpl() throws Exception { + validatePBImplRecord(GetApplicationReportRequestPBImpl.class, + GetApplicationReportRequestProto.class); + } + + @Test + public void testGetApplicationReportResponsePBImpl() throws Exception { + validatePBImplRecord(GetApplicationReportResponsePBImpl.class, + 
GetApplicationReportResponseProto.class); + } + + @Test + public void testGetApplicationsRequestPBImpl() throws Exception { + validatePBImplRecord(GetApplicationsRequestPBImpl.class, + GetApplicationsRequestProto.class); + } + + @Test + public void testGetApplicationsResponsePBImpl() throws Exception { + validatePBImplRecord(GetApplicationsResponsePBImpl.class, + GetApplicationsResponseProto.class); + } + + @Test + public void testGetClusterMetricsRequestPBImpl() throws Exception { + validatePBImplRecord(GetClusterMetricsRequestPBImpl.class, + GetClusterMetricsRequestProto.class); + } + + @Test + public void testGetClusterMetricsResponsePBImpl() throws Exception { + validatePBImplRecord(GetClusterMetricsResponsePBImpl.class, + GetClusterMetricsResponseProto.class); + } + + @Test + public void testGetClusterNodesRequestPBImpl() throws Exception { + validatePBImplRecord(GetClusterNodesRequestPBImpl.class, + GetClusterNodesRequestProto.class); + } + + @Test + public void testGetClusterNodesResponsePBImpl() throws Exception { + validatePBImplRecord(GetClusterNodesResponsePBImpl.class, + GetClusterNodesResponseProto.class); + } + + @Test + public void testGetContainerReportRequestPBImpl() throws Exception { + validatePBImplRecord(GetContainerReportRequestPBImpl.class, + GetContainerReportRequestProto.class); + } + + @Test + public void testGetContainerReportResponsePBImpl() throws Exception { + validatePBImplRecord(GetContainerReportResponsePBImpl.class, + GetContainerReportResponseProto.class); + } + + @Test + public void testGetContainersRequestPBImpl() throws Exception { + validatePBImplRecord(GetContainersRequestPBImpl.class, + GetContainersRequestProto.class); + } + + @Test + public void testGetContainersResponsePBImpl() throws Exception { + validatePBImplRecord(GetContainersResponsePBImpl.class, + GetContainersResponseProto.class); + } + + @Test + public void testGetContainerStatusesRequestPBImpl() throws Exception { + validatePBImplRecord(GetContainerStatusesRequestPBImpl.class, + GetContainerStatusesRequestProto.class); + } + + @Test + public void testGetContainerStatusesResponsePBImpl() throws Exception { + validatePBImplRecord(GetContainerStatusesResponsePBImpl.class, + GetContainerStatusesResponseProto.class); + } + + @Test + public void testGetDelegationTokenRequestPBImpl() throws Exception { + validatePBImplRecord(GetDelegationTokenRequestPBImpl.class, + GetDelegationTokenRequestProto.class); + } + + @Test + public void testGetDelegationTokenResponsePBImpl() throws Exception { + validatePBImplRecord(GetDelegationTokenResponsePBImpl.class, + GetDelegationTokenResponseProto.class); + } + + @Test + public void testGetNewApplicationRequestPBImpl() throws Exception { + validatePBImplRecord(GetNewApplicationRequestPBImpl.class, + GetNewApplicationRequestProto.class); + } + + @Test + public void testGetNewApplicationResponsePBImpl() throws Exception { + validatePBImplRecord(GetNewApplicationResponsePBImpl.class, + GetNewApplicationResponseProto.class); + } + + @Test + public void testGetQueueInfoRequestPBImpl() throws Exception { + validatePBImplRecord(GetQueueInfoRequestPBImpl.class, + GetQueueInfoRequestProto.class); + } + + @Test + public void testGetQueueInfoResponsePBImpl() throws Exception { + validatePBImplRecord(GetQueueInfoResponsePBImpl.class, + GetQueueInfoResponseProto.class); + } + + @Test + public void testGetQueueUserAclsInfoRequestPBImpl() throws Exception { + validatePBImplRecord(GetQueueUserAclsInfoRequestPBImpl.class, + GetQueueUserAclsInfoRequestProto.class); + } + + 
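/*
 * Editorial note (illustrative sketch, not part of the original patch): every
 * test in this class delegates to validatePBImplRecord(), which builds the
 * record with its no-arg constructor, sets each discovered property to a
 * generated test value, converts the record to its protobuf form via
 * getProto(), reconstructs a second instance through the proto-based
 * constructor, and asserts that both the whole record and each individual
 * property survive the round trip. Covering a new record type only needs one
 * more test of the same shape, e.g. (the "Foo" names are hypothetical):
 *
 *   @Test
 *   public void testFooRequestPBImpl() throws Exception {
 *     validatePBImplRecord(FooRequestPBImpl.class, FooRequestProto.class);
 *   }
 */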
@Test + public void testGetQueueUserAclsInfoResponsePBImpl() throws Exception { + validatePBImplRecord(GetQueueUserAclsInfoResponsePBImpl.class, + GetQueueUserAclsInfoResponseProto.class); + } + + @Test + public void testKillApplicationRequestPBImpl() throws Exception { + validatePBImplRecord(KillApplicationRequestPBImpl.class, + KillApplicationRequestProto.class); + } + + @Test + public void testKillApplicationResponsePBImpl() throws Exception { + validatePBImplRecord(KillApplicationResponsePBImpl.class, + KillApplicationResponseProto.class); + } + + @Test + public void testMoveApplicationAcrossQueuesRequestPBImpl() throws Exception { + validatePBImplRecord(MoveApplicationAcrossQueuesRequestPBImpl.class, + MoveApplicationAcrossQueuesRequestProto.class); + } + + @Test + public void testMoveApplicationAcrossQueuesResponsePBImpl() throws Exception { + validatePBImplRecord(MoveApplicationAcrossQueuesResponsePBImpl.class, + MoveApplicationAcrossQueuesResponseProto.class); + } + + @Test + public void testRegisterApplicationMasterRequestPBImpl() throws Exception { + validatePBImplRecord(RegisterApplicationMasterRequestPBImpl.class, + RegisterApplicationMasterRequestProto.class); + } + + @Test + public void testRegisterApplicationMasterResponsePBImpl() throws Exception { + validatePBImplRecord(RegisterApplicationMasterResponsePBImpl.class, + RegisterApplicationMasterResponseProto.class); + } + + @Test + public void testRenewDelegationTokenRequestPBImpl() throws Exception { + validatePBImplRecord(RenewDelegationTokenRequestPBImpl.class, + RenewDelegationTokenRequestProto.class); + } + + @Test + public void testRenewDelegationTokenResponsePBImpl() throws Exception { + validatePBImplRecord(RenewDelegationTokenResponsePBImpl.class, + RenewDelegationTokenResponseProto.class); + } + + @Test + public void testStartContainerRequestPBImpl() throws Exception { + validatePBImplRecord(StartContainerRequestPBImpl.class, + StartContainerRequestProto.class); + } + + @Test + public void testStartContainersRequestPBImpl() throws Exception { + validatePBImplRecord(StartContainersRequestPBImpl.class, + StartContainersRequestProto.class); + } + + @Test + public void testStartContainersResponsePBImpl() throws Exception { + validatePBImplRecord(StartContainersResponsePBImpl.class, + StartContainersResponseProto.class); + } + + @Test + public void testStopContainersRequestPBImpl() throws Exception { + validatePBImplRecord(StopContainersRequestPBImpl.class, + StopContainersRequestProto.class); + } + + @Test + public void testStopContainersResponsePBImpl() throws Exception { + validatePBImplRecord(StopContainersResponsePBImpl.class, + StopContainersResponseProto.class); + } + + @Test + public void testSubmitApplicationRequestPBImpl() throws Exception { + validatePBImplRecord(SubmitApplicationRequestPBImpl.class, + SubmitApplicationRequestProto.class); + } + + @Test + public void testSubmitApplicationResponsePBImpl() throws Exception { + validatePBImplRecord(SubmitApplicationResponsePBImpl.class, + SubmitApplicationResponseProto.class); + } + + @Test + @Ignore + // ignore cause ApplicationIdPBImpl is immutable + public void testApplicationAttemptIdPBImpl() throws Exception { + validatePBImplRecord(ApplicationAttemptIdPBImpl.class, + ApplicationAttemptIdProto.class); + } + + @Test + public void testApplicationAttemptReportPBImpl() throws Exception { + validatePBImplRecord(ApplicationAttemptReportPBImpl.class, + ApplicationAttemptReportProto.class); + } + + @Test + @Ignore + // ignore cause ApplicationIdPBImpl is immutable 
+ public void testApplicationIdPBImpl() throws Exception { + validatePBImplRecord(ApplicationIdPBImpl.class, ApplicationIdProto.class); + } + + @Test + public void testApplicationReportPBImpl() throws Exception { + validatePBImplRecord(ApplicationReportPBImpl.class, + ApplicationReportProto.class); + } + + @Test + public void testApplicationResourceUsageReportPBImpl() throws Exception { + validatePBImplRecord(ApplicationResourceUsageReportPBImpl.class, + ApplicationResourceUsageReportProto.class); + } + + @Test + public void testApplicationSubmissionContextPBImpl() throws Exception { + validatePBImplRecord(ApplicationSubmissionContextPBImpl.class, + ApplicationSubmissionContextProto.class); + } + + @Test + @Ignore + // ignore cause ApplicationIdPBImpl is immutable + public void testContainerIdPBImpl() throws Exception { + validatePBImplRecord(ContainerIdPBImpl.class, ContainerIdProto.class); + } + + @Test + public void testContainerLaunchContextPBImpl() throws Exception { + validatePBImplRecord(ContainerLaunchContextPBImpl.class, + ContainerLaunchContextProto.class); + } + + @Test + public void testContainerPBImpl() throws Exception { + validatePBImplRecord(ContainerPBImpl.class, ContainerProto.class); + } + + @Test + public void testContainerReportPBImpl() throws Exception { + validatePBImplRecord(ContainerReportPBImpl.class, ContainerReportProto.class); + } + + @Test + public void testContainerResourceDecreasePBImpl() throws Exception { + validatePBImplRecord(ContainerResourceDecreasePBImpl.class, + ContainerResourceDecreaseProto.class); + } + + @Test + public void testContainerResourceIncreasePBImpl() throws Exception { + validatePBImplRecord(ContainerResourceIncreasePBImpl.class, + ContainerResourceIncreaseProto.class); + } + + @Test + public void testContainerResourceIncreaseRequestPBImpl() throws Exception { + validatePBImplRecord(ContainerResourceIncreaseRequestPBImpl.class, + ContainerResourceIncreaseRequestProto.class); + } + + @Test + public void testContainerStatusPBImpl() throws Exception { + validatePBImplRecord(ContainerStatusPBImpl.class, ContainerStatusProto.class); + } + + @Test + public void testLocalResourcePBImpl() throws Exception { + validatePBImplRecord(LocalResourcePBImpl.class, LocalResourceProto.class); + } + + @Test + public void testNMTokenPBImpl() throws Exception { + validatePBImplRecord(NMTokenPBImpl.class, NMTokenProto.class); + } + + @Test + @Ignore + // ignore cause ApplicationIdPBImpl is immutable + public void testNodeIdPBImpl() throws Exception { + validatePBImplRecord(NodeIdPBImpl.class, NodeIdProto.class); + } + + @Test + public void testNodeReportPBImpl() throws Exception { + validatePBImplRecord(NodeReportPBImpl.class, NodeReportProto.class); + } + + @Test + public void testPreemptionContainerPBImpl() throws Exception { + validatePBImplRecord(PreemptionContainerPBImpl.class, + PreemptionContainerProto.class); + } + + @Test + public void testPreemptionContractPBImpl() throws Exception { + validatePBImplRecord(PreemptionContractPBImpl.class, + PreemptionContractProto.class); + } + + @Test + public void testPreemptionMessagePBImpl() throws Exception { + validatePBImplRecord(PreemptionMessagePBImpl.class, + PreemptionMessageProto.class); + } + + @Test + public void testPreemptionResourceRequestPBImpl() throws Exception { + validatePBImplRecord(PreemptionResourceRequestPBImpl.class, + PreemptionResourceRequestProto.class); + } + + @Test + public void testPriorityPBImpl() throws Exception { + validatePBImplRecord(PriorityPBImpl.class, 
PriorityProto.class); + } + + @Test + public void testQueueInfoPBImpl() throws Exception { + validatePBImplRecord(QueueInfoPBImpl.class, QueueInfoProto.class); + } + + @Test + public void testQueueUserACLInfoPBImpl() throws Exception { + validatePBImplRecord(QueueUserACLInfoPBImpl.class, + QueueUserACLInfoProto.class); + } + + @Test + public void testResourceBlacklistRequestPBImpl() throws Exception { + validatePBImplRecord(ResourceBlacklistRequestPBImpl.class, + ResourceBlacklistRequestProto.class); + } + + @Test + @Ignore + // ignore as ResourceOptionPBImpl is immutable + public void testResourceOptionPBImpl() throws Exception { + validatePBImplRecord(ResourceOptionPBImpl.class, ResourceOptionProto.class); + } + + @Test + public void testResourcePBImpl() throws Exception { + validatePBImplRecord(ResourcePBImpl.class, ResourceProto.class); + } + + @Test + public void testResourceRequestPBImpl() throws Exception { + validatePBImplRecord(ResourceRequestPBImpl.class, ResourceRequestProto.class); + } + + @Test + public void testSerializedExceptionPBImpl() throws Exception { + validatePBImplRecord(SerializedExceptionPBImpl.class, + SerializedExceptionProto.class); + } + + @Test + public void testStrictPreemptionContractPBImpl() throws Exception { + validatePBImplRecord(StrictPreemptionContractPBImpl.class, + StrictPreemptionContractProto.class); + } + + @Test + public void testTokenPBImpl() throws Exception { + validatePBImplRecord(TokenPBImpl.class, TokenProto.class); + } + + @Test + public void testURLPBImpl() throws Exception { + validatePBImplRecord(URLPBImpl.class, URLProto.class); + } + + @Test + public void testYarnClusterMetricsPBImpl() throws Exception { + validatePBImplRecord(YarnClusterMetricsPBImpl.class, + YarnClusterMetricsProto.class); + } + + @Test + public void testRefreshAdminAclsRequestPBImpl() throws Exception { + validatePBImplRecord(RefreshAdminAclsRequestPBImpl.class, + RefreshAdminAclsRequestProto.class); + } + + @Test + public void testRefreshAdminAclsResponsePBImpl() throws Exception { + validatePBImplRecord(RefreshAdminAclsResponsePBImpl.class, + RefreshAdminAclsResponseProto.class); + } + + @Test + public void testRefreshNodesRequestPBImpl() throws Exception { + validatePBImplRecord(RefreshNodesRequestPBImpl.class, + RefreshNodesRequestProto.class); + } + + @Test + public void testRefreshNodesResponsePBImpl() throws Exception { + validatePBImplRecord(RefreshNodesResponsePBImpl.class, + RefreshNodesResponseProto.class); + } + + @Test + public void testRefreshQueuesRequestPBImpl() throws Exception { + validatePBImplRecord(RefreshQueuesRequestPBImpl.class, + RefreshQueuesRequestProto.class); + } + + @Test + public void testRefreshQueuesResponsePBImpl() throws Exception { + validatePBImplRecord(RefreshQueuesResponsePBImpl.class, + RefreshQueuesResponseProto.class); + } + + @Test + public void testRefreshServiceAclsRequestPBImpl() throws Exception { + validatePBImplRecord(RefreshServiceAclsRequestPBImpl.class, + RefreshServiceAclsRequestProto.class); + } + + @Test + public void testRefreshServiceAclsResponsePBImpl() throws Exception { + validatePBImplRecord(RefreshServiceAclsResponsePBImpl.class, + RefreshServiceAclsResponseProto.class); + } + + @Test + public void testRefreshSuperUserGroupsConfigurationRequestPBImpl() + throws Exception { + validatePBImplRecord(RefreshSuperUserGroupsConfigurationRequestPBImpl.class, + RefreshSuperUserGroupsConfigurationRequestProto.class); + } + + @Test + public void testRefreshSuperUserGroupsConfigurationResponsePBImpl() + throws 
Exception { + validatePBImplRecord(RefreshSuperUserGroupsConfigurationResponsePBImpl.class, + RefreshSuperUserGroupsConfigurationResponseProto.class); + } + + @Test + public void testRefreshUserToGroupsMappingsRequestPBImpl() throws Exception { + validatePBImplRecord(RefreshUserToGroupsMappingsRequestPBImpl.class, + RefreshUserToGroupsMappingsRequestProto.class); + } + + @Test + public void testRefreshUserToGroupsMappingsResponsePBImpl() throws Exception { + validatePBImplRecord(RefreshUserToGroupsMappingsResponsePBImpl.class, + RefreshUserToGroupsMappingsResponseProto.class); + } + + @Test + public void testUpdateNodeResourceRequestPBImpl() throws Exception { + validatePBImplRecord(UpdateNodeResourceRequestPBImpl.class, + UpdateNodeResourceRequestProto.class); + } + + @Test + public void testUpdateNodeResourceResponsePBImpl() throws Exception { + validatePBImplRecord(UpdateNodeResourceResponsePBImpl.class, + UpdateNodeResourceResponseProto.class); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java index 5e40e5d5000..1d925a733d6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java @@ -28,6 +28,7 @@ import java.net.SocketAddress; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertFalse; public class TestYarnConfiguration { @@ -75,4 +76,131 @@ public class TestYarnConfiguration { YarnConfiguration.DEFAULT_NM_PORT); assertEquals(1234, addr.getPort()); } + + @Test + public void testGetSocketAddr() throws Exception { + + YarnConfiguration conf; + InetSocketAddress resourceTrackerAddress; + + //all default + conf = new YarnConfiguration(); + resourceTrackerAddress = conf.getSocketAddr( + YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT); + assertEquals( + new InetSocketAddress( + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0], + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT), + resourceTrackerAddress); + + //with address + conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "10.0.0.1"); + resourceTrackerAddress = conf.getSocketAddr( + YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT); + assertEquals( + new InetSocketAddress( + "10.0.0.1", + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT), + resourceTrackerAddress); + + //address and socket + conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "10.0.0.2:5001"); + resourceTrackerAddress = conf.getSocketAddr( + YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT); + assertEquals( + new InetSocketAddress( + "10.0.0.2", + 5001), + resourceTrackerAddress); + + //bind host only + conf = new YarnConfiguration(); + conf.set(YarnConfiguration.RM_BIND_HOST, "10.0.0.3"); + resourceTrackerAddress = conf.getSocketAddr( + 
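// Editorial note (not part of the original patch): with only RM_BIND_HOST set,
// getSocketAddr is expected to take the host from the bind-host property and
// keep the port from the default resource-tracker address, as asserted below.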
YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT); + assertEquals( + new InetSocketAddress( + "10.0.0.3", + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT), + resourceTrackerAddress); + + //bind host and address no port + conf.set(YarnConfiguration.RM_BIND_HOST, "0.0.0.0"); + conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "10.0.0.2"); + resourceTrackerAddress = conf.getSocketAddr( + YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT); + assertEquals( + new InetSocketAddress( + "0.0.0.0", + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT), + resourceTrackerAddress); + + //bind host and address with port + conf.set(YarnConfiguration.RM_BIND_HOST, "0.0.0.0"); + conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "10.0.0.2:5003"); + resourceTrackerAddress = conf.getSocketAddr( + YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT); + assertEquals( + new InetSocketAddress( + "0.0.0.0", + 5003), + resourceTrackerAddress); + + } + + @Test + public void testUpdateConnectAddr() throws Exception { + YarnConfiguration conf; + InetSocketAddress resourceTrackerConnectAddress; + InetSocketAddress serverAddress; + + //no override, old behavior. Won't work on a host named "yo.yo.yo" + conf = new YarnConfiguration(); + conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "yo.yo.yo"); + serverAddress = new InetSocketAddress( + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0], + Integer.valueOf(YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[1])); + + resourceTrackerConnectAddress = conf.updateConnectAddr( + YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, + serverAddress); + + assertFalse(resourceTrackerConnectAddress.toString().startsWith("yo.yo.yo")); + + //cause override with address + conf = new YarnConfiguration(); + conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "yo.yo.yo"); + conf.set(YarnConfiguration.RM_BIND_HOST, "0.0.0.0"); + serverAddress = new InetSocketAddress( + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0], + Integer.valueOf(YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[1])); + + resourceTrackerConnectAddress = conf.updateConnectAddr( + YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, + serverAddress); + + assertTrue(resourceTrackerConnectAddress.toString().startsWith("yo.yo.yo")); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java index e15198b13f0..6372056cb5d 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java @@ -56,6 +56,7 @@ import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException; import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException; import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.ipc.YarnRPC; public class ApplicationHistoryClientService extends AbstractService { @@ -75,10 +76,11 @@ public class ApplicationHistoryClientService extends AbstractService { protected void serviceStart() throws Exception { Configuration conf = getConfig(); YarnRPC rpc = YarnRPC.create(conf); - InetSocketAddress address = - conf.getSocketAddr(YarnConfiguration.TIMELINE_SERVICE_ADDRESS, - YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ADDRESS, - YarnConfiguration.DEFAULT_TIMELINE_SERVICE_PORT); + InetSocketAddress address = conf.getSocketAddr( + YarnConfiguration.TIMELINE_SERVICE_BIND_HOST, + YarnConfiguration.TIMELINE_SERVICE_ADDRESS, + YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ADDRESS, + YarnConfiguration.DEFAULT_TIMELINE_SERVICE_PORT); server = rpc.getServer(ApplicationHistoryProtocol.class, protocolHandler, @@ -88,8 +90,10 @@ public class ApplicationHistoryClientService extends AbstractService { server.start(); this.bindAddress = - conf.updateConnectAddr(YarnConfiguration.TIMELINE_SERVICE_ADDRESS, - server.getListenerAddress()); + conf.updateConnectAddr(YarnConfiguration.TIMELINE_SERVICE_BIND_HOST, + YarnConfiguration.TIMELINE_SERVICE_ADDRESS, + YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ADDRESS, + server.getListenerAddress()); LOG.info("Instantiated ApplicationHistoryClientService at " + this.bindAddress); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java index 02a3bb12fc0..ce05d503986 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java @@ -192,7 +192,9 @@ public class ApplicationHistoryServer extends CompositeService { TimelineAuthenticationFilterInitializer.class.getName() + initializers); } - String bindAddress = WebAppUtils.getAHSWebAppURLWithoutScheme(conf); + String bindAddress = WebAppUtils.getWebAppBindURL(conf, + YarnConfiguration.TIMELINE_SERVICE_BIND_HOST, + WebAppUtils.getAHSWebAppURLWithoutScheme(conf)); LOG.info("Instantiating AHSWebApp at " + bindAddress); try { AHSWebApp ahsWebApp = AHSWebApp.getInstance(); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/records/NMDBSchemaVersion.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/records/Version.java similarity index 74% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/records/NMDBSchemaVersion.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/records/Version.java index 1ee59ea4d83..66042ea8ad2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/records/NMDBSchemaVersion.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/records/Version.java @@ -15,21 +15,26 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.yarn.server.nodemanager.recovery.records; -import org.apache.hadoop.classification.InterfaceAudience.Private; +package org.apache.hadoop.yarn.server.records; + +import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.util.Records; /** - * The version information of DB Schema for NM. + * The version information of the state stored by YARN components, + * e.g. RMState, NMState, etc., consisting of a majorVersion and a + * minorVersion. + * A major version update indicates an incompatible change, while a + * minor version update indicates a compatible change. */ -@Private +@LimitedPrivate({"YARN", "MapReduce"}) @Unstable -public abstract class NMDBSchemaVersion { +public abstract class Version { - public static NMDBSchemaVersion newInstance(int majorVersion, int minorVersion) { - NMDBSchemaVersion version = Records.newRecord(NMDBSchemaVersion.class); + public static Version newInstance(int majorVersion, int minorVersion) { + Version version = Records.newRecord(Version.class); version.setMajorVersion(majorVersion); version.setMinorVersion(minorVersion); return version; @@ -47,7 +52,7 @@ public abstract class NMDBSchemaVersion { return getMajorVersion() + "."
+ getMinorVersion(); } - public boolean isCompatibleTo(NMDBSchemaVersion version) { + public boolean isCompatibleTo(Version version) { return getMajorVersion() == version.getMajorVersion(); } @@ -68,7 +73,7 @@ public abstract class NMDBSchemaVersion { return false; if (getClass() != obj.getClass()) return false; - NMDBSchemaVersion other = (NMDBSchemaVersion) obj; + Version other = (Version) obj; if (this.getMajorVersion() == other.getMajorVersion() && this.getMinorVersion() == other.getMinorVersion()) { return true; @@ -76,5 +81,4 @@ public abstract class NMDBSchemaVersion { return false; } } - } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/RMStateVersionPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/records/impl/pb/VersionPBImpl.java similarity index 62% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/RMStateVersionPBImpl.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/records/impl/pb/VersionPBImpl.java index f960413ce64..a99f22af5ce 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/RMStateVersionPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/records/impl/pb/VersionPBImpl.java @@ -16,28 +16,29 @@ * limitations under the License. */ -package org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb; +package org.apache.hadoop.yarn.server.records.impl.pb; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RMStateVersionProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RMStateVersionProtoOrBuilder; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; +import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto; +import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProtoOrBuilder; -public class RMStateVersionPBImpl extends RMStateVersion { +import org.apache.hadoop.yarn.server.records.Version; - RMStateVersionProto proto = RMStateVersionProto.getDefaultInstance(); - RMStateVersionProto.Builder builder = null; +public class VersionPBImpl extends Version { + + VersionProto proto = VersionProto.getDefaultInstance(); + VersionProto.Builder builder = null; boolean viaProto = false; - public RMStateVersionPBImpl() { - builder = RMStateVersionProto.newBuilder(); + public VersionPBImpl() { + builder = VersionProto.newBuilder(); } - public RMStateVersionPBImpl(RMStateVersionProto proto) { + public VersionPBImpl(VersionProto proto) { this.proto = proto; viaProto = true; } - public RMStateVersionProto getProto() { + public VersionProto getProto() { proto = viaProto ? 
proto : builder.build(); viaProto = true; return proto; @@ -45,14 +46,14 @@ public class RMStateVersionPBImpl extends RMStateVersion { private void maybeInitBuilder() { if (viaProto || builder == null) { - builder = RMStateVersionProto.newBuilder(proto); + builder = VersionProto.newBuilder(proto); } viaProto = false; } @Override public int getMajorVersion() { - RMStateVersionProtoOrBuilder p = viaProto ? proto : builder; + VersionProtoOrBuilder p = viaProto ? proto : builder; return p.getMajorVersion(); } @@ -64,7 +65,7 @@ public class RMStateVersionPBImpl extends RMStateVersion { @Override public int getMinorVersion() { - RMStateVersionProtoOrBuilder p = viaProto ? proto : builder; + VersionProtoOrBuilder p = viaProto ? proto : builder; return p.getMinorVersion(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto index 4f5d16895be..01fac329a12 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto @@ -47,4 +47,10 @@ message NodeHealthStatusProto { optional bool is_node_healthy = 1; optional string health_report = 2; optional int64 last_health_report_time = 3; -} \ No newline at end of file +} + +message VersionProto { + optional int32 major_version = 1; + optional int32 minor_version = 2; +} + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java index 1e155d27b84..aee4e9a78a0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java @@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager; import static org.apache.hadoop.service.Service.STATE.STARTED; +import java.io.DataInputStream; import java.io.IOException; import java.net.InetSocketAddress; import java.net.URISyntaxException; @@ -42,6 +43,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.io.DataInputByteBuffer; +import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.Credentials; @@ -63,6 +65,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse; import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest; import org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse; +import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import 
org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -71,6 +74,8 @@ import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.SerializedException; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.event.EventHandler; @@ -81,6 +86,8 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.ipc.YarnRPC; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationACLMapProto; +import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.ContainerManagerApplicationProto; import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.security.NMTokenIdentifier; import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedAppsEvent; @@ -119,11 +126,13 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.Contai import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; +import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredApplicationsState; import org.apache.hadoop.yarn.server.nodemanager.security.authorize.NMPolicyProvider; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import com.google.common.annotations.VisibleForTesting; +import com.google.protobuf.ByteString; public class ContainerManagerImpl extends CompositeService implements ServiceStateChangeListener, ContainerManagementProtocol, @@ -224,14 +233,49 @@ public class ContainerManagerImpl extends CompositeService implements recover(); } + @SuppressWarnings("unchecked") private void recover() throws IOException, URISyntaxException { NMStateStoreService stateStore = context.getNMStateStore(); if (stateStore.canRecover()) { rsrcLocalizationSrvc.recoverLocalizedResources( stateStore.loadLocalizationState()); + + RecoveredApplicationsState appsState = stateStore.loadApplicationsState(); + for (ContainerManagerApplicationProto proto : + appsState.getApplications()) { + recoverApplication(proto); + } + + String diagnostic = "Application marked finished during recovery"; + for (ApplicationId appId : appsState.getFinishedApplications()) { + dispatcher.getEventHandler().handle( + new ApplicationFinishEvent(appId, diagnostic)); + } } } + private void recoverApplication(ContainerManagerApplicationProto p) + throws IOException { + ApplicationId appId = new ApplicationIdPBImpl(p.getId()); + Credentials creds = new Credentials(); + creds.readTokenStorageStream( + new DataInputStream(p.getCredentials().newInput())); + + List aclProtoList = p.getAclsList(); + Map acls = + new HashMap(aclProtoList.size()); + for (ApplicationACLMapProto aclProto : aclProtoList) { + acls.put(ProtoUtils.convertFromProtoFormat(aclProto.getAccessType()), + aclProto.getAcl()); + } + + 
LOG.info("Recovering application " + appId); + ApplicationImpl app = new ApplicationImpl(dispatcher, p.getUser(), appId, + creds, context); + context.getApplications().put(appId, app); + app.handle(new ApplicationInitEvent(appId, acls)); + } + protected LogHandler createLogHandler(Configuration conf, Context context, DeletionService deletionService) { if (conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, @@ -275,6 +319,7 @@ public class ContainerManagerImpl extends CompositeService implements YarnRPC rpc = YarnRPC.create(conf); InetSocketAddress initialAddress = conf.getSocketAddr( + YarnConfiguration.NM_BIND_HOST, YarnConfiguration.NM_ADDRESS, YarnConfiguration.DEFAULT_NM_ADDRESS, YarnConfiguration.DEFAULT_NM_PORT); @@ -296,7 +341,22 @@ public class ContainerManagerImpl extends CompositeService implements " server is still starting."); this.setBlockNewContainerRequests(true); server.start(); - InetSocketAddress connectAddress = NetUtils.getConnectAddress(server); + + InetSocketAddress connectAddress; + String bindHost = conf.get(YarnConfiguration.NM_BIND_HOST); + String nmAddress = conf.getTrimmed(YarnConfiguration.NM_ADDRESS); + if (bindHost == null || bindHost.isEmpty() || + nmAddress == null || nmAddress.isEmpty()) { + connectAddress = NetUtils.getConnectAddress(server); + } else { + //A bind host and an NM address are both configured, so advertise the + //hostname from the configured NM address instead of the one discovered + //via the bind host, combined with the actual port the server listens on. + connectAddress = NetUtils.getConnectAddress( + new InetSocketAddress(nmAddress.split(":")[0], + server.getListenerAddress().getPort())); + } + NodeId nodeId = NodeId.newInstance( connectAddress.getAddress().getCanonicalHostName(), connectAddress.getPort()); @@ -304,6 +364,7 @@ public class ContainerManagerImpl extends CompositeService implements this.context.getNMTokenSecretManager().setNodeId(nodeId); this.context.getContainerTokenSecretManager().setNodeId(nodeId); LOG.info("ContainerManager started at " + connectAddress); + LOG.info("ContainerManager bound to " + initialAddress); super.serviceStart(); } @@ -341,6 +402,12 @@ public class ContainerManagerImpl extends CompositeService implements } LOG.info("Applications still running : " + applications.keySet()); + if (this.context.getNMStateStore().canRecover() + && !this.context.getDecommissioned()) { + // do not clean up apps, as they can be recovered on restart + return; + } + List<ApplicationId> appIds = new ArrayList<ApplicationId>(applications.keySet()); this.handle( @@ -497,6 +564,8 @@ public class ContainerManagerImpl extends CompositeService implements messageBuilder.append("\nThis token is expired.
current time is ") .append(System.currentTimeMillis()).append(" found ") .append(containerTokenIdentifier.getExpiryTimeStamp()); + messageBuilder.append("\nNote: System times on machines may be out of sync.") + .append(" Check system time and time zones."); } if (unauthorized) { String msg = messageBuilder.toString(); @@ -548,6 +617,41 @@ public class ContainerManagerImpl extends CompositeService implements succeededContainers, failedContainers); } + private ContainerManagerApplicationProto buildAppProto(ApplicationId appId, + String user, Credentials credentials, + Map appAcls) { + + ContainerManagerApplicationProto.Builder builder = + ContainerManagerApplicationProto.newBuilder(); + builder.setId(((ApplicationIdPBImpl) appId).getProto()); + builder.setUser(user); + + builder.clearCredentials(); + if (credentials != null) { + DataOutputBuffer dob = new DataOutputBuffer(); + try { + credentials.writeTokenStorageToStream(dob); + builder.setCredentials(ByteString.copyFrom(dob.getData())); + } catch (IOException e) { + // should not occur + LOG.error("Cannot serialize credentials", e); + } + } + + builder.clearAcls(); + if (appAcls != null) { + for (Map.Entry acl : appAcls.entrySet()) { + ApplicationACLMapProto p = ApplicationACLMapProto.newBuilder() + .setAccessType(ProtoUtils.convertToProtoFormat(acl.getKey())) + .setAcl(acl.getValue()) + .build(); + builder.addAcls(p); + } + } + + return builder.build(); + } + @SuppressWarnings("unchecked") private void startContainerInternal(NMTokenIdentifier nmTokenIdentifier, ContainerTokenIdentifier containerTokenIdentifier, @@ -621,10 +725,12 @@ public class ContainerManagerImpl extends CompositeService implements if (null == context.getApplications().putIfAbsent(applicationID, application)) { LOG.info("Creating a new application reference for app " + applicationID); - + Map appAcls = + container.getLaunchContext().getApplicationACLs(); + context.getNMStateStore().storeApplication(applicationID, + buildAppProto(applicationID, user, credentials, appAcls)); dispatcher.getEventHandler().handle( - new ApplicationInitEvent(applicationID, container.getLaunchContext() - .getApplicationACLs())); + new ApplicationInitEvent(applicationID, appAcls)); } dispatcher.getEventHandler().handle( @@ -875,6 +981,11 @@ public class ContainerManagerImpl extends CompositeService implements } else if (appsFinishedEvent.getReason() == CMgrCompletedAppsEvent.Reason.BY_RESOURCEMANAGER) { diagnostic = "Application killed by ResourceManager"; } + try { + this.context.getNMStateStore().storeFinishedApplication(appID); + } catch (IOException e) { + LOG.error("Unable to update application state in store", e); + } this.dispatcher.getEventHandler().handle( new ApplicationFinishEvent(appID, diagnostic)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java index a206591d80a..cc5544cc47a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.application; +import java.io.IOException; import java.util.EnumSet; import java.util.HashMap; import java.util.Map; @@ -428,6 +429,11 @@ public class ApplicationImpl implements Application { ApplicationId appId = event.getApplicationID(); app.context.getApplications().remove(appId); app.aclsManager.removeApplication(appId); + try { + app.context.getNMStateStore().removeApplication(appId); + } catch (IOException e) { + LOG.error("Unable to remove application from state store", e); + } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java index 554b368dd5f..64a0b37cc31 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java @@ -81,6 +81,7 @@ import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto; @@ -251,6 +252,7 @@ public class ResourceLocalizationService extends CompositeService cacheCleanupPeriod = conf.getLong(YarnConfiguration.NM_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS, YarnConfiguration.DEFAULT_NM_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS); localizationServerAddress = conf.getSocketAddr( + YarnConfiguration.NM_BIND_HOST, YarnConfiguration.NM_LOCALIZER_ADDRESS, YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS, YarnConfiguration.DEFAULT_NM_LOCALIZER_PORT); @@ -341,7 +343,9 @@ public class ResourceLocalizationService extends CompositeService server = createServer(); server.start(); localizationServerAddress = - getConfig().updateConnectAddr(YarnConfiguration.NM_LOCALIZER_ADDRESS, + getConfig().updateConnectAddr(YarnConfiguration.NM_BIND_HOST, + YarnConfiguration.NM_LOCALIZER_ADDRESS, + YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS, server.getListenerAddress()); LOG.info("Localizer started on port " + server.getPort()); super.serviceStart(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java index 008da7a2b8a..c3fc272d7f5 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java @@ -41,13 +41,14 @@ import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto; import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto; +import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto; +import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.ContainerManagerApplicationProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto; -import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.NMDBSchemaVersionProto; import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl; -import org.apache.hadoop.yarn.server.nodemanager.recovery.records.NMDBSchemaVersion; -import org.apache.hadoop.yarn.server.nodemanager.recovery.records.impl.pb.NMDBSchemaVersionPBImpl; +import org.apache.hadoop.yarn.server.records.Version; +import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl; import org.apache.hadoop.yarn.server.utils.LeveldbIterator; import org.apache.hadoop.yarn.util.ConverterUtils; import org.fusesource.leveldbjni.JniDBFactory; @@ -68,12 +69,17 @@ public class NMLeveldbStateStoreService extends NMStateStoreService { private static final String DB_NAME = "yarn-nm-state"; private static final String DB_SCHEMA_VERSION_KEY = "nm-schema-version"; - private static final NMDBSchemaVersion CURRENT_VERSION_INFO = NMDBSchemaVersion + private static final Version CURRENT_VERSION_INFO = Version .newInstance(1, 0); private static final String DELETION_TASK_KEY_PREFIX = "DeletionService/deltask_"; + private static final String APPLICATIONS_KEY_PREFIX = + "ContainerManager/applications/"; + private static final String FINISHED_APPS_KEY_PREFIX = + "ContainerManager/finishedApps/"; + private static final String LOCALIZATION_KEY_PREFIX = "Localization/"; private static final String LOCALIZATION_PUBLIC_KEY_PREFIX = LOCALIZATION_KEY_PREFIX + "public/"; @@ -116,6 +122,92 @@ public class NMLeveldbStateStoreService extends NMStateStoreService { } + @Override + public RecoveredApplicationsState loadApplicationsState() + throws IOException { + RecoveredApplicationsState state = new RecoveredApplicationsState(); + state.applications = new ArrayList(); + String keyPrefix = APPLICATIONS_KEY_PREFIX; + LeveldbIterator iter = null; + try { + iter = new LeveldbIterator(db); + iter.seek(bytes(keyPrefix)); + while (iter.hasNext()) { + Entry entry = iter.next(); + String key = asString(entry.getKey()); + if (!key.startsWith(keyPrefix)) { + break; + } + state.applications.add( + ContainerManagerApplicationProto.parseFrom(entry.getValue())); + } + + state.finishedApplications = new ArrayList(); + keyPrefix = FINISHED_APPS_KEY_PREFIX; + iter.seek(bytes(keyPrefix)); + while (iter.hasNext()) { + Entry entry = iter.next(); + String key = asString(entry.getKey()); + if (!key.startsWith(keyPrefix)) { + break; + } + ApplicationId appId = + 
ConverterUtils.toApplicationId(key.substring(keyPrefix.length())); + state.finishedApplications.add(appId); + } + } catch (DBException e) { + throw new IOException(e); + } finally { + if (iter != null) { + iter.close(); + } + } + + return state; + } + + @Override + public void storeApplication(ApplicationId appId, + ContainerManagerApplicationProto p) throws IOException { + String key = APPLICATIONS_KEY_PREFIX + appId; + try { + db.put(bytes(key), p.toByteArray()); + } catch (DBException e) { + throw new IOException(e); + } + } + + @Override + public void storeFinishedApplication(ApplicationId appId) + throws IOException { + String key = FINISHED_APPS_KEY_PREFIX + appId; + try { + db.put(bytes(key), new byte[0]); + } catch (DBException e) { + throw new IOException(e); + } + } + + @Override + public void removeApplication(ApplicationId appId) + throws IOException { + try { + WriteBatch batch = db.createWriteBatch(); + try { + String key = APPLICATIONS_KEY_PREFIX + appId; + batch.delete(bytes(key)); + key = FINISHED_APPS_KEY_PREFIX + appId; + batch.delete(bytes(key)); + db.write(batch); + } finally { + batch.close(); + } + } catch (DBException e) { + throw new IOException(e); + } + } + + @Override public RecoveredLocalizationState loadLocalizationState() throws IOException { @@ -617,14 +709,14 @@ public class NMLeveldbStateStoreService extends NMStateStoreService { } - NMDBSchemaVersion loadVersion() throws IOException { + Version loadVersion() throws IOException { byte[] data = db.get(bytes(DB_SCHEMA_VERSION_KEY)); // if version is not stored previously, treat it as 1.0. if (data == null || data.length == 0) { - return NMDBSchemaVersion.newInstance(1, 0); + return Version.newInstance(1, 0); } - NMDBSchemaVersion version = - new NMDBSchemaVersionPBImpl(NMDBSchemaVersionProto.parseFrom(data)); + Version version = + new VersionPBImpl(VersionProto.parseFrom(data)); return version; } @@ -634,14 +726,14 @@ public class NMLeveldbStateStoreService extends NMStateStoreService { // Only used for test @VisibleForTesting - void storeVersion(NMDBSchemaVersion state) throws IOException { + void storeVersion(Version state) throws IOException { dbStoreVersion(state); } - private void dbStoreVersion(NMDBSchemaVersion state) throws IOException { + private void dbStoreVersion(Version state) throws IOException { String key = DB_SCHEMA_VERSION_KEY; byte[] data = - ((NMDBSchemaVersionPBImpl) state).getProto().toByteArray(); + ((VersionPBImpl) state).getProto().toByteArray(); try { db.put(bytes(key), data); } catch (DBException e) { @@ -649,7 +741,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService { } } - NMDBSchemaVersion getCurrentVersion() { + Version getCurrentVersion() { return CURRENT_VERSION_INFO; } @@ -664,9 +756,9 @@ public class NMLeveldbStateStoreService extends NMStateStoreService { * upgrade NM state or remove incompatible old state. 
*/ private void checkVersion() throws IOException { - NMDBSchemaVersion loadedVersion = loadVersion(); + Version loadedVersion = loadVersion(); LOG.info("Loaded NM state version info " + loadedVersion); - if (loadedVersion != null && loadedVersion.equals(getCurrentVersion())) { + if (loadedVersion.equals(getCurrentVersion())) { return; } if (loadedVersion.isCompatibleTo(getCurrentVersion())) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java index 89205b1f637..3bb1e2189fc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java @@ -26,6 +26,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto; +import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.ContainerManagerApplicationProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto; import org.apache.hadoop.yarn.server.api.records.MasterKey; @@ -42,6 +43,25 @@ public class NMNullStateStoreService extends NMStateStoreService { return false; } + @Override + public RecoveredApplicationsState loadApplicationsState() throws IOException { + throw new UnsupportedOperationException( + "Recovery not supported by this state store"); + } + + @Override + public void storeApplication(ApplicationId appId, + ContainerManagerApplicationProto p) throws IOException { + } + + @Override + public void storeFinishedApplication(ApplicationId appId) { + } + + @Override + public void removeApplication(ApplicationId appId) throws IOException { + } + @Override public RecoveredLocalizationState loadLocalizationState() throws IOException { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java index 87c438b59bd..f0988e36172 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java @@ -33,6 +33,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto; +import 
org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.ContainerManagerApplicationProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto; import org.apache.hadoop.yarn.server.api.records.MasterKey; @@ -45,6 +46,19 @@ public abstract class NMStateStoreService extends AbstractService { super(name); } + public static class RecoveredApplicationsState { + List<ContainerManagerApplicationProto> applications; + List<ApplicationId> finishedApplications; + + public List<ContainerManagerApplicationProto> getApplications() { + return applications; + } + + public List<ApplicationId> getFinishedApplications() { + return finishedApplications; + } + } + public static class LocalResourceTrackerState { List<LocalizedResourceProto> localizedResources = new ArrayList<LocalizedResourceProto>(); @@ -162,6 +176,19 @@ public abstract class NMStateStoreService extends AbstractService { } + public abstract RecoveredApplicationsState loadApplicationsState() + throws IOException; + + public abstract void storeApplication(ApplicationId appId, + ContainerManagerApplicationProto p) throws IOException; + + public abstract void storeFinishedApplication(ApplicationId appId) + throws IOException; + + public abstract void removeApplication(ApplicationId appId) + throws IOException; + + /** * Load the state of localized resources * @return recovered localized resource state diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/records/impl/pb/NMDBSchemaVersionPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/records/impl/pb/NMDBSchemaVersionPBImpl.java deleted file mode 100644 index f42c1bee331..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/records/impl/pb/NMDBSchemaVersionPBImpl.java +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -package org.apache.hadoop.yarn.server.nodemanager.recovery.records.impl.pb; - -import org.apache.hadoop.classification.InterfaceAudience.Private; -import org.apache.hadoop.classification.InterfaceStability.Evolving; -import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.NMDBSchemaVersionProto; -import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.NMDBSchemaVersionProtoOrBuilder; - -import org.apache.hadoop.yarn.server.nodemanager.recovery.records.NMDBSchemaVersion; - -@Private -@Evolving -public class NMDBSchemaVersionPBImpl extends NMDBSchemaVersion { - - NMDBSchemaVersionProto proto = NMDBSchemaVersionProto.getDefaultInstance(); - NMDBSchemaVersionProto.Builder builder = null; - boolean viaProto = false; - - public NMDBSchemaVersionPBImpl() { - builder = NMDBSchemaVersionProto.newBuilder(); - } - - public NMDBSchemaVersionPBImpl(NMDBSchemaVersionProto proto) { - this.proto = proto; - viaProto = true; - } - - public NMDBSchemaVersionProto getProto() { - proto = viaProto ? proto : builder.build(); - viaProto = true; - return proto; - } - - private void maybeInitBuilder() { - if (viaProto || builder == null) { - builder = NMDBSchemaVersionProto.newBuilder(proto); - } - viaProto = false; - } - - @Override - public int getMajorVersion() { - NMDBSchemaVersionProtoOrBuilder p = viaProto ? proto : builder; - return p.getMajorVersion(); - } - - @Override - public void setMajorVersion(int majorVersion) { - maybeInitBuilder(); - builder.setMajorVersion(majorVersion); - } - - @Override - public int getMinorVersion() { - NMDBSchemaVersionProtoOrBuilder p = viaProto ? proto : builder; - return p.getMinorVersion(); - } - - @Override - public void setMinorVersion(int minorVersion) { - maybeInitBuilder(); - builder.setMinorVersion(minorVersion); - } - -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java index 2f78ec4cb27..ca2f239e223 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java @@ -55,7 +55,9 @@ public class WebServer extends AbstractService { @Override protected void serviceStart() throws Exception { - String bindAddress = WebAppUtils.getNMWebAppURLWithoutScheme(getConfig()); + String bindAddress = WebAppUtils.getWebAppBindURL(getConfig(), + YarnConfiguration.NM_BIND_HOST, + WebAppUtils.getNMWebAppURLWithoutScheme(getConfig())); LOG.info("Instantiating NMWebApp at " + bindAddress); try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto index a07e7ad6b2d..e6f39f6c5f8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto @@ -24,6 
+24,13 @@ package hadoop.yarn; import "yarn_protos.proto"; +message ContainerManagerApplicationProto { + optional ApplicationIdProto id = 1; + optional string user = 2; + optional bytes credentials = 3; + repeated ApplicationACLMapProto acls = 4; +} + message DeletionServiceDeleteTaskProto { optional int32 id = 1; optional string user = 2; @@ -39,8 +46,3 @@ message LocalizedResourceProto { optional int64 size = 3; } -message NMDBSchemaVersionProto { - optional int32 majorVersion = 1; - optional int32 minorVersion = 2; -} - diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java index 772bc05cb9f..fecc837cfab 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java @@ -82,6 +82,8 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Ap import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; +import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService; +import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; @@ -91,8 +93,6 @@ import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; -import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService; -import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; @SuppressWarnings("rawtypes") public class TestNodeStatusUpdater { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java new file mode 100644 index 00000000000..00eb2fe288b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java @@ -0,0 +1,323 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; + +import java.nio.ByteBuffer; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.DataOutputBuffer; +import org.apache.hadoop.security.Credentials; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; +import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest; +import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse; +import org.apache.hadoop.yarn.api.records.ApplicationAccessType; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.security.NMTokenIdentifier; +import org.apache.hadoop.yarn.server.api.records.MasterKey; +import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl; +import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedAppsEvent; +import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor; +import org.apache.hadoop.yarn.server.nodemanager.Context; +import org.apache.hadoop.yarn.server.nodemanager.DeletionService; +import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext; +import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdater; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncher; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncherEvent; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEvent; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.LogHandler; +import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; +import org.apache.hadoop.yarn.server.nodemanager.recovery.NMMemoryStateStoreService; +import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; +import 
org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager; +import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM; +import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; +import org.junit.Test; + +public class TestContainerManagerRecovery { + + private NodeManagerMetrics metrics = NodeManagerMetrics.create(); + + @Test + public void testApplicationRecovery() throws Exception { + YarnConfiguration conf = new YarnConfiguration(); + conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true); + conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true); + conf.set(YarnConfiguration.YARN_ADMIN_ACL, "yarn_admin_user"); + NMStateStoreService stateStore = new NMMemoryStateStoreService(); + stateStore.init(conf); + stateStore.start(); + Context context = new NMContext(new NMContainerTokenSecretManager( + conf), new NMTokenSecretManagerInNM(), null, + new ApplicationACLsManager(conf), stateStore); + ContainerManagerImpl cm = createContainerManager(context); + cm.init(conf); + cm.start(); + + // simulate registration with RM + MasterKey masterKey = new MasterKeyPBImpl(); + masterKey.setKeyId(123); + masterKey.setBytes(ByteBuffer.wrap(new byte[] { new Integer(123) + .byteValue() })); + context.getContainerTokenSecretManager().setMasterKey(masterKey); + context.getNMTokenSecretManager().setMasterKey(masterKey); + + // add an application by starting a container + String appUser = "app_user1"; + String modUser = "modify_user1"; + String viewUser = "view_user1"; + String enemyUser = "enemy_user"; + ApplicationId appId = ApplicationId.newInstance(0, 1); + ApplicationAttemptId attemptId = + ApplicationAttemptId.newInstance(appId, 1); + ContainerId cid = ContainerId.newInstance(attemptId, 1); + Map localResources = Collections.emptyMap(); + Map containerEnv = Collections.emptyMap(); + List containerCmds = Collections.emptyList(); + Map serviceData = Collections.emptyMap(); + Credentials containerCreds = new Credentials(); + DataOutputBuffer dob = new DataOutputBuffer(); + containerCreds.writeTokenStorageToStream(dob); + ByteBuffer containerTokens = ByteBuffer.wrap(dob.getData(), 0, + dob.getLength()); + Map acls = + new HashMap(); + acls.put(ApplicationAccessType.MODIFY_APP, modUser); + acls.put(ApplicationAccessType.VIEW_APP, viewUser); + ContainerLaunchContext clc = ContainerLaunchContext.newInstance( + localResources, containerEnv, containerCmds, serviceData, + containerTokens, acls); + StartContainersResponse startResponse = startContainer(context, cm, cid, + clc); + assertTrue(startResponse.getFailedRequests().isEmpty()); + assertEquals(1, context.getApplications().size()); + Application app = context.getApplications().get(appId); + assertNotNull(app); + waitForAppState(app, ApplicationState.INITING); + assertTrue(context.getApplicationACLsManager().checkAccess( + UserGroupInformation.createRemoteUser(modUser), + ApplicationAccessType.MODIFY_APP, appUser, appId)); + assertFalse(context.getApplicationACLsManager().checkAccess( + UserGroupInformation.createRemoteUser(viewUser), + ApplicationAccessType.MODIFY_APP, appUser, appId)); + assertTrue(context.getApplicationACLsManager().checkAccess( + UserGroupInformation.createRemoteUser(viewUser), + ApplicationAccessType.VIEW_APP, appUser, appId)); + assertFalse(context.getApplicationACLsManager().checkAccess( + UserGroupInformation.createRemoteUser(enemyUser), + ApplicationAccessType.VIEW_APP, appUser, appId)); + + // reset container manager and verify app recovered with proper acls + 
cm.stop(); + context = new NMContext(new NMContainerTokenSecretManager( + conf), new NMTokenSecretManagerInNM(), null, + new ApplicationACLsManager(conf), stateStore); + cm = createContainerManager(context); + cm.init(conf); + cm.start(); + assertEquals(1, context.getApplications().size()); + app = context.getApplications().get(appId); + assertNotNull(app); + waitForAppState(app, ApplicationState.INITING); + assertTrue(context.getApplicationACLsManager().checkAccess( + UserGroupInformation.createRemoteUser(modUser), + ApplicationAccessType.MODIFY_APP, appUser, appId)); + assertFalse(context.getApplicationACLsManager().checkAccess( + UserGroupInformation.createRemoteUser(viewUser), + ApplicationAccessType.MODIFY_APP, appUser, appId)); + assertTrue(context.getApplicationACLsManager().checkAccess( + UserGroupInformation.createRemoteUser(viewUser), + ApplicationAccessType.VIEW_APP, appUser, appId)); + assertFalse(context.getApplicationACLsManager().checkAccess( + UserGroupInformation.createRemoteUser(enemyUser), + ApplicationAccessType.VIEW_APP, appUser, appId)); + + // simulate application completion + List finishedApps = new ArrayList(); + finishedApps.add(appId); + cm.handle(new CMgrCompletedAppsEvent(finishedApps, + CMgrCompletedAppsEvent.Reason.BY_RESOURCEMANAGER)); + waitForAppState(app, ApplicationState.APPLICATION_RESOURCES_CLEANINGUP); + + // restart and verify app is marked for finishing + cm.stop(); + context = new NMContext(new NMContainerTokenSecretManager( + conf), new NMTokenSecretManagerInNM(), null, + new ApplicationACLsManager(conf), stateStore); + cm = createContainerManager(context); + cm.init(conf); + cm.start(); + assertEquals(1, context.getApplications().size()); + app = context.getApplications().get(appId); + assertNotNull(app); + waitForAppState(app, ApplicationState.APPLICATION_RESOURCES_CLEANINGUP); + assertTrue(context.getApplicationACLsManager().checkAccess( + UserGroupInformation.createRemoteUser(modUser), + ApplicationAccessType.MODIFY_APP, appUser, appId)); + assertFalse(context.getApplicationACLsManager().checkAccess( + UserGroupInformation.createRemoteUser(viewUser), + ApplicationAccessType.MODIFY_APP, appUser, appId)); + assertTrue(context.getApplicationACLsManager().checkAccess( + UserGroupInformation.createRemoteUser(viewUser), + ApplicationAccessType.VIEW_APP, appUser, appId)); + assertFalse(context.getApplicationACLsManager().checkAccess( + UserGroupInformation.createRemoteUser(enemyUser), + ApplicationAccessType.VIEW_APP, appUser, appId)); + + // simulate log aggregation completion + app.handle(new ApplicationEvent(app.getAppId(), + ApplicationEventType.APPLICATION_RESOURCES_CLEANEDUP)); + assertEquals(app.getApplicationState(), ApplicationState.FINISHED); + app.handle(new ApplicationEvent(app.getAppId(), + ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)); + + // restart and verify app is no longer present after recovery + cm.stop(); + context = new NMContext(new NMContainerTokenSecretManager( + conf), new NMTokenSecretManagerInNM(), null, + new ApplicationACLsManager(conf), stateStore); + cm = createContainerManager(context); + cm.init(conf); + cm.start(); + assertTrue(context.getApplications().isEmpty()); + cm.stop(); + } + + private StartContainersResponse startContainer(Context context, + final ContainerManagerImpl cm, ContainerId cid, + ContainerLaunchContext clc) throws Exception { + UserGroupInformation user = UserGroupInformation.createRemoteUser( + cid.getApplicationAttemptId().toString()); + StartContainerRequest scReq = 
StartContainerRequest.newInstance( + clc, TestContainerManager.createContainerToken(cid, 0, + context.getNodeId(), user.getShortUserName(), + context.getContainerTokenSecretManager())); + final List scReqList = + new ArrayList(); + scReqList.add(scReq); + NMTokenIdentifier nmToken = new NMTokenIdentifier( + cid.getApplicationAttemptId(), context.getNodeId(), + user.getShortUserName(), + context.getNMTokenSecretManager().getCurrentKey().getKeyId()); + user.addTokenIdentifier(nmToken); + return user.doAs(new PrivilegedExceptionAction() { + @Override + public StartContainersResponse run() throws Exception { + return cm.startContainers( + StartContainersRequest.newInstance(scReqList)); + } + }); + } + + private void waitForAppState(Application app, ApplicationState state) + throws Exception { + final int msecPerSleep = 10; + int msecLeft = 5000; + while (app.getApplicationState() != state && msecLeft > 0) { + Thread.sleep(msecPerSleep); + msecLeft -= msecPerSleep; + } + assertEquals(state, app.getApplicationState()); + } + + private ContainerManagerImpl createContainerManager(Context context) { + final LogHandler logHandler = mock(LogHandler.class); + final ResourceLocalizationService rsrcSrv = + new ResourceLocalizationService(null, null, null, null, + context.getNMStateStore()) { + @Override + public void serviceInit(Configuration conf) throws Exception { + } + + @Override + public void serviceStart() throws Exception { + // do nothing + } + + @Override + public void serviceStop() throws Exception { + // do nothing + } + + @Override + public void handle(LocalizationEvent event) { + // do nothing + } + }; + + final ContainersLauncher launcher = new ContainersLauncher(context, null, + null, null, null) { + @Override + public void handle(ContainersLauncherEvent event) { + // do nothing + } + }; + + return new ContainerManagerImpl(context, + mock(ContainerExecutor.class), mock(DeletionService.class), + mock(NodeStatusUpdater.class), metrics, + context.getApplicationACLsManager(), null) { + @Override + protected LogHandler createLogHandler(Configuration conf, + Context context, DeletionService deletionService) { + return logHandler; + } + + @Override + protected ResourceLocalizationService createResourceLocalizationService( + ContainerExecutor exec, DeletionService deletionContext) { + return rsrcSrv; + } + + @Override + protected ContainersLauncher createContainersLauncher( + Context context, ContainerExecutor exec) { + return launcher; + } + + @Override + public void setBlockNewContainerRequests( + boolean blockNewContainerRequests) { + // do nothing + } + }; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java index c43471fa2ee..e0239928221 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java @@ -88,6 +88,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.eve import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorEventType; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; +import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.junit.Assert; import org.junit.Test; @@ -722,6 +723,8 @@ public class TestContainer { Context context = mock(Context.class); when(context.getApplications()).thenReturn( new ConcurrentHashMap()); + NMNullStateStoreService stateStore = new NMNullStateStoreService(); + when(context.getNMStateStore()).thenReturn(stateStore); ContainerExecutor executor = mock(ContainerExecutor.class); launcher = new ContainersLauncher(context, dispatcher, executor, null, null); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java index fef2b12221f..5b5e1ef5bb8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java @@ -21,7 +21,9 @@ package org.apache.hadoop.yarn.server.nodemanager.recovery; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; +import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; @@ -29,12 +31,15 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto; +import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.ContainerManagerApplicationProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto; import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl; public class NMMemoryStateStoreService extends NMStateStoreService { + private Map apps; + private Set finishedApps; private Map trackerStates; private Map deleteTasks; private RecoveredNMTokensState nmTokenState; @@ -44,6 +49,58 @@ public class NMMemoryStateStoreService extends NMStateStoreService { super(NMMemoryStateStoreService.class.getName()); } + @Override + protected void initStorage(Configuration conf) { + apps = new HashMap(); + finishedApps = new HashSet(); + nmTokenState = new RecoveredNMTokensState(); + nmTokenState.applicationMasterKeys = + new HashMap(); + containerTokenState = new RecoveredContainerTokensState(); + containerTokenState.activeTokens = new HashMap(); + trackerStates = new HashMap(); + deleteTasks = new HashMap(); + } + + @Override + protected void startStorage() { + } + + @Override + protected void closeStorage() { + } + + + @Override + 
public RecoveredApplicationsState loadApplicationsState() + throws IOException { + RecoveredApplicationsState state = new RecoveredApplicationsState(); + state.applications = new ArrayList( + apps.values()); + state.finishedApplications = new ArrayList(finishedApps); + return state; + } + + @Override + public void storeApplication(ApplicationId appId, + ContainerManagerApplicationProto proto) throws IOException { + ContainerManagerApplicationProto protoCopy = + ContainerManagerApplicationProto.parseFrom(proto.toByteString()); + apps.put(appId, protoCopy); + } + + @Override + public void storeFinishedApplication(ApplicationId appId) { + finishedApps.add(appId); + } + + @Override + public void removeApplication(ApplicationId appId) throws IOException { + apps.remove(appId); + finishedApps.remove(appId); + } + + private LocalResourceTrackerState loadTrackerState(TrackerState ts) { LocalResourceTrackerState result = new LocalResourceTrackerState(); result.localizedResources.addAll(ts.localizedResources.values()); @@ -117,25 +174,6 @@ public class NMMemoryStateStoreService extends NMStateStoreService { } } - @Override - protected void initStorage(Configuration conf) { - nmTokenState = new RecoveredNMTokensState(); - nmTokenState.applicationMasterKeys = - new HashMap(); - containerTokenState = new RecoveredContainerTokensState(); - containerTokenState.activeTokens = new HashMap(); - trackerStates = new HashMap(); - deleteTasks = new HashMap(); - } - - @Override - protected void startStorage() { - } - - @Override - protected void closeStorage() { - } - @Override public RecoveredDeletionServiceState loadDeletionServiceState() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java index 833a062d3b8..84a69069a30 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java @@ -37,19 +37,22 @@ import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.LocalResourcePBImpl; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto; +import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.ContainerManagerApplicationProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto; import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.LocalResourceTrackerState; +import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredApplicationsState; import 
org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerTokensState; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredDeletionServiceState; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredLocalizationState; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredNMTokensState; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredUserResources; -import org.apache.hadoop.yarn.server.nodemanager.recovery.records.NMDBSchemaVersion; +import org.apache.hadoop.yarn.server.records.Version; import org.apache.hadoop.yarn.server.security.BaseContainerTokenSecretManager; import org.apache.hadoop.yarn.server.security.BaseNMTokenSecretManager; import org.apache.hadoop.yarn.server.utils.BuilderUtils; @@ -114,12 +117,12 @@ public class TestNMLeveldbStateStoreService { @Test public void testCheckVersion() throws IOException { // default version - NMDBSchemaVersion defaultVersion = stateStore.getCurrentVersion(); + Version defaultVersion = stateStore.getCurrentVersion(); Assert.assertEquals(defaultVersion, stateStore.loadVersion()); // compatible version - NMDBSchemaVersion compatibleVersion = - NMDBSchemaVersion.newInstance(defaultVersion.getMajorVersion(), + Version compatibleVersion = + Version.newInstance(defaultVersion.getMajorVersion(), defaultVersion.getMinorVersion() + 2); stateStore.storeVersion(compatibleVersion); Assert.assertEquals(compatibleVersion, stateStore.loadVersion()); @@ -128,8 +131,8 @@ public class TestNMLeveldbStateStoreService { Assert.assertEquals(defaultVersion, stateStore.loadVersion()); // incompatible version - NMDBSchemaVersion incompatibleVersion = - NMDBSchemaVersion.newInstance(defaultVersion.getMajorVersion() + 1, + Version incompatibleVersion = + Version.newInstance(defaultVersion.getMajorVersion() + 1, defaultVersion.getMinorVersion()); stateStore.storeVersion(incompatibleVersion); try { @@ -141,6 +144,54 @@ public class TestNMLeveldbStateStoreService { } } + @Test + public void testApplicationStorage() throws IOException { + // test empty when no state + RecoveredApplicationsState state = stateStore.loadApplicationsState(); + assertTrue(state.getApplications().isEmpty()); + assertTrue(state.getFinishedApplications().isEmpty()); + + // store an application and verify recovered + final ApplicationId appId1 = ApplicationId.newInstance(1234, 1); + ContainerManagerApplicationProto.Builder builder = + ContainerManagerApplicationProto.newBuilder(); + builder.setId(((ApplicationIdPBImpl) appId1).getProto()); + builder.setUser("user1"); + ContainerManagerApplicationProto appProto1 = builder.build(); + stateStore.storeApplication(appId1, appProto1); + restartStateStore(); + state = stateStore.loadApplicationsState(); + assertEquals(1, state.getApplications().size()); + assertEquals(appProto1, state.getApplications().get(0)); + assertTrue(state.getFinishedApplications().isEmpty()); + + // finish an application and add a new one + stateStore.storeFinishedApplication(appId1); + final ApplicationId appId2 = ApplicationId.newInstance(1234, 2); + builder = ContainerManagerApplicationProto.newBuilder(); + builder.setId(((ApplicationIdPBImpl) appId2).getProto()); + builder.setUser("user2"); + ContainerManagerApplicationProto appProto2 = builder.build(); + stateStore.storeApplication(appId2, appProto2); + restartStateStore(); + state = stateStore.loadApplicationsState(); + assertEquals(2, 
state.getApplications().size()); + assertTrue(state.getApplications().contains(appProto1)); + assertTrue(state.getApplications().contains(appProto2)); + assertEquals(1, state.getFinishedApplications().size()); + assertEquals(appId1, state.getFinishedApplications().get(0)); + + // test removing an application + stateStore.storeFinishedApplication(appId2); + stateStore.removeApplication(appId2); + restartStateStore(); + state = stateStore.loadApplicationsState(); + assertEquals(1, state.getApplications().size()); + assertEquals(appProto1, state.getApplications().get(0)); + assertEquals(1, state.getFinishedApplications().size()); + assertEquals(appId1, state.getFinishedApplications().get(0)); + } + @Test public void testStartResourceLocalization() throws IOException { String user = "somebody"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java index 1d2f376d325..c47f49e207e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java @@ -90,7 +90,9 @@ public class AdminService extends CompositeService implements private EmbeddedElectorService embeddedElector; private Server server; - private InetSocketAddress masterServiceAddress; + + // Address to use for binding. May be a wildcard address. 
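The AdminService changes that follow are one instance of the RM_BIND_HOST pattern this patch applies to every RM endpoint: resolve the bind address from the optional bind-host plus the configured service address, start the server, then publish the connect address with updateConnectAddr(). A rough sketch using the admin-service keys; the hard-coded listener address merely stands in for server.getListenerAddress():

import java.net.InetSocketAddress;

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class BindHostSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Bind to the wildcard address while clients keep using the configured
    // RM_ADMIN_ADDRESS hostname.
    conf.set(YarnConfiguration.RM_BIND_HOST, "0.0.0.0");

    // Host comes from RM_BIND_HOST, port from RM_ADMIN_ADDRESS (or its default).
    InetSocketAddress bindAddress = conf.getSocketAddr(
        YarnConfiguration.RM_BIND_HOST,
        YarnConfiguration.RM_ADMIN_ADDRESS,
        YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS,
        YarnConfiguration.DEFAULT_RM_ADMIN_PORT);
    System.out.println("bind to " + bindAddress);

    // After the RPC server starts, publish the address clients should use.
    InetSocketAddress listener = new InetSocketAddress("0.0.0.0", 8033);
    InetSocketAddress connectAddress = conf.updateConnectAddr(
        YarnConfiguration.RM_BIND_HOST,
        YarnConfiguration.RM_ADMIN_ADDRESS,
        YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS,
        listener);
    System.out.println("clients connect to " + connectAddress);
  }
}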
+ private InetSocketAddress masterServiceBindAddress; private AccessControlList adminAcl; private final RecordFactory recordFactory = @@ -114,10 +116,12 @@ public class AdminService extends CompositeService implements } } - masterServiceAddress = conf.getSocketAddr( + masterServiceBindAddress = conf.getSocketAddr( + YarnConfiguration.RM_BIND_HOST, YarnConfiguration.RM_ADMIN_ADDRESS, YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS, YarnConfiguration.DEFAULT_RM_ADMIN_PORT); + adminAcl = new AccessControlList(conf.get( YarnConfiguration.YARN_ADMIN_ACL, YarnConfiguration.DEFAULT_YARN_ADMIN_ACL)); @@ -141,7 +145,7 @@ public class AdminService extends CompositeService implements Configuration conf = getConfig(); YarnRPC rpc = YarnRPC.create(conf); this.server = (Server) rpc.getServer( - ResourceManagerAdministrationProtocol.class, this, masterServiceAddress, + ResourceManagerAdministrationProtocol.class, this, masterServiceBindAddress, conf, null, conf.getInt(YarnConfiguration.RM_ADMIN_CLIENT_THREAD_COUNT, YarnConfiguration.DEFAULT_RM_ADMIN_CLIENT_THREAD_COUNT)); @@ -170,8 +174,10 @@ public class AdminService extends CompositeService implements } this.server.start(); - conf.updateConnectAddr(YarnConfiguration.RM_ADMIN_ADDRESS, - server.getListenerAddress()); + conf.updateConnectAddr(YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_ADMIN_ADDRESS, + YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS, + server.getListenerAddress()); } protected void stopServer() throws Exception { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java index e60add44bb1..eda4c7b658e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java @@ -127,6 +127,7 @@ public class ApplicationMasterService extends AbstractService implements YarnRPC rpc = YarnRPC.create(conf); InetSocketAddress masterServiceAddress = conf.getSocketAddr( + YarnConfiguration.RM_BIND_HOST, YarnConfiguration.RM_SCHEDULER_ADDRESS, YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS, YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT); @@ -159,7 +160,9 @@ public class ApplicationMasterService extends AbstractService implements this.server.start(); this.bindAddress = - conf.updateConnectAddr(YarnConfiguration.RM_SCHEDULER_ADDRESS, + conf.updateConnectAddr(YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_SCHEDULER_ADDRESS, + YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS, server.getListenerAddress()); super.serviceStart(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java index 974376091b0..71f873c26a4 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java @@ -199,7 +199,9 @@ public class ClientRMService extends AbstractService implements } this.server.start(); - clientBindAddress = conf.updateConnectAddr(YarnConfiguration.RM_ADDRESS, + clientBindAddress = conf.updateConnectAddr(YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_ADDRESS, + YarnConfiguration.DEFAULT_RM_ADDRESS, server.getListenerAddress()); super.serviceStart(); } @@ -213,7 +215,9 @@ public class ClientRMService extends AbstractService implements } InetSocketAddress getBindAddress(Configuration conf) { - return conf.getSocketAddr(YarnConfiguration.RM_ADDRESS, + return conf.getSocketAddr( + YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS, YarnConfiguration.DEFAULT_RM_PORT); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 4b5d94875ad..40e346c680a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -155,7 +155,8 @@ public class ResourceManager extends CompositeService implements Recoverable { private AppReportFetcher fetcher = null; protected ResourceTrackerService resourceTracker; - private String webAppAddress; + @VisibleForTesting + protected String webAppAddress; private ConfigurationProvider configurationProvider = null; /** End of Active services */ @@ -230,7 +231,9 @@ public class ResourceManager extends CompositeService implements Recoverable { } createAndInitActiveServices(); - webAppAddress = WebAppUtils.getRMWebAppURLWithoutScheme(this.conf); + webAppAddress = WebAppUtils.getWebAppBindURL(this.conf, + YarnConfiguration.RM_BIND_HOST, + WebAppUtils.getRMWebAppURLWithoutScheme(this.conf)); this.rmLoginUGI = UserGroupInformation.getCurrentUser(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java index f2a83763bc8..b532dd56309 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java @@ -121,6 +121,7 @@ public class ResourceTrackerService extends AbstractService implements @Override protected void serviceInit(Configuration conf) throws Exception { resourceTrackerAddress 
= conf.getSocketAddr( + YarnConfiguration.RM_BIND_HOST, YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT); @@ -175,9 +176,11 @@ public class ResourceTrackerService extends AbstractService implements } refreshServiceAcls(conf, RMPolicyProvider.getInstance()); } - + this.server.start(); - conf.updateConnectAddr(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, + conf.updateConnectAddr(YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, server.getListenerAddress()); } @@ -308,7 +311,8 @@ public class ResourceTrackerService extends AbstractService implements LOG.info("Reconnect from the node at: " + host); this.nmLivelinessMonitor.unregister(nodeId); this.rmContext.getDispatcher().getEventHandler().handle( - new RMNodeReconnectEvent(nodeId, rmNode)); + new RMNodeReconnectEvent(nodeId, rmNode, + request.getRunningApplications())); } // On every node manager register we will be clearing NMToken keys if // present for any running application. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java index 243c7a19912..2f8a944f666 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java @@ -44,22 +44,22 @@ import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.AMRMTokenSecretManagerStateProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.EpochProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationAttemptStateDataProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationStateDataProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RMStateVersionProto; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.records.Version; +import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.Epoch; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.AMRMTokenSecretManagerStatePBImpl; import 
org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationAttemptStateDataPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationStateDataPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.EpochPBImpl; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.RMStateVersionPBImpl; import org.apache.hadoop.yarn.util.ConverterUtils; import com.google.common.annotations.VisibleForTesting; @@ -77,7 +77,7 @@ public class FileSystemRMStateStore extends RMStateStore { public static final Log LOG = LogFactory.getLog(FileSystemRMStateStore.class); protected static final String ROOT_DIR_NAME = "FSRMStateRoot"; - protected static final RMStateVersion CURRENT_VERSION_INFO = RMStateVersion + protected static final Version CURRENT_VERSION_INFO = Version .newInstance(1, 1); protected static final String AMRMTOKEN_SECRET_MANAGER_NODE = "AMRMTokenSecretManagerNode"; @@ -130,18 +130,18 @@ public class FileSystemRMStateStore extends RMStateStore { } @Override - protected RMStateVersion getCurrentVersion() { + protected Version getCurrentVersion() { return CURRENT_VERSION_INFO; } @Override - protected synchronized RMStateVersion loadVersion() throws Exception { + protected synchronized Version loadVersion() throws Exception { Path versionNodePath = getNodePath(rootDirPath, VERSION_NODE); if (fs.exists(versionNodePath)) { FileStatus status = fs.getFileStatus(versionNodePath); byte[] data = readFile(versionNodePath, status.getLen()); - RMStateVersion version = - new RMStateVersionPBImpl(RMStateVersionProto.parseFrom(data)); + Version version = + new VersionPBImpl(VersionProto.parseFrom(data)); return version; } return null; @@ -151,7 +151,7 @@ public class FileSystemRMStateStore extends RMStateStore { protected synchronized void storeVersion() throws Exception { Path versionNodePath = getNodePath(rootDirPath, VERSION_NODE); byte[] data = - ((RMStateVersionPBImpl) CURRENT_VERSION_INFO).getProto().toByteArray(); + ((VersionPBImpl) CURRENT_VERSION_INFO).getProto().toByteArray(); if (fs.exists(versionNodePath)) { updateFile(versionNodePath, data); } else { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java index 369f89a545e..f56517cd828 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java @@ -32,10 +32,10 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.records.Version; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData; import 
org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; import com.google.common.annotations.VisibleForTesting; @@ -259,7 +259,7 @@ public class MemoryRMStateStore extends RMStateStore { } @Override - protected RMStateVersion loadVersion() throws Exception { + protected Version loadVersion() throws Exception { return null; } @@ -268,7 +268,7 @@ public class MemoryRMStateStore extends RMStateStore { } @Override - protected RMStateVersion getCurrentVersion() { + protected Version getCurrentVersion() { return null; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java index ea7087176c9..e910c19629e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java @@ -25,10 +25,10 @@ import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.records.Version; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; @Unstable public class NullRMStateStore extends RMStateStore { @@ -123,7 +123,7 @@ public class NullRMStateStore extends RMStateStore { } @Override - protected RMStateVersion loadVersion() throws Exception { + protected Version loadVersion() throws Exception { // Do nothing return null; } @@ -134,7 +134,7 @@ public class NullRMStateStore extends RMStateStore { } @Override - protected RMStateVersion getCurrentVersion() { + protected Version getCurrentVersion() { // Do nothing return null; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java index e2c4e7e47fa..da08d80466d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java @@ -47,12 +47,12 @@ import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; 
import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.records.Version; import org.apache.hadoop.yarn.server.resourcemanager.RMFatalEvent; import org.apache.hadoop.yarn.server.resourcemanager.RMFatalEventType; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppNewSavedEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; @@ -493,14 +493,14 @@ public abstract class RMStateStore extends AbstractService { * upgrade RM state. */ public void checkVersion() throws Exception { - RMStateVersion loadedVersion = loadVersion(); + Version loadedVersion = loadVersion(); LOG.info("Loaded RM state version info " + loadedVersion); if (loadedVersion != null && loadedVersion.equals(getCurrentVersion())) { return; } // if there is no version info, treat it as 1.0; if (loadedVersion == null) { - loadedVersion = RMStateVersion.newInstance(1, 0); + loadedVersion = Version.newInstance(1, 0); } if (loadedVersion.isCompatibleTo(getCurrentVersion())) { LOG.info("Storing RM state version info " + getCurrentVersion()); @@ -516,7 +516,7 @@ public abstract class RMStateStore extends AbstractService { * Derived class use this method to load the version information from state * store. */ - protected abstract RMStateVersion loadVersion() throws Exception; + protected abstract Version loadVersion() throws Exception; /** * Derived class use this method to store the version information. @@ -526,7 +526,7 @@ public abstract class RMStateStore extends AbstractService { /** * Get the current version of the underlying state store. 
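checkVersion() above treats a missing stored version as 1.0 and only loads state whose stored version is compatible with the current one, rewriting the stored version on a compatible minor mismatch. A small sketch of the rule, assuming the shared Version record keeps the major-version-only compatibility semantics of the removed RMStateVersion:

import org.apache.hadoop.yarn.server.records.Version;

public class VersionCheckSketch {
  public static void main(String[] args) {
    Version current = Version.newInstance(1, 1);

    // Same major version: compatible, state loads and the stored version is
    // bumped to the current one.
    System.out.println(Version.newInstance(1, 3).isCompatibleTo(current));  // true

    // Different major version: incompatible, checkVersion() throws instead of
    // loading the state.
    System.out.println(Version.newInstance(2, 0).isCompatibleTo(current));  // false
  }
}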
*/ - protected abstract RMStateVersion getCurrentVersion(); + protected abstract Version getCurrentVersion(); /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java index 5644ad9e34a..bb379c5b8b9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java @@ -44,23 +44,23 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.HAUtil; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.AMRMTokenSecretManagerStateProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationAttemptStateDataProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationStateDataProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RMStateVersionProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.EpochProto; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl; +import org.apache.hadoop.yarn.server.records.Version; import org.apache.hadoop.yarn.server.resourcemanager.RMZKUtils; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.Epoch; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.AMRMTokenSecretManagerStatePBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationAttemptStateDataPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationStateDataPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.EpochPBImpl; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.RMStateVersionPBImpl; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; @@ -86,7 +86,7 @@ public class ZKRMStateStore extends RMStateStore { private final SecureRandom random = new SecureRandom(); protected static final String ROOT_ZNODE_NAME = "ZKRMStateRoot"; - protected static final RMStateVersion CURRENT_VERSION_INFO = RMStateVersion + protected static final Version CURRENT_VERSION_INFO = Version .newInstance(1, 1); private static final String RM_DELEGATION_TOKENS_ROOT_ZNODE_NAME = "RMDelegationTokensRoot"; @@ -377,7 +377,7 @@ public class 
ZKRMStateStore extends RMStateStore { } @Override - protected RMStateVersion getCurrentVersion() { + protected Version getCurrentVersion() { return CURRENT_VERSION_INFO; } @@ -385,7 +385,7 @@ public class ZKRMStateStore extends RMStateStore { protected synchronized void storeVersion() throws Exception { String versionNodePath = getNodePath(zkRootNodePath, VERSION_NODE); byte[] data = - ((RMStateVersionPBImpl) CURRENT_VERSION_INFO).getProto().toByteArray(); + ((VersionPBImpl) CURRENT_VERSION_INFO).getProto().toByteArray(); if (existsWithRetries(versionNodePath, true) != null) { setDataWithRetries(versionNodePath, data, -1); } else { @@ -394,13 +394,13 @@ public class ZKRMStateStore extends RMStateStore { } @Override - protected synchronized RMStateVersion loadVersion() throws Exception { + protected synchronized Version loadVersion() throws Exception { String versionNodePath = getNodePath(zkRootNodePath, VERSION_NODE); if (existsWithRetries(versionNodePath, true) != null) { byte[] data = getDataWithRetries(versionNodePath, true); - RMStateVersion version = - new RMStateVersionPBImpl(RMStateVersionProto.parseFrom(data)); + Version version = + new VersionPBImpl(VersionProto.parseFrom(data)); return version; } return null; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/RMStateVersion.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/RMStateVersion.java deleted file mode 100644 index cfee512b5d4..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/RMStateVersion.java +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.yarn.server.resourcemanager.recovery.records; - -import org.apache.hadoop.classification.InterfaceAudience.Private; -import org.apache.hadoop.classification.InterfaceStability.Unstable; -import org.apache.hadoop.yarn.util.Records; - -/** - * The version information of RM state. 
- */ -@Private -@Unstable -public abstract class RMStateVersion { - - public static RMStateVersion newInstance(int majorVersion, int minorVersion) { - RMStateVersion version = Records.newRecord(RMStateVersion.class); - version.setMajorVersion(majorVersion); - version.setMinorVersion(minorVersion); - return version; - } - - public abstract int getMajorVersion(); - - public abstract void setMajorVersion(int majorVersion); - - public abstract int getMinorVersion(); - - public abstract void setMinorVersion(int minorVersion); - - public String toString() { - return getMajorVersion() + "." + getMinorVersion(); - } - - public boolean isCompatibleTo(RMStateVersion version) { - return getMajorVersion() == version.getMajorVersion(); - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + getMajorVersion(); - result = prime * result + getMinorVersion(); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; - RMStateVersion other = (RMStateVersion) obj; - if (this.getMajorVersion() == other.getMajorVersion() - && this.getMinorVersion() == other.getMinorVersion()) { - return true; - } else { - return false; - } - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java index efa1ee72808..7c7b7541b3e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java @@ -1191,6 +1191,9 @@ public class RMAppImpl implements RMApp, Recoverable { public static boolean isAppInFinalState(RMApp rmApp) { RMAppState appState = ((RMAppImpl) rmApp).getRecoveredFinalState(); + if (appState == null) { + appState = rmApp.getState(); + } return appState == RMAppState.FAILED || appState == RMAppState.FINISHED || appState == RMAppState.KILLED; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java index e20adc5c4d4..cf81d727138 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java @@ -456,6 +456,24 @@ public class RMNodeImpl implements RMNode, EventHandler { } } + private static void handleRunningAppOnNode(RMNodeImpl rmNode, + RMContext context, ApplicationId appId, NodeId nodeId) { + RMApp app = context.getRMApps().get(appId); + + // if we failed getting app by appId, maybe something wrong happened, just + // add the app to the finishedApplications list so that 
the app can be + // cleaned up on the NM + if (null == app) { + LOG.warn("Cannot get RMApp by appId=" + appId + + ", just added it to finishedApplications list for cleanup"); + rmNode.finishedApplications.add(appId); + return; + } + + context.getDispatcher().getEventHandler() + .handle(new RMAppRunningOnNodeEvent(appId, nodeId)); + } + public static class AddNodeTransition implements SingleArcTransition { @@ -496,24 +514,6 @@ public class RMNodeImpl implements RMNode, EventHandler { new NodesListManagerEvent( NodesListManagerEventType.NODE_USABLE, rmNode)); } - - void handleRunningAppOnNode(RMNodeImpl rmNode, RMContext context, - ApplicationId appId, NodeId nodeId) { - RMApp app = context.getRMApps().get(appId); - - // if we failed getting app by appId, maybe something wrong happened, just - // add the app to the finishedApplications list so that the app can be - // cleaned up on the NM - if (null == app) { - LOG.warn("Cannot get RMApp by appId=" + appId - + ", just added it to finishedApplications list for cleanup"); - rmNode.finishedApplications.add(appId); - return; - } - - context.getDispatcher().getEventHandler() - .handle(new RMAppRunningOnNodeEvent(appId, nodeId)); - } } public static class ReconnectNodeTransition implements @@ -526,7 +526,8 @@ public class RMNodeImpl implements RMNode, EventHandler { rmNode.context.getDispatcher().getEventHandler().handle( new NodeRemovedSchedulerEvent(rmNode)); - RMNode newNode = ((RMNodeReconnectEvent)event).getReconnectedNode(); + RMNodeReconnectEvent reconnectEvent = (RMNodeReconnectEvent) event; + RMNode newNode = reconnectEvent.getReconnectedNode(); rmNode.nodeManagerVersion = newNode.getNodeManagerVersion(); if (rmNode.getTotalCapability().equals(newNode.getTotalCapability()) && rmNode.getHttpPort() == newNode.getHttpPort()) { @@ -551,6 +552,13 @@ public class RMNodeImpl implements RMNode, EventHandler { rmNode.context.getDispatcher().getEventHandler().handle( new RMNodeStartedEvent(newNode.getNodeID(), null, null)); } + + if (null != reconnectEvent.getRunningApplications()) { + for (ApplicationId appId : reconnectEvent.getRunningApplications()) { + handleRunningAppOnNode(rmNode, rmNode.context, appId, rmNode.nodeId); + } + } + rmNode.context.getDispatcher().getEventHandler().handle( new NodesListManagerEvent( NodesListManagerEventType.NODE_USABLE, rmNode)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeReconnectEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeReconnectEvent.java index b1fa0ad8c0c..ebbac9ab156 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeReconnectEvent.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeReconnectEvent.java @@ -18,17 +18,27 @@ package org.apache.hadoop.yarn.server.resourcemanager.rmnode; +import java.util.List; + +import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.NodeId; public class RMNodeReconnectEvent extends RMNodeEvent { private RMNode reconnectedNode; + private List runningApplications; - public RMNodeReconnectEvent(NodeId nodeId, RMNode newNode) { + public 
RMNodeReconnectEvent(NodeId nodeId, RMNode newNode, + List runningApps) { super(nodeId, RMNodeEventType.RECONNECTED); reconnectedNode = newNode; + runningApplications = runningApps; } public RMNode getReconnectedNode() { return reconnectedNode; } + + public List getRunningApplications() { + return runningApplications; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java index a127123a760..3c6f210b4b6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java @@ -360,7 +360,7 @@ public class AppSchedulingInfo { List resourceRequests) { // Update future requirements decrementOutstanding(offSwitchRequest); - // Update cloned RackLocal and OffRack requests for recovery + // Update cloned OffRack requests for recovery resourceRequests.add(cloneResourceRequest(offSwitchRequest)); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java index e88ebd24f3b..c07882da0ad 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java @@ -449,6 +449,35 @@ public class TestApplicationCleanup { rm2.stop(); } + @Test (timeout = 60000) + public void testAppCleanupWhenNMReconnects() throws Exception { + conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1); + MemoryRMStateStore memStore = new MemoryRMStateStore(); + memStore.init(conf); + + // start RM + MockRM rm1 = new MockRM(conf, memStore); + rm1.start(); + MockNM nm1 = + new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService()); + nm1.registerNode(); + + // create app and launch the AM + RMApp app0 = rm1.submitApp(200); + MockAM am0 = launchAM(app0, rm1, nm1); + nm1.nodeHeartbeat(am0.getApplicationAttemptId(), 1, ContainerState.COMPLETE); + rm1.waitForState(app0.getApplicationId(), RMAppState.FAILED); + + // wait for application cleanup message received + waitForAppCleanupMessageRecved(nm1, app0.getApplicationId()); + + // reconnect NM with application still active + nm1.registerNode(Arrays.asList(app0.getApplicationId())); + waitForAppCleanupMessageRecved(nm1, app0.getApplicationId()); + + rm1.stop(); + } + public static void main(String[] args) throws Exception { TestApplicationCleanup t = new TestApplicationCleanup(); t.testAppCleanup(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
index 610023b73dc..b8290de5127 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
@@ -380,7 +380,19 @@ public class TestRMHA {
   }
 
   @Test
-  public void testHAWithRMHostName() {
+  public void testHAWithRMHostName() throws Exception {
+    innerTestHAWithRMHostName(false);
+    configuration.clear();
+    setUp();
+    innerTestHAWithRMHostName(true);
+  }
+
+  public void innerTestHAWithRMHostName(boolean includeBindHost) {
+    //this is run two times, with and without a bind host configured
+    if (includeBindHost) {
+      configuration.set(YarnConfiguration.RM_BIND_HOST, "9.9.9.9");
+    }
+
     //test if both RM_HOSTBANE_{rm_id} and RM_RPCADDRESS_{rm_id} are set
     //We should only read rpc addresses from RM_RPCADDRESS_{rm_id} configuration
     configuration.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME,
@@ -400,6 +412,15 @@ public class TestRMHA {
             RM2_ADDRESS, conf.get(HAUtil.addSuffix(confKey, RM2_NODE_ID)));
         assertEquals("RPC address not set for " + confKey,
             RM3_ADDRESS, conf.get(HAUtil.addSuffix(confKey, RM3_NODE_ID)));
+        if (includeBindHost) {
+          assertEquals("Web address misconfigured WITH bind-host",
+              rm.webAppAddress.substring(0, 7), "9.9.9.9");
+        } else {
+          //YarnConfiguration tries to figure out which rm host it's on by binding to it,
+          //which doesn't happen for any of these fake addresses, so we end up with 0.0.0.0
+          assertEquals("Web address misconfigured WITHOUT bind-host",
+              rm.webAppAddress.substring(0, 7), "0.0.0.0");
+        }
       }
     } catch (YarnRuntimeException e) {
       fail("Should not throw any exceptions.");
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
index 46693be36ca..aa2cfc2eba1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
@@ -520,7 +520,7 @@ public class TestRMNodeTransitions {
     int initialUnhealthy = cm.getUnhealthyNMs();
     int initialDecommissioned = cm.getNumDecommisionedNMs();
     int initialRebooted = cm.getNumRebootedNMs();
-    node.handle(new RMNodeReconnectEvent(node.getNodeID(), node));
+    node.handle(new RMNodeReconnectEvent(node.getNodeID(), node, null));
     Assert.assertEquals("Active Nodes", initialActive, cm.getNumActiveNMs());
     Assert.assertEquals("Lost Nodes", initialLost, cm.getNumLostNMs());
     Assert.assertEquals("Unhealthy Nodes",
@@ -542,7 +542,8 @@ public class TestRMNodeTransitions {
     RMNodeImpl node = getRunningNode(nmVersion1);
     Assert.assertEquals(nmVersion1, node.getNodeManagerVersion());
     RMNodeImpl reconnectingNode = getRunningNode(nmVersion2);
-    node.handle(new RMNodeReconnectEvent(node.getNodeID(), reconnectingNode));
+    node.handle(new RMNodeReconnectEvent(node.getNodeID(), reconnectingNode,
+        null));
     Assert.assertEquals(nmVersion2, node.getNodeManagerVersion());
   }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java
index a61f23f5a71..5d3e51a398e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java
@@ -55,13 +55,13 @@ import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
+import org.apache.hadoop.yarn.server.records.Version;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.ApplicationAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.ApplicationState;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMDTSecretManagerState;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
@@ -111,8 +111,8 @@ public class RMStateStoreTestBase extends ClientBaseWithFixes{
   interface RMStateStoreHelper {
     RMStateStore getRMStateStore() throws Exception;
     boolean isFinalStateValid() throws Exception;
-    void writeVersion(RMStateVersion version) throws Exception;
-    RMStateVersion getCurrentVersion() throws Exception;
+    void writeVersion(Version version) throws Exception;
+    Version getCurrentVersion() throws Exception;
     boolean appExists(RMApp app) throws Exception;
   }
 
@@ -477,13 +477,13 @@ public class RMStateStoreTestBase extends ClientBaseWithFixes{
     store.setRMDispatcher(new TestDispatcher());
 
     // default version
-    RMStateVersion defaultVersion = stateStoreHelper.getCurrentVersion();
+    Version defaultVersion = stateStoreHelper.getCurrentVersion();
     store.checkVersion();
     Assert.assertEquals(defaultVersion, store.loadVersion());
 
     // compatible version
-    RMStateVersion compatibleVersion =
-        RMStateVersion.newInstance(defaultVersion.getMajorVersion(),
+    Version compatibleVersion =
+        Version.newInstance(defaultVersion.getMajorVersion(),
           defaultVersion.getMinorVersion() + 2);
     stateStoreHelper.writeVersion(compatibleVersion);
     Assert.assertEquals(compatibleVersion, store.loadVersion());
@@ -492,8 +492,8 @@ public class RMStateStoreTestBase extends ClientBaseWithFixes{
     Assert.assertEquals(defaultVersion, store.loadVersion());
 
     // incompatible version
-    RMStateVersion incompatibleVersion =
-        RMStateVersion.newInstance(defaultVersion.getMajorVersion() + 2,
+    Version incompatibleVersion =
+        Version.newInstance(defaultVersion.getMajorVersion() + 2,
           defaultVersion.getMinorVersion());
     stateStoreHelper.writeVersion(incompatibleVersion);
     try {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java
index f5b3e8a8a67..88e5393f14d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java
@@ -36,9 +36,9 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl;
+import org.apache.hadoop.yarn.server.records.Version;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.RMStateVersionPBImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.util.ConverterUtils;
@@ -70,7 +70,7 @@ public class TestFSRMStateStore extends RMStateStoreTestBase {
       return new Path(new Path(workingDirPathURI, ROOT_DIR_NAME), VERSION_NODE);
     }
 
-    public RMStateVersion getCurrentVersion() {
+    public Version getCurrentVersion() {
       return CURRENT_VERSION_INFO;
     }
 
@@ -111,13 +111,13 @@ public class TestFSRMStateStore extends RMStateStoreTestBase {
     }
 
     @Override
-    public void writeVersion(RMStateVersion version) throws Exception {
-      store.updateFile(store.getVersionNode(), ((RMStateVersionPBImpl) version)
+    public void writeVersion(Version version) throws Exception {
+      store.updateFile(store.getVersionNode(), ((VersionPBImpl) version)
         .getProto().toByteArray());
     }
 
     @Override
-    public RMStateVersion getCurrentVersion() throws Exception {
+    public Version getCurrentVersion() throws Exception {
       return store.getCurrentVersion();
     }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
index 1dee533ac05..3c7170adaeb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
@@ -32,9 +32,9 @@ import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.service.Service;
 import org.apache.hadoop.yarn.conf.HAUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.records.Version;
+import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.RMStateVersionPBImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.data.Stat;
@@ -69,7 +69,7 @@ public class TestZKRMStateStore extends RMStateStoreTestBase {
       return znodeWorkingPath + "/" + ROOT_ZNODE_NAME + "/" + VERSION_NODE;
     }
 
-    public RMStateVersion getCurrentVersion() {
+    public Version getCurrentVersion() {
       return CURRENT_VERSION_INFO;
     }
 
@@ -96,13 +96,13 @@ public class TestZKRMStateStore extends RMStateStoreTestBase {
     }
 
     @Override
-    public void writeVersion(RMStateVersion version) throws Exception {
-      client.setData(store.getVersionNode(), ((RMStateVersionPBImpl) version)
+    public void writeVersion(Version version) throws Exception {
+      client.setData(store.getVersionNode(), ((VersionPBImpl) version)
        .getProto().toByteArray(), -1);
     }
 
     @Override
-    public RMStateVersion getCurrentVersion() throws Exception {
+    public Version getCurrentVersion() throws Exception {
       return store.getCurrentVersion();
     }
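For context on the RMStateVersion to Version rename in the state-store tests above: the compatibility rule those tests exercise is that a stored state whose major version matches the running ResourceManager's is loadable regardless of minor version, while a different major version is rejected by store.checkVersion(). The standalone sketch below only illustrates that convention and is not part of the patch; the isCompatible helper and the class name are hypothetical stand-ins for the store's internal check, only Version.newInstance/getMajorVersion/getMinorVersion are taken from the patch, and it assumes hadoop-yarn-server-common (which provides Version and VersionPBImpl) is on the classpath.

import org.apache.hadoop.yarn.server.records.Version;

public class VersionCompatibilitySketch {

  // Hypothetical helper mirroring what the tests above expect from
  // store.checkVersion(): only the major version has to match for a
  // previously stored RM state to be considered loadable.
  static boolean isCompatible(Version current, Version stored) {
    return current.getMajorVersion() == stored.getMajorVersion();
  }

  public static void main(String[] args) {
    Version current = Version.newInstance(1, 0);

    // Minor-version drift is tolerated (the "compatible version" case).
    Version compatible =
        Version.newInstance(current.getMajorVersion(),
          current.getMinorVersion() + 2);

    // A major-version bump is not (the "incompatible version" case,
    // where the tests expect checkVersion() to fail).
    Version incompatible =
        Version.newInstance(current.getMajorVersion() + 2,
          current.getMinorVersion());

    System.out.println(isCompatible(current, compatible));   // true
    System.out.println(isCompatible(current, incompatible)); // false
  }
}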