diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 4e6dc469ebe..10bbc2f30ef 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -118,6 +118,9 @@ Trunk (Unreleased) HADOOP-10325. Improve jenkins javadoc warnings from test-patch.sh (cmccabe) + HADOOP-10342. Add a new method to UGI to use a Kerberos login subject to + build a new UGI. (Larry McCay via omalley) + BUG FIXES HADOOP-9451. Fault single-layer config if node group topology is enabled. @@ -298,6 +301,18 @@ Trunk (Unreleased) HADOOP-8589. ViewFs tests fail when tests and home dirs are nested (sanjay Radia) +Release 2.5.0 - UNRELEASED + + INCOMPATIBLE CHANGES + + NEW FEATURES + + IMPROVEMENTS + + OPTIMIZATIONS + + BUG FIXES + Release 2.4.0 - UNRELEASED INCOMPATIBLE CHANGES @@ -342,6 +357,8 @@ Release 2.4.0 - UNRELEASED HADOOP-10249. LdapGroupsMapping should trim ldap password read from file. (Dilli Armugam via suresh) + HADOOP-10346. Deadlock while logging tokens (jlowe) + Release 2.3.1 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java index 87746bbe396..f5c814d7c1d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java @@ -279,7 +279,10 @@ public class CommonConfigurationKeysPublic { 60; // HTTP policies to be used in configuration + // Use HttpPolicy.name() instead + @Deprecated public static final String HTTP_POLICY_HTTP_ONLY = "HTTP_ONLY"; + @Deprecated public static final String HTTP_POLICY_HTTPS_ONLY = "HTTPS_ONLY"; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java index d323f764359..15008addc8f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java @@ -28,7 +28,6 @@ @InterfaceAudience.Private @InterfaceStability.Unstable public class HttpConfig { - private static Policy policy; public enum Policy { HTTP_ONLY, HTTPS_ONLY, @@ -52,28 +51,4 @@ public boolean isHttpsEnabled() { return this == HTTPS_ONLY || this == HTTP_AND_HTTPS; } } - - static { - Configuration conf = new Configuration(); - boolean sslEnabled = conf.getBoolean( - CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_KEY, - CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_DEFAULT); - policy = sslEnabled ? Policy.HTTPS_ONLY : Policy.HTTP_ONLY; - } - - public static void setPolicy(Policy policy) { - HttpConfig.policy = policy; - } - - public static boolean isSecure() { - return policy == Policy.HTTPS_ONLY; - } - - public static String getSchemePrefix() { - return (isSecure()) ? "https://" : "http://"; - } - - public static String getScheme(Policy policy) { - return policy == Policy.HTTPS_ONLY ? 
"https://" : "http://"; - } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java index 45d0cd1f466..b3cc04c4eb0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java @@ -649,7 +649,7 @@ public Object run() throws IOException, InterruptedException { // try re-login if (UserGroupInformation.isLoginKeytabBased()) { UserGroupInformation.getLoginUser().reloginFromKeytab(); - } else { + } else if (UserGroupInformation.isLoginTicketBased()) { UserGroupInformation.getLoginUser().reloginFromTicketCache(); } // have granularity of milliseconds diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index 178a4723506..a8bbd6c25e5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -702,6 +702,35 @@ public static UserGroupInformation getUGIFromTicketCache( } } + /** + * Create a UserGroupInformation from a Subject with Kerberos principal. + * + * @param user The KerberosPrincipal to use in UGI + * + * @throws IOException if the kerberos login fails + */ + public static UserGroupInformation getUGIFromSubject(Subject subject) + throws IOException { + if (subject == null) { + throw new IOException("Subject must not be null"); + } + + if (subject.getPrincipals(KerberosPrincipal.class).isEmpty()) { + throw new IOException("Provided Subject must contain a KerberosPrincipal"); + } + + KerberosPrincipal principal = + subject.getPrincipals(KerberosPrincipal.class).iterator().next(); + + User ugiUser = new User(principal.getName(), + AuthenticationMethod.KERBEROS, null); + subject.getPrincipals().add(ugiUser); + UserGroupInformation ugi = new UserGroupInformation(subject); + ugi.setLogin(null); + ugi.setAuthenticationMethod(AuthenticationMethod.KERBEROS); + return ugi; + } + /** * Get the currently logged in user. * @return the logged in user @@ -1101,6 +1130,14 @@ public synchronized static boolean isLoginKeytabBased() throws IOException { return getLoginUser().isKeytab; } + /** + * Did the login happen via ticket cache + * @return true or false + */ + public static boolean isLoginTicketBased() throws IOException { + return getLoginUser().isKrbTkt; + } + /** * Create a user from a login name. It is intended to be used for remote * users in RPC, since it won't have any credentials. 
@@ -1619,5 +1656,4 @@ public static void main(String [] args) throws Exception { System.out.println("Keytab " + loginUser.isKeytab); } } - } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java index 14d81910b51..3944ad12825 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java @@ -105,18 +105,21 @@ public byte[] getIdentifier() { return identifier; } - private static synchronized Class + private static Class getClassForIdentifier(Text kind) { - if (tokenKindMap == null) { - tokenKindMap = Maps.newHashMap(); - for (TokenIdentifier id : ServiceLoader.load(TokenIdentifier.class)) { - tokenKindMap.put(id.getKind(), id.getClass()); + Class cls = null; + synchronized (Token.class) { + if (tokenKindMap == null) { + tokenKindMap = Maps.newHashMap(); + for (TokenIdentifier id : ServiceLoader.load(TokenIdentifier.class)) { + tokenKindMap.put(id.getKind(), id.getClass()); + } } + cls = tokenKindMap.get(kind); } - Class cls = tokenKindMap.get(kind); if (cls == null) { LOG.warn("Cannot find class for token kind " + kind); - return null; + return null; } return cls; } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java index e862db4acd9..0df0fe7f259 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java @@ -90,10 +90,6 @@ public boolean accept(Path file) { public FSMainOperationsBaseTest() { } - public FSMainOperationsBaseTest(String testRootDir) { - super(testRootDir); - } - @Before public void setUp() throws Exception { fSys = createFileSystem(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java index f3753c3d9d0..0074b01f909 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java @@ -49,7 +49,7 @@ public FileContextTestHelper() { /** * Create a context with the given test root */ - public FileContextTestHelper(String testRootDir) { + private FileContextTestHelper(String testRootDir) { this.testRootDir = testRootDir; } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java index a5d8403c66a..fc058ca19bf 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java @@ -52,7 +52,7 @@ public FileSystemTestHelper() { /** * Create helper with the specified test root dir */ - public FileSystemTestHelper(String testRootDir) { + private FileSystemTestHelper(String testRootDir) { this.testRootDir = testRootDir; } diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithSecurityOn.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithSecurityOn.java index a1585933515..ae7bf084b8d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithSecurityOn.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithSecurityOn.java @@ -17,8 +17,14 @@ package org.apache.hadoop.security; import java.io.IOException; +import java.security.PrivilegedAction; +import java.util.Set; + +import javax.security.auth.kerberos.KerberosPrincipal; import junit.framework.Assert; +import static org.junit.Assert.*; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; @@ -72,4 +78,40 @@ public void testLogin() throws IOException { ex.printStackTrace(); } } + + @Test + public void testGetUGIFromKerberosSubject() throws IOException { + String user1keyTabFilepath = System.getProperty("kdc.resource.dir") + + "/keytabs/user1.keytab"; + + UserGroupInformation ugi = UserGroupInformation + .loginUserFromKeytabAndReturnUGI("user1@EXAMPLE.COM", + user1keyTabFilepath); + Set principals = ugi.getSubject().getPrincipals( + KerberosPrincipal.class); + if (principals.isEmpty()) { + Assert.fail("There should be a kerberos principal in the subject."); + } + else { + UserGroupInformation ugi2 = UserGroupInformation.getUGIFromSubject( + ugi.getSubject()); + if (ugi2 != null) { + ugi2.doAs(new PrivilegedAction() { + + @Override + public Object run() { + try { + UserGroupInformation ugi3 = UserGroupInformation.getCurrentUser(); + String doAsUserName = ugi3.getUserName(); + assertEquals(doAsUserName, "user1@EXAMPLE.COM"); + System.out.println("DO AS USERNAME: " + doAsUserName); + } catch (IOException e) { + e.printStackTrace(); + } + return null; + } + }); + } + } + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java index 35568c28839..614054f7c27 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java @@ -28,6 +28,7 @@ import org.junit.*; import javax.security.auth.Subject; +import javax.security.auth.kerberos.KerberosPrincipal; import javax.security.auth.login.AppConfigurationEntry; import javax.security.auth.login.LoginContext; import java.io.BufferedReader; @@ -768,6 +769,16 @@ public Void run() throws IOException { }); } + @Test (timeout = 30000) + public void testGetUGIFromSubject() throws Exception { + KerberosPrincipal p = new KerberosPrincipal("guest"); + Subject subject = new Subject(); + subject.getPrincipals().add(p); + UserGroupInformation ugi = UserGroupInformation.getUGIFromSubject(subject); + assertNotNull(ugi); + assertEquals("guest@DEFAULT.REALM", ugi.getUserName()); + } + /** Test hasSufficientTimeElapsed method */ @Test (timeout = 30000) public void testHasSufficientTimeElapsed() throws Exception { diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 8cc4ddd4a50..dd04a4baf94 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -259,59 +259,17 @@ 
Trunk (Unreleased) HDFS-5794. Fix the inconsistency of layout version number of ADD_DATANODE_AND_STORAGE_UUIDS between trunk and branch-2. (jing9) - BREAKDOWN OF HDFS-5698 SUBTASKS AND RELATED JIRAS +Release 2.5.0 - UNRELEASED - HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9) + INCOMPATIBLE CHANGES - HDFS-5738. Serialize INode information in protobuf. (Haohui Mai via jing9) + NEW FEATURES - HDFS-5772. Serialize under-construction file information in FSImage. (jing9) + IMPROVEMENTS - HDFS-5783. Compute the digest before loading FSImage. (Haohui Mai via jing9) + OPTIMIZATIONS - HDFS-5785. Serialize symlink in protobuf. (Haohui Mai via jing9) - - HDFS-5793. Optimize the serialization of PermissionStatus. (Haohui Mai via - jing9) - - HDFS-5743. Use protobuf to serialize snapshot information. (jing9) - - HDFS-5774. Serialize CachePool directives in protobuf. (Haohui Mai via jing9) - - HDFS-5744. Serialize information for token managers in protobuf. (Haohui Mai - via jing9) - - HDFS-5824. Add a Type field in Snapshot DiffEntry's protobuf definition. - (jing9) - - HDFS-5808. Implement cancellation when saving FSImage. (Haohui Mai via jing9) - - HDFS-5826. Update the stored edit logs to be consistent with the changes in - HDFS-5698 branch. (Haohui Mai via jing9) - - HDFS-5797. Implement offline image viewer. (Haohui Mai via jing9) - - HDFS-5771. Track progress when loading fsimage. (Haohui Mai via cnauroth) - - HDFS-5871. Use PBHelper to serialize CacheDirectiveInfoExpirationProto. - (Haohui Mai via jing9) - - HDFS-5884. LoadDelegator should use IOUtils.readFully() to read the magic - header. (Haohui Mai via jing9) - - HDFS-5885. Add annotation for repeated fields in the protobuf definition. - (Haohui Mai via jing9) - - HDFS-5906. Fixing findbugs and javadoc warnings in the HDFS-5698 branch. - (Haohui Mai via jing9) - - HDFS-5911. The id of a CacheDirective instance does not get serialized in - the protobuf-fsimage. (Haohui Mai via jing9) - - HDFS-5915. Refactor FSImageFormatProtobuf to simplify cross section reads. - (Haohui Mai via cnauroth) - - HDFS-5847. Consolidate INodeReference into a separate section. (jing9) + BUG FIXES Release 2.4.0 - UNRELEASED @@ -319,6 +277,9 @@ Release 2.4.0 - UNRELEASED NEW FEATURES + HDFS-5698. Use protobuf to serialize / deserialize FSImage. (See breakdown + of tasks below for features and contributors) + IMPROVEMENTS HDFS-5781. Use an array to record the mapping between FSEditLogOpCode and @@ -451,6 +412,90 @@ Release 2.4.0 - UNRELEASED HDFS-5943. 'dfs.namenode.https-address' property is not loaded from configuration in federation setup. (suresh) + HDFS-3128. Unit tests should not use a test root in /tmp. (wang) + + HDFS-5948. TestBackupNode flakes with port in use error. (Haohui Mai + via Arpit Agarwal) + + HDFS-5949. New Namenode UI when trying to download a file, the browser + doesn't know the file name. (Haohui Mai via brandonli) + + HDFS-5716. Allow WebHDFS to use pluggable authentication filter + (Haohui Mai via brandonli) + + HDFS-5953. TestBlockReaderFactory fails in trunk. (Akira Ajisaka via wang) + + HDFS-5759. Web UI does not show up during the period of loading FSImage. + (Haohui Mai via Arpit Agarwal) + + HDFS-5942. Fix javadoc in OfflineImageViewer. (Akira Ajisaka via cnauroth) + + HDFS-5780. TestRBWBlockInvalidation times out intemittently. (Mit Desai + via kihwal) + + HDFS-5803. TestBalancer.testBalancer0 fails. (Chen He via kihwal) + + HDFS-5893. 
HftpFileSystem.RangeHeaderUrlOpener uses the default + URLConnectionFactory which does not import SSL certificates. (Haohui Mai via + jing9) + + BREAKDOWN OF HDFS-5698 SUBTASKS AND RELATED JIRAS + + HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9) + + HDFS-5738. Serialize INode information in protobuf. (Haohui Mai via jing9) + + HDFS-5772. Serialize under-construction file information in FSImage. (jing9) + + HDFS-5783. Compute the digest before loading FSImage. (Haohui Mai via jing9) + + HDFS-5785. Serialize symlink in protobuf. (Haohui Mai via jing9) + + HDFS-5793. Optimize the serialization of PermissionStatus. (Haohui Mai via + jing9) + + HDFS-5743. Use protobuf to serialize snapshot information. (jing9) + + HDFS-5774. Serialize CachePool directives in protobuf. (Haohui Mai via jing9) + + HDFS-5744. Serialize information for token managers in protobuf. (Haohui Mai + via jing9) + + HDFS-5824. Add a Type field in Snapshot DiffEntry's protobuf definition. + (jing9) + + HDFS-5808. Implement cancellation when saving FSImage. (Haohui Mai via jing9) + + HDFS-5826. Update the stored edit logs to be consistent with the changes in + HDFS-5698 branch. (Haohui Mai via jing9) + + HDFS-5797. Implement offline image viewer. (Haohui Mai via jing9) + + HDFS-5771. Track progress when loading fsimage. (Haohui Mai via cnauroth) + + HDFS-5871. Use PBHelper to serialize CacheDirectiveInfoExpirationProto. + (Haohui Mai via jing9) + + HDFS-5884. LoadDelegator should use IOUtils.readFully() to read the magic + header. (Haohui Mai via jing9) + + HDFS-5885. Add annotation for repeated fields in the protobuf definition. + (Haohui Mai via jing9) + + HDFS-5906. Fixing findbugs and javadoc warnings in the HDFS-5698 branch. + (Haohui Mai via jing9) + + HDFS-5911. The id of a CacheDirective instance does not get serialized in + the protobuf-fsimage. (Haohui Mai via jing9) + + HDFS-5915. Refactor FSImageFormatProtobuf to simplify cross section reads. + (Haohui Mai via cnauroth) + + HDFS-5847. Consolidate INodeReference into a separate section. (jing9) + + HDFS-5959. Fix typo at section name in FSImageFormatProtobuf.java. 
+ (Akira Ajisaka via suresh) + Release 2.3.1 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 646d38bbb91..539bfc97838 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -21,6 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault; +import org.apache.hadoop.hdfs.web.AuthFilter; import org.apache.hadoop.http.HttpConfig; /** @@ -173,6 +174,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final int DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT = 2; public static final String DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY = "dfs.namenode.replication.max-streams-hard-limit"; public static final int DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT = 4; + public static final String DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY = "dfs.web.authentication.filter"; + public static final String DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT = AuthFilter.class.getName(); public static final String DFS_WEBHDFS_ENABLED_KEY = "dfs.webhdfs.enabled"; public static final boolean DFS_WEBHDFS_ENABLED_DEFAULT = true; public static final String DFS_WEBHDFS_USER_PATTERN_KEY = "dfs.webhdfs.user.provider.user.pattern"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java index 6888f5e7540..39ad1319583 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java @@ -234,7 +234,7 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) { inodeLoader.loadINodeSection(in); } break; - case INODE_REFRENCE: + case INODE_REFERENCE: snapshotLoader.loadINodeReferenceSection(in); break; case INODE_DIR: @@ -553,7 +553,7 @@ public enum SectionName { NS_INFO("NS_INFO"), STRING_TABLE("STRING_TABLE"), INODE("INODE"), - INODE_REFRENCE("INODE_REFRENCE"), + INODE_REFERENCE("INODE_REFERENCE"), SNAPSHOT("SNAPSHOT"), INODE_DIR("INODE_DIR"), FILES_UNDERCONSTRUCTION("FILES_UNDERCONSTRUCTION"), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java index 0a1cf1c9e60..1655c4781dc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java @@ -27,7 +27,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -61,18 +60,13 @@ private URL 
createRedirectURL(String path, String encodedPath, HdfsFileStatus st } else { hostname = host.getIpAddr(); } - int port = host.getInfoPort(); - if ("https".equals(scheme)) { - final Integer portObject = (Integer) getServletContext().getAttribute( - DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY); - if (portObject != null) { - port = portObject; - } - } + + int port = "https".equals(scheme) ? host.getInfoSecurePort() : host + .getInfoPort(); String dtParam = ""; if (dt != null) { - dtParam=JspHelper.getDelegationTokenUrlParam(dt); + dtParam = JspHelper.getDelegationTokenUrlParam(dt); } // Add namenode address to the url params diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java index b3a77112232..43952be5b61 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; -import org.apache.hadoop.hdfs.web.AuthFilter; import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; import org.apache.hadoop.hdfs.web.resources.Param; import org.apache.hadoop.hdfs.web.resources.UserParam; @@ -70,21 +69,27 @@ public class NameNodeHttpServer { private void initWebHdfs(Configuration conf) throws IOException { if (WebHdfsFileSystem.isEnabled(conf, HttpServer2.LOG)) { // set user pattern based on configuration file - UserParam.setUserPattern(conf.get(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT)); + UserParam.setUserPattern(conf.get( + DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, + DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT)); + + // add authentication filter for webhdfs + final String className = conf.get( + DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY, + DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT); + final String name = className; - // add SPNEGO authentication filter for webhdfs - final String name = "SPNEGO"; - final String classname = AuthFilter.class.getName(); final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*"; Map params = getAuthFilterParams(conf); - HttpServer2.defineFilter(httpServer.getWebAppContext(), name, classname, params, - new String[]{pathSpec}); - HttpServer2.LOG.info("Added filter '" + name + "' (class=" + classname + ")"); + HttpServer2.defineFilter(httpServer.getWebAppContext(), name, className, + params, new String[] { pathSpec }); + HttpServer2.LOG.info("Added filter '" + name + "' (class=" + className + + ")"); // add webhdfs packages - httpServer.addJerseyResourcePackage( - NamenodeWebHdfsMethods.class.getPackage().getName() - + ";" + Param.class.getPackage().getName(), pathSpec); + httpServer.addJerseyResourcePackage(NamenodeWebHdfsMethods.class + .getPackage().getName() + ";" + Param.class.getPackage().getName(), + pathSpec); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java index 660b0dc274b..86f2daaf0ae 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java @@ -383,7 +383,7 @@ public void serializeINodeReferenceSection(OutputStream out) INodeReferenceSection.INodeReference.Builder rb = buildINodeReference(ref); rb.build().writeDelimitedTo(out); } - parent.commitSection(headers, SectionName.INODE_REFRENCE); + parent.commitSection(headers, SectionName.INODE_REFERENCE); } private INodeReferenceSection.INodeReference.Builder buildINodeReference( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java index 61c4d5e22ca..7e1cea0e401 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java @@ -51,28 +51,16 @@ import com.google.common.io.LimitInputStream; /** - * This is the tool for analyzing file sizes in the namespace image. In order to - * run the tool one should define a range of integers [0, maxSize] by - * specifying maxSize and a step. The range of integers is - * divided into segments of size step: - * [0, s1, ..., sn-1, maxSize], and the visitor - * calculates how many files in the system fall into each segment - * [si-1, si). Note that files larger than - * maxSize always fall into the very last segment. + * LsrPBImage displays the blocks of the namespace in a format very similar + * to the output of ls/lsr. Entries are marked as directories or not, + * permissions listed, replication, username and groupname, along with size, + * modification date and full path. * - *

- * <h3>Input.</h3>
- * <ul>
- * <li>filename specifies the location of the image file;</li>
- * <li>maxSize determines the range [0, maxSize] of files
- * sizes considered by the visitor;</li>
- * <li>step the range is divided into segments of size step.</li>
- * </ul>
- *
- * <h3>Output.</h3>
The output file is formatted as a tab separated two column - * table: Size and NumFiles. Where Size represents the start of the segment, and - * numFiles is the number of files form the image which size falls in this - * segment. - * + * Note: A significant difference between the output of the lsr command + * and this image visitor is that this class cannot sort the file entries; + * they are listed in the order they are stored within the fsimage file. + * Therefore, the output of this class cannot be directly compared to the + * output of the lsr command. */ final class LsrPBImage { private final Configuration conf; @@ -127,7 +115,7 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) { case INODE: loadINodeSection(is); break; - case INODE_REFRENCE: + case INODE_REFERENCE: loadINodeReferenceSection(is); break; case INODE_DIR: diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java index d70f63710b1..1f43b5b3cd4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java @@ -55,28 +55,8 @@ import com.google.common.io.LimitInputStream; /** - * This is the tool for analyzing file sizes in the namespace image. In order to - * run the tool one should define a range of integers [0, maxSize] by - * specifying maxSize and a step. The range of integers is - * divided into segments of size step: - * [0, s1, ..., sn-1, maxSize], and the visitor - * calculates how many files in the system fall into each segment - * [si-1, si). Note that files larger than - * maxSize always fall into the very last segment. - * - *

- * <h3>Input.</h3>
- * <ul>
- * <li>filename specifies the location of the image file;</li>
- * <li>maxSize determines the range [0, maxSize] of files
- * sizes considered by the visitor;</li>
- * <li>step the range is divided into segments of size step.</li>
- * </ul>
- *
- * <h3>Output.</h3>
The output file is formatted as a tab separated two column - * table: Size and NumFiles. Where Size represents the start of the segment, and - * numFiles is the number of files form the image which size falls in this - * segment. - * + * PBImageXmlWriter walks over an fsimage structure and writes out + * an equivalent XML document that contains the fsimage's components. */ @InterfaceAudience.Private public final class PBImageXmlWriter { @@ -133,7 +113,7 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) { case INODE: dumpINodeSection(is); break; - case INODE_REFRENCE: + case INODE_REFERENCE: dumpINodeReferenceSection(is); break; case INODE_DIR: diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java index 83adc226dce..d27e507a580 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java @@ -344,14 +344,15 @@ protected String addDelegationTokenParam(String query) throws IOException { } static class RangeHeaderUrlOpener extends ByteRangeInputStream.URLOpener { - URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_SYSTEM_CONNECTION_FACTORY; + private final URLConnectionFactory connFactory; - RangeHeaderUrlOpener(final URL url) { + RangeHeaderUrlOpener(URLConnectionFactory connFactory, final URL url) { super(url); + this.connFactory = connFactory; } protected HttpURLConnection openConnection() throws IOException { - return (HttpURLConnection)connectionFactory.openConnection(url); + return (HttpURLConnection)connFactory.openConnection(url); } /** Use HTTP Range header for specifying offset. */ @@ -381,8 +382,9 @@ static class RangeHeaderInputStream extends ByteRangeInputStream { super(o, r); } - RangeHeaderInputStream(final URL url) { - this(new RangeHeaderUrlOpener(url), new RangeHeaderUrlOpener(null)); + RangeHeaderInputStream(URLConnectionFactory connFactory, final URL url) { + this(new RangeHeaderUrlOpener(connFactory, url), + new RangeHeaderUrlOpener(connFactory, null)); } @Override @@ -397,7 +399,7 @@ public FSDataInputStream open(Path f, int buffersize) throws IOException { String path = "/data" + ServletUtil.encodePath(f.toUri().getPath()); String query = addDelegationTokenParam("ugi=" + getEncodedUgiParameter()); URL u = getNamenodeURL(path, query); - return new FSDataInputStream(new RangeHeaderInputStream(u)); + return new FSDataInputStream(new RangeHeaderInputStream(connectionFactory, u)); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html index 90667716424..0df17b53889 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html @@ -151,7 +151,7 @@ {/fs} - +

<p>Current transaction ID: {nn.JournalTransactionInfo.LastAppliedOrWrittenTxId}</p>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js index 39450043cda..98e4ecc1b5d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js @@ -50,24 +50,23 @@ var data = {}; // Workarounds for the fact that JMXJsonServlet returns non-standard JSON strings - function data_workaround(d) { - d.nn.JournalTransactionInfo = JSON.parse(d.nn.JournalTransactionInfo); - d.nn.NameJournalStatus = JSON.parse(d.nn.NameJournalStatus); - d.nn.NameDirStatuses = JSON.parse(d.nn.NameDirStatuses); - d.nn.NodeUsage = JSON.parse(d.nn.NodeUsage); - d.nn.CorruptFiles = JSON.parse(d.nn.CorruptFiles); - return d; + function workaround(nn) { + nn.JournalTransactionInfo = JSON.parse(nn.JournalTransactionInfo); + nn.NameJournalStatus = JSON.parse(nn.NameJournalStatus); + nn.NameDirStatuses = JSON.parse(nn.NameDirStatuses); + nn.NodeUsage = JSON.parse(nn.NodeUsage); + nn.CorruptFiles = JSON.parse(nn.CorruptFiles); + return nn; } load_json( BEANS, - function(d) { + guard_with_startup_progress(function(d) { for (var k in d) { - data[k] = d[k].beans[0]; + data[k] = k === 'nn' ? workaround(d[k].beans[0]) : d[k].beans[0]; } - data = data_workaround(data); render(); - }, + }), function (url, jqxhr, text, err) { show_err_msg('

<p>Failed to retrieve data from ' + url + ', cause: ' + err + '</p>
'); }); @@ -92,6 +91,19 @@ show_err_msg('

<p>Failed to retrieve data from ' + url + ', cause: ' + err + '</p>
'); } + function guard_with_startup_progress(fn) { + return function() { + try { + fn.apply(this, arguments); + } catch (err) { + if (err instanceof TypeError) { + show_err_msg('NameNode is still loading. Redirecting to the Startup Progress page.'); + load_startup_progress(); + } + } + }; + } + function load_startup_progress() { function workaround(r) { function rename_property(o, s, d) { @@ -143,25 +155,29 @@ return r; } - $.get('/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo', function (resp) { - var data = workaround(resp.beans[0]); - dust.render('datanode-info', data, function(err, out) { - $('#tab-datanode').html(out); - $('#ui-tabs a[href="#tab-datanode"]').tab('show'); - }); - }).error(ajax_error_handler); + $.get( + '/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo', + guard_with_startup_progress(function (resp) { + var data = workaround(resp.beans[0]); + dust.render('datanode-info', data, function(err, out) { + $('#tab-datanode').html(out); + $('#ui-tabs a[href="#tab-datanode"]').tab('show'); + }); + })).error(ajax_error_handler); } $('a[href="#tab-datanode"]').click(load_datanode_info); function load_snapshot_info() { - $.get('/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState', function (resp) { - var data = JSON.parse(resp.beans[0].SnapshotStats); - dust.render('snapshot-info', data, function(err, out) { - $('#tab-snapshot').html(out); - $('#ui-tabs a[href="#tab-snapshot"]').tab('show'); - }); - }).error(ajax_error_handler); + $.get( + '/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState', + guard_with_startup_progress(function (resp) { + var data = JSON.parse(resp.beans[0].SnapshotStats); + dust.render('snapshot-info', data, function(err, out) { + $('#tab-snapshot').html(out); + $('#ui-tabs a[href="#tab-snapshot"]').tab('show'); + }); + })).error(ajax_error_handler); } $('#ui-tabs a[href="#tab-snapshot"]').click(load_snapshot_info); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js index 9d1ca663527..5d802104ffd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js @@ -124,7 +124,7 @@ $('#file-info-tail').hide(); $('#file-info-title').text("File information - " + path); - var download_url = '/webhdfs/v1' + abs_path + '/?op=OPEN'; + var download_url = '/webhdfs/v1' + abs_path + '?op=OPEN'; $('#file-info-download').attr('href', download_url); $('#file-info-preview').click(function() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java index 40dbc4e88c9..6ef7d6be1c1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java @@ -40,7 +40,7 @@ public class TestFcHdfsCreateMkdir extends @Override protected FileContextTestHelper createFileContextHelper() { - return new FileContextTestHelper("/tmp/TestFcHdfsCreateMkdir"); + return new FileContextTestHelper(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java index 9f0b23cedfe..1a90b0c6cf2 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java @@ -35,7 +35,7 @@ public class TestFcHdfsPermission extends FileContextPermissionBase { private static final FileContextTestHelper fileContextTestHelper = - new FileContextTestHelper("/tmp/TestFcHdfsPermission"); + new FileContextTestHelper(); private static FileContext fc; private static MiniDFSCluster cluster; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java index b6833dabba7..e9d6a5c9ccb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java @@ -43,7 +43,7 @@ public class TestFcHdfsSetUMask { private static FileContextTestHelper fileContextTestHelper = - new FileContextTestHelper("/tmp/TestFcHdfsSetUMask"); + new FileContextTestHelper(); private static MiniDFSCluster cluster; private static Path defaultWorkingDirectory; private static FileContext fc; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java index 80e180b4d8f..5f63ec930d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java @@ -49,7 +49,7 @@ public class TestHDFSFileContextMainOperations extends @Override protected FileContextTestHelper createFileContextHelper() { - return new FileContextTestHelper("/tmp/TestHDFSFileContextMainOperations"); + return new FileContextTestHelper(); } @BeforeClass diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java index cfd4a8d418c..a243fe959b2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java @@ -50,7 +50,7 @@ * underlying file system as Hdfs. 
*/ public class TestResolveHdfsSymlink { - private static File TEST_ROOT_DIR = PathUtils.getTestDir(TestResolveHdfsSymlink.class); + private static FileContextTestHelper helper = new FileContextTestHelper(); private static MiniDFSCluster cluster = null; @BeforeClass @@ -82,13 +82,14 @@ public void testFcResolveAfs() throws IOException, InterruptedException { FileContext fcHdfs = FileContext.getFileContext(cluster.getFileSystem() .getUri()); + final String localTestRoot = helper.getAbsoluteTestRootDir(fcLocal); Path alphaLocalPath = new Path(fcLocal.getDefaultFileSystem().getUri() - .toString(), new File(TEST_ROOT_DIR, "alpha").getAbsolutePath()); + .toString(), new File(localTestRoot, "alpha").getAbsolutePath()); DFSTestUtil.createFile(FileSystem.getLocal(conf), alphaLocalPath, 16, (short) 1, 2); Path linkTarget = new Path(fcLocal.getDefaultFileSystem().getUri() - .toString(), TEST_ROOT_DIR.getAbsolutePath()); + .toString(), localTestRoot); Path hdfsLink = new Path(fcHdfs.getDefaultFileSystem().getUri().toString(), "/tmp/link"); fcHdfs.createSymlink(linkTarget, hdfsLink, true); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java index e5a513394a9..2ba89116072 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java @@ -42,8 +42,7 @@ public void testSymlinkHdfsDisable() throws Exception { DistributedFileSystem dfs = cluster.getFileSystem(); FileContext fc = FileContext.getFileContext(cluster.getURI(0), conf); // Create test files/links - FileContextTestHelper helper = new FileContextTestHelper( - "/tmp/TestSymlinkHdfsDisable"); + FileContextTestHelper helper = new FileContextTestHelper(); Path root = helper.getTestRootPath(fc); Path target = new Path(root, "target"); Path link = new Path(root, "link"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java index a0722f7bc39..72ce5325c9c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java @@ -45,7 +45,7 @@ public class TestViewFileSystemAtHdfsRoot extends ViewFileSystemBaseTest { @Override protected FileSystemTestHelper createFileSystemHelper() { - return new FileSystemTestHelper("/tmp/TestViewFileSystemAtHdfsRoot"); + return new FileSystemTestHelper(); } @BeforeClass diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java index 013d8a6e1a3..3fc4a567f4b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java @@ -52,7 +52,7 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest { @Override protected FileSystemTestHelper createFileSystemHelper() { - return new FileSystemTestHelper("/tmp/TestViewFileSystemHdfs"); + return new FileSystemTestHelper(); } 
@BeforeClass diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java index 27020d8f7ba..ac0a267f18f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java @@ -46,7 +46,7 @@ public class TestViewFsAtHdfsRoot extends ViewFsBaseTest { @Override protected FileContextTestHelper createFileContextHelper() { - return new FileContextTestHelper("/tmp/TestViewFsAtHdfsRoot"); + return new FileContextTestHelper(); } @BeforeClass diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java index 8761762c9bb..f1da50647d9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java @@ -42,7 +42,7 @@ public class TestViewFsHdfs extends ViewFsBaseTest { @Override protected FileContextTestHelper createFileContextHelper() { - return new FileContextTestHelper("/tmp/TestViewFsHdfs"); + return new FileContextTestHelper(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java index 6b496e21865..aa0e8d00185 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java @@ -36,6 +36,7 @@ import org.apache.hadoop.net.unix.TemporarySocketDirectory; import org.junit.After; import org.junit.Assert; +import org.junit.Assume; import org.junit.Before; import org.junit.Test; @@ -47,6 +48,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC; +import static org.hamcrest.CoreMatchers.equalTo; public class TestBlockReaderFactory { static final Log LOG = LogFactory.getLog(TestBlockReaderFactory.class); @@ -56,6 +58,11 @@ public void init() { DomainSocket.disableBindPathValidation(); } + @Before + public void before() { + Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null)); + } + @After public void cleanup() { DFSInputStream.tcpReadsDisabledForTesting = false; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index f78eec13b25..653fa79cc85 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -74,7 +74,7 @@ public class TestBalancer { ClientProtocol client; - static final long TIMEOUT = 20000L; //msec + static final long TIMEOUT = 40000L; //msec static final double CAPACITY_ALLOWED_VARIANCE = 0.005; // 0.5% static final double 
BALANCE_ALLOWED_VARIANCE = 0.11; // 10%+delta static final int DEFAULT_BLOCK_SIZE = 10; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java index 2e5d70b0965..e909dc9d800 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java @@ -66,7 +66,7 @@ private static NumberReplicas countReplicas(final FSNamesystem namesystem, * datanode, namenode should ask to invalidate that corrupted block and * schedule replication for one more replica for that under replicated block. */ - @Test(timeout=60000) + @Test(timeout=600000) public void testBlockInvalidationWhenRBWReplicaMissedInDN() throws IOException, InterruptedException { // This test cannot pass on Windows due to file locking enforcement. It will @@ -75,7 +75,7 @@ public void testBlockInvalidationWhenRBWReplicaMissedInDN() Configuration conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2); - conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 100); + conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 300); conf.setLong(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1); conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2) @@ -104,23 +104,24 @@ public void testBlockInvalidationWhenRBWReplicaMissedInDN() metaFile.delete()); out.close(); - - // Check datanode has reported the corrupt block. - int corruptReplicas = 0; + + int liveReplicas = 0; while (true) { - if ((corruptReplicas = countReplicas(namesystem, blk).corruptReplicas()) > 0) { + if ((liveReplicas = countReplicas(namesystem, blk).liveReplicas()) < 2) { + // This confirms we have a corrupt replica + LOG.info("Live Replicas after corruption: " + liveReplicas); break; } Thread.sleep(100); } - assertEquals("There should be 1 replica in the corruptReplicasMap", 1, - corruptReplicas); - - // Check the block has got replicated to another datanode. - blk = DFSTestUtil.getFirstBlock(fs, testPath); - int liveReplicas = 0; + assertEquals("There should be less than 2 replicas in the " + + "liveReplicasMap", 1, liveReplicas); + while (true) { - if ((liveReplicas = countReplicas(namesystem, blk).liveReplicas()) > 1) { + if ((liveReplicas = + countReplicas(namesystem, blk).liveReplicas()) > 1) { + //Wait till the live replica count becomes equal to Replication Factor + LOG.info("Live Replicas after Rereplication: " + liveReplicas); break; } Thread.sleep(100); @@ -128,9 +129,9 @@ public void testBlockInvalidationWhenRBWReplicaMissedInDN() assertEquals("There should be two live replicas", 2, liveReplicas); - // sleep for 1 second, so that by this time datanode reports the corrupt + // sleep for 2 seconds, so that by this time datanode reports the corrupt // block after a live replica of block got replicated. - Thread.sleep(1000); + Thread.sleep(2000); // Check that there is no corrupt block in the corruptReplicasMap. 
assertEquals("There should not be any replica in the corruptReplicasMap", diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java index fc56eb48c24..f01be4b99f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java @@ -282,6 +282,7 @@ void testCheckpoint(StartupOption op) throws Exception { HAUtil.setAllowStandbyReads(conf, true); short replication = (short)conf.getInt("dfs.replication", 3); int numDatanodes = Math.max(3, replication); + conf.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, "localhost:0"); conf.set(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, "0"); conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // disable block scanner conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java index edfee360990..b4221c0249b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java @@ -97,12 +97,13 @@ public String getHeaderField(String field) { @Test public void testByteRange() throws IOException { + URLConnectionFactory factory = mock(URLConnectionFactory.class); HftpFileSystem.RangeHeaderUrlOpener ospy = spy( - new HftpFileSystem.RangeHeaderUrlOpener(new URL("http://test/"))); + new HftpFileSystem.RangeHeaderUrlOpener(factory, new URL("http://test/"))); doReturn(new MockHttpURLConnection(ospy.getURL())).when(ospy) .openConnection(); HftpFileSystem.RangeHeaderUrlOpener rspy = spy( - new HftpFileSystem.RangeHeaderUrlOpener((URL) null)); + new HftpFileSystem.RangeHeaderUrlOpener(factory, (URL) null)); doReturn(new MockHttpURLConnection(rspy.getURL())).when(rspy) .openConnection(); ByteRangeInputStream is = new HftpFileSystem.RangeHeaderInputStream(ospy, rspy); @@ -171,12 +172,15 @@ public void testByteRange() throws IOException { assertEquals("Should fail because incorrect response code was sent", "HTTP_OK expected, received 206", e.getMessage()); } + is.close(); } @Test public void testPropagatedClose() throws IOException { - ByteRangeInputStream brs = spy( - new HftpFileSystem.RangeHeaderInputStream(new URL("http://test/"))); + URLConnectionFactory factory = mock(URLConnectionFactory.class); + + ByteRangeInputStream brs = spy(new HftpFileSystem.RangeHeaderInputStream( + factory, new URL("http://test/"))); InputStream mockStream = mock(InputStream.class); doReturn(mockStream).when(brs).openInputStream(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java index ad2352b3e00..7c18627acf7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java @@ -52,7 +52,7 @@ public class 
TestFSMainOperationsWebHdfs extends FSMainOperationsBaseTest { private static FileSystem fileSystem; public TestFSMainOperationsWebHdfs() { - super("/tmp/TestFSMainOperationsWebHdfs"); + super(); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java index 0942ef26726..bfd26970718 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java @@ -19,6 +19,7 @@ import java.io.File; import java.io.InputStream; +import java.io.OutputStream; import java.net.InetSocketAddress; import java.net.URI; @@ -30,6 +31,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.http.HttpConfig; +import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.junit.AfterClass; import org.junit.Assert; @@ -65,9 +67,11 @@ public static void setUp() throws Exception { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); - cluster.getFileSystem().create(new Path("/test")).close(); + OutputStream os = cluster.getFileSystem().create(new Path("/test")); + os.write(23); + os.close(); InetSocketAddress addr = cluster.getNameNode().getHttpsAddress(); - nnAddr = addr.getHostName() + ":" + addr.getPort(); + nnAddr = NetUtils.getHostPortString(addr); conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, nnAddr); } @@ -82,6 +86,9 @@ public static void tearDown() throws Exception { public void testHsftpFileSystem() throws Exception { FileSystem fs = FileSystem.get(new URI("hsftp://" + nnAddr), conf); Assert.assertTrue(fs.exists(new Path("/test"))); + InputStream is = fs.open(new Path("/test")); + Assert.assertEquals(23, is.read()); + is.close(); fs.close(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithAuthenticationFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithAuthenticationFilter.java new file mode 100644 index 00000000000..09c78728a9e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithAuthenticationFilter.java @@ -0,0 +1,103 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.web; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.URI; + +import javax.servlet.Filter; +import javax.servlet.FilterChain; +import javax.servlet.FilterConfig; +import javax.servlet.ServletException; +import javax.servlet.ServletRequest; +import javax.servlet.ServletResponse; +import javax.servlet.http.HttpServletResponse; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.net.NetUtils; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +public class TestWebHdfsWithAuthenticationFilter { + private static boolean authorized = false; + + public static final class CustomizedFilter implements Filter { + @Override + public void init(FilterConfig filterConfig) throws ServletException { + } + + @Override + public void doFilter(ServletRequest request, ServletResponse response, + FilterChain chain) throws IOException, ServletException { + if (authorized) { + chain.doFilter(request, response); + } else { + ((HttpServletResponse) response) + .sendError(HttpServletResponse.SC_FORBIDDEN); + } + } + + @Override + public void destroy() { + } + + } + + private static Configuration conf; + private static MiniDFSCluster cluster; + private static FileSystem fs; + + @BeforeClass + public static void setUp() throws IOException { + conf = new Configuration(); + conf.set(DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY, + CustomizedFilter.class.getName()); + conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "localhost:0"); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + InetSocketAddress addr = cluster.getNameNode().getHttpAddress(); + fs = FileSystem.get( + URI.create("webhdfs://" + NetUtils.getHostPortString(addr)), conf); + cluster.waitActive(); + } + + @AfterClass + public static void tearDown() throws IOException { + fs.close(); + cluster.shutdown(); + } + + @Test + public void testWebHdfsAuthFilter() throws IOException { + // getFileStatus() is supposed to pass through with the default filter. 
+ authorized = false; + try { + fs.getFileStatus(new Path("/")); + Assert.fail("The filter fails to block the request"); + } catch (IOException e) { + } + authorized = true; + fs.getFileStatus(new Path("/")); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/PathUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/PathUtils.java index 2ee4aa1390b..ac0f632145b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/PathUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/PathUtils.java @@ -19,6 +19,7 @@ import java.io.File; +import org.apache.commons.lang.RandomStringUtils; import org.apache.hadoop.fs.Path; public class PathUtils { @@ -36,7 +37,10 @@ public static File getTestDir(Class caller) { } public static File getTestDir(Class caller, boolean create) { - File dir = new File(System.getProperty("test.build.data", "/tmp"), caller.getSimpleName()); + File dir = + new File(System.getProperty("test.build.data", "target/test/data") + + "/" + RandomStringUtils.randomAlphanumeric(10), + caller.getSimpleName()); if (create) { dir.mkdirs(); } diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 02099b7ce7a..6a2cc7cb283 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -139,6 +139,18 @@ Trunk (Unreleased) MAPREDUCE-5717. Task pings are interpreted as task progress (jlowe) +Release 2.5.0 - UNRELEASED + + INCOMPATIBLE CHANGES + + NEW FEATURES + + IMPROVEMENTS + + OPTIMIZATIONS + + BUG FIXES + Release 2.4.0 - UNRELEASED INCOMPATIBLE CHANGES @@ -165,6 +177,9 @@ Release 2.4.0 - UNRELEASED MAPREDUCE-5670. CombineFileRecordReader should report progress when moving to the next file (Chen He via jlowe) + MAPREDUCE-5757. ConcurrentModificationException in JobControl.toList + (jlowe) + Release 2.3.1 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java index 969e2fa7a13..ce1e4487d0e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java @@ -1387,7 +1387,8 @@ public static void main(String[] args) { // RM/NM to issue SSL certificates but definitely not MR-AM as it is // running in user-land. 
MRWebAppUtil.initialize(conf); - HttpConfig.setPolicy(HttpConfig.Policy.HTTP_ONLY); + conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY, + HttpConfig.Policy.HTTP_ONLY.name()); // log the system properties String systemPropsToLog = MRApps.getSystemPropertiesToLog(conf); if (systemPropsToLog != null) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java index 2cc233688b8..2e1a22e4310 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java @@ -21,6 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.http.HttpConfig; /** * Stores Job History configuration keys that can be set by administrators of @@ -135,7 +136,7 @@ public class JHAdminConfig { public static final String MR_HS_HTTP_POLICY = MR_HISTORY_PREFIX + "http.policy"; public static String DEFAULT_MR_HS_HTTP_POLICY = - CommonConfigurationKeysPublic.HTTP_POLICY_HTTP_ONLY; + HttpConfig.Policy.HTTP_ONLY.name(); /**The address the history server webapp is on.*/ public static final String MR_HISTORY_WEBAPP_ADDRESS = diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java index 49a0407d0eb..2d453f1d308 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java @@ -71,11 +71,13 @@ public static Policy getYARNHttpPolicy() { } public static String getYARNWebappScheme() { - return HttpConfig.getScheme(httpPolicyInYarn); + return httpPolicyInYarn == HttpConfig.Policy.HTTPS_ONLY ? "https://" + : "http://"; } - + public static String getJHSWebappScheme() { - return HttpConfig.getScheme(httpPolicyInJHS); + return httpPolicyInJHS == HttpConfig.Policy.HTTPS_ONLY ? 
"https://" + : "http://"; } public static void setJHSWebappURLWithoutScheme(Configuration conf, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java index 92490a59e0a..eaeadea6ff4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java @@ -45,6 +45,7 @@ import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo; import org.apache.hadoop.mapreduce.util.HostUtil; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.webapp.util.WebAppUtils; /** * HistoryViewer is used to parse and view the JobHistory files @@ -231,7 +232,8 @@ private void printAllTaskAttempts(TaskType taskType) { taskList.append("\t"); taskList.append(attempt.getHostname()).append("\t"); taskList.append(attempt.getError()); - String taskLogsUrl = getTaskLogsUrl(attempt); + String taskLogsUrl = getTaskLogsUrl( + WebAppUtils.getHttpSchemePrefix(fs.getConf()), attempt); taskList.append(taskLogsUrl != null ? taskLogsUrl : "n/a"); System.out.println(taskList.toString()); } @@ -446,7 +448,7 @@ private void printFailedAttempts(FilteredJob filteredJob) { * @return the taskLogsUrl. null if http-port or tracker-name or * task-attempt-id are unavailable. */ - public static String getTaskLogsUrl( + public static String getTaskLogsUrl(String scheme, JobHistoryParser.TaskAttemptInfo attempt) { if (attempt.getHttpPort() == -1 || attempt.getTrackerName().equals("") @@ -457,7 +459,7 @@ public static String getTaskLogsUrl( String taskTrackerName = HostUtil.convertTrackerNameToHostName( attempt.getTrackerName()); - return HostUtil.getTaskLogUrl(taskTrackerName, + return HostUtil.getTaskLogUrl(scheme, taskTrackerName, Integer.toString(attempt.getHttpPort()), attempt.getAttemptId().toString()); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java index ca4857ecb75..b0b7a3c119f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java @@ -79,7 +79,7 @@ public JobControl(String groupName) { this.runnerState = ThreadState.READY; } - synchronized private static List toList( + private static List toList( LinkedList jobs) { ArrayList retv = new ArrayList(); for (ControlledJob job : jobs) { @@ -122,11 +122,11 @@ public List getReadyJobsList() { /** * @return the jobs in the success state */ - public List getSuccessfulJobList() { + synchronized public List getSuccessfulJobList() { return toList(this.successfulJobs); } - public List getFailedJobList() { + synchronized public List getFailedJobList() { return toList(this.failedJobs); } diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/HostUtil.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/HostUtil.java index 0a42bb73a20..e131fc8933a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/HostUtil.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/HostUtil.java @@ -20,7 +20,6 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; -import org.apache.hadoop.http.HttpConfig; @Private @Unstable @@ -33,9 +32,9 @@ public class HostUtil { * @param taskAttemptID * @return the taskLogUrl */ - public static String getTaskLogUrl(String taskTrackerHostName, + public static String getTaskLogUrl(String scheme, String taskTrackerHostName, String httpPort, String taskAttemptID) { - return (HttpConfig.getSchemePrefix() + taskTrackerHostName + ":" + + return (scheme + taskTrackerHostName + ":" + httpPort + "/tasklog?attemptid=" + taskAttemptID); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java index 9c92bed6acb..1373f3cdc23 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java @@ -24,7 +24,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapreduce.MRConfig; import org.apache.hadoop.mapreduce.v2.hs.HistoryServerStateStoreService.HistoryServerState; @@ -121,7 +120,6 @@ protected void serviceInit(Configuration conf) throws Exception { // This is required for WebApps to use https if enabled. 
MRWebAppUtil.initialize(getConfig()); - HttpConfig.setPolicy(MRWebAppUtil.getJHSHttpPolicy()); try { doSecureLogin(conf); } catch(IOException ie) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java index 01047043380..00109c5d8e2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java @@ -230,9 +230,9 @@ public void run() { WebAppUtils.getRMWebAppURLWithoutScheme(getConfig())); LOG.info("MiniMRYARN HistoryServer address: " + getConfig().get(JHAdminConfig.MR_HISTORY_ADDRESS)); - LOG.info("MiniMRYARN HistoryServer web address: " + - getResolvedMRHistoryWebAppURLWithoutScheme(getConfig(), - HttpConfig.isSecure())); + LOG.info("MiniMRYARN HistoryServer web address: " + + getResolvedMRHistoryWebAppURLWithoutScheme(getConfig(), + MRWebAppUtil.getJHSHttpPolicy() == HttpConfig.Policy.HTTPS_ONLY)); } @Override diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 8b880d33f91..eafc295bfa6 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -15,6 +15,18 @@ Trunk - Unreleased YARN-524 TestYarnVersionInfo failing if generated properties doesn't include an SVN URL. (stevel) +Release 2.5.0 - UNRELEASED + + INCOMPATIBLE CHANGES + + NEW FEATURES + + IMPROVEMENTS + + OPTIMIZATIONS + + BUG FIXES + Release 2.4.0 - UNRELEASED INCOMPATIBLE CHANGES @@ -273,6 +285,16 @@ Release 2.4.0 - UNRELEASED at allocation time so as to prevent RM from shelling out containers with expired tokens. (Omkar Vinit Joshi and Jian He via vinodkv) + YARN-1553. Modified YARN and MR to stop using HttpConfig.isSecure() and + instead rely on the http policy framework. And also fix some bugs related + to https handling in YARN web-apps. (Haohui Mai via vinodkv) + + YARN-1721. When moving app between queues in Fair Scheduler, grab lock on + FSSchedulerApp (Sandy Ryza) + + YARN-1724. 
Race condition in Fair Scheduler when continuous scheduling is + turned on (Sandy Ryza) + Release 2.3.1 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java index b5a0b1a2077..e3114ceae1e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java @@ -100,7 +100,7 @@ private static void verifyAndSetRMHAIdsList(Configuration conf) { StringBuilder setValue = new StringBuilder(); for (String id: ids) { // verify the RM service addresses configurations for every RMIds - for (String prefix : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) { + for (String prefix : YarnConfiguration.getServiceAddressConfKeys(conf)) { checkAndSetRMRPCAddress(prefix, id, conf); } setValue.append(id); @@ -158,7 +158,7 @@ private static void verifyAndSetConfValue(String prefix, Configuration conf) { } public static void verifyAndSetAllServiceAddresses(Configuration conf) { - for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) { + for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) { verifyAndSetConfValue(confKey, conf); } } @@ -236,7 +236,7 @@ static String getRMHAIdsWarningMessage(String ids) { @InterfaceAudience.Private @VisibleForTesting static String getConfKeyForRMInstance(String prefix, Configuration conf) { - if (!YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS.contains(prefix)) { + if (!YarnConfiguration.getServiceAddressConfKeys(conf).contains(prefix)) { return prefix; } else { String RMId = getRMHAId(conf); @@ -289,7 +289,7 @@ private static void checkAndSetRMRPCAddress(String prefix, String RMId, hostNameConfKey + " or " + addSuffix(prefix, RMId))); } else { conf.set(addSuffix(prefix, RMId), confVal + ":" - + YarnConfiguration.getRMDefaultPortNumber(prefix)); + + YarnConfiguration.getRMDefaultPortNumber(prefix, conf)); } } } catch (IllegalArgumentException iae) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 8c8ad16e8e4..9612cac15e5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -26,10 +26,8 @@ import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; -import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.util.StringUtils; @@ -187,6 +185,8 @@ public class YarnConfiguration extends Configuration { /** The https address of the RM web application.*/ public static final String RM_WEBAPP_HTTPS_ADDRESS = RM_PREFIX + "webapp.https.address"; + public static final boolean YARN_SSL_CLIENT_HTTPS_NEED_AUTH_DEFAULT = 
false; + public static final String YARN_SSL_SERVER_RESOURCE_DEFAULT = "ssl-server.xml"; public static final int DEFAULT_RM_WEBAPP_HTTPS_PORT = 8090; public static final String DEFAULT_RM_WEBAPP_HTTPS_ADDRESS = "0.0.0.0:" @@ -361,15 +361,21 @@ public class YarnConfiguration extends Configuration { public static final String DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS = "org.apache.hadoop.yarn.LocalConfigurationProvider"; - @Private - public static final List RM_SERVICES_ADDRESS_CONF_KEYS = + private static final List RM_SERVICES_ADDRESS_CONF_KEYS_HTTP = Collections.unmodifiableList(Arrays.asList( RM_ADDRESS, RM_SCHEDULER_ADDRESS, RM_ADMIN_ADDRESS, RM_RESOURCE_TRACKER_ADDRESS, - HttpConfig.isSecure() ? RM_WEBAPP_HTTPS_ADDRESS - : RM_WEBAPP_ADDRESS)); + RM_WEBAPP_ADDRESS)); + + private static final List RM_SERVICES_ADDRESS_CONF_KEYS_HTTPS = + Collections.unmodifiableList(Arrays.asList( + RM_ADDRESS, + RM_SCHEDULER_ADDRESS, + RM_ADMIN_ADDRESS, + RM_RESOURCE_TRACKER_ADDRESS, + RM_WEBAPP_HTTPS_ADDRESS)); public static final String AUTO_FAILOVER_PREFIX = RM_HA_PREFIX + "automatic-failover."; @@ -1102,10 +1108,9 @@ public class YarnConfiguration extends Configuration { YARN_PREFIX + "client.max-nodemanagers-proxies"; public static final int DEFAULT_NM_CLIENT_MAX_NM_PROXIES = 500; - public static final String YARN_HTTP_POLICY_KEY = - YARN_PREFIX + "http.policy"; - public static final String YARN_HTTP_POLICY_DEFAULT = - CommonConfigurationKeysPublic.HTTP_POLICY_HTTP_ONLY; + public static final String YARN_HTTP_POLICY_KEY = YARN_PREFIX + "http.policy"; + public static final String YARN_HTTP_POLICY_DEFAULT = HttpConfig.Policy.HTTP_ONLY + .name(); public YarnConfiguration() { super(); @@ -1118,6 +1123,12 @@ public YarnConfiguration(Configuration conf) { } } + @Private + public static List getServiceAddressConfKeys(Configuration conf) { + return useHttps(conf) ? RM_SERVICES_ADDRESS_CONF_KEYS_HTTPS + : RM_SERVICES_ADDRESS_CONF_KEYS_HTTP; + } + /** * Get the socket address for name property as a * InetSocketAddress. @@ -1130,7 +1141,7 @@ public YarnConfiguration(Configuration conf) { public InetSocketAddress getSocketAddr( String name, String defaultAddress, int defaultPort) { String address; - if (HAUtil.isHAEnabled(this) && RM_SERVICES_ADDRESS_CONF_KEYS.contains(name)) { + if (HAUtil.isHAEnabled(this) && getServiceAddressConfKeys(this).contains(name)) { address = HAUtil.getConfValueForRMInstance(name, defaultAddress, this); } else { address = get(name, defaultAddress); @@ -1149,7 +1160,8 @@ public InetSocketAddress updateConnectAddr(String name, } @Private - public static int getRMDefaultPortNumber(String addressPrefix) { + public static int getRMDefaultPortNumber(String addressPrefix, + Configuration conf) { if (addressPrefix.equals(YarnConfiguration.RM_ADDRESS)) { return YarnConfiguration.DEFAULT_RM_PORT; } else if (addressPrefix.equals(YarnConfiguration.RM_SCHEDULER_ADDRESS)) { @@ -1167,7 +1179,13 @@ public static int getRMDefaultPortNumber(String addressPrefix) { throw new HadoopIllegalArgumentException( "Invalid RM RPC address Prefix: " + addressPrefix + ". 
The valid value should be one of " - + YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS); + + getServiceAddressConfKeys(conf)); } } + + public static boolean useHttps(Configuration conf) { + return HttpConfig.Policy.HTTPS_ONLY == HttpConfig.Policy.fromString(conf + .get(YARN_HTTP_POLICY_KEY, + YARN_HTTP_POLICY_DEFAULT)); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java index 9fcc2bd6e3d..3269b8b0131 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java @@ -29,7 +29,6 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities; import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntity; import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors; @@ -65,12 +64,17 @@ public TimelineClientImpl() { } protected void serviceInit(Configuration conf) throws Exception { - resURI = new URI(JOINER.join(HttpConfig.getSchemePrefix(), - HttpConfig.isSecure() ? conf.get( - YarnConfiguration.AHS_WEBAPP_HTTPS_ADDRESS, - YarnConfiguration.DEFAULT_AHS_WEBAPP_HTTPS_ADDRESS) : conf.get( - YarnConfiguration.AHS_WEBAPP_ADDRESS, - YarnConfiguration.DEFAULT_AHS_WEBAPP_ADDRESS), RESOURCE_URI_STR)); + if (YarnConfiguration.useHttps(conf)) { + resURI = URI + .create(JOINER.join("https://", conf.get( + YarnConfiguration.AHS_WEBAPP_HTTPS_ADDRESS, + YarnConfiguration.DEFAULT_AHS_WEBAPP_HTTPS_ADDRESS), + RESOURCE_URI_STR)); + } else { + resURI = URI.create(JOINER.join("http://", conf.get( + YarnConfiguration.AHS_WEBAPP_ADDRESS, + YarnConfiguration.DEFAULT_AHS_WEBAPP_ADDRESS), RESOURCE_URI_STR)); + } super.serviceInit(conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java index 317baec4a31..590cd962f12 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java @@ -37,7 +37,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.security.AdminACLsManager; +import org.apache.hadoop.yarn.webapp.util.WebAppUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -216,9 +218,11 @@ public void setup() { System.exit(1); } } - HttpServer2.Builder builder = new HttpServer2.Builder().setName(name) - .addEndpoint(URI.create("http://" + bindAddress + ":" + port)) - .setConf(conf).setFindPort(findPort) + HttpServer2.Builder builder = new HttpServer2.Builder() + .setName(name) + .addEndpoint( + URI.create(WebAppUtils.getHttpSchemePrefix(conf) + bindAddress + + ":" + port)).setConf(conf).setFindPort(findPort) 
.setACL(new AdminACLsManager(conf).getAdminAcl()) .setPathSpec(pathList.toArray(new String[0])); @@ -231,6 +235,11 @@ public void setup() { .setKeytabConfKey(spnegoKeytabKey) .setSecurityEnabled(UserGroupInformation.isSecurityEnabled()); } + + if (YarnConfiguration.useHttps(conf)) { + WebAppUtils.loadSslConfiguration(builder); + } + HttpServer2 server = builder.build(); for(ServletStruct struct: servlets) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java index 4a288c44af9..a7dce0732c0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java @@ -26,20 +26,16 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.http.HttpConfig.Policy; +import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.util.ConverterUtils; -import com.google.common.base.Joiner; - @Private @Evolving public class WebAppUtils { - private static final Joiner JOINER = Joiner.on(""); - public static void setRMWebAppPort(Configuration conf, int port) { String hostname = getRMWebAppURLWithoutScheme(conf); hostname = @@ -51,7 +47,7 @@ public static void setRMWebAppPort(Configuration conf, int port) { public static void setRMWebAppHostnameAndPort(Configuration conf, String hostname, int port) { String resolvedAddress = hostname + ":" + port; - if (HttpConfig.isSecure()) { + if (YarnConfiguration.useHttps(conf)) { conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS, resolvedAddress); } else { conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, resolvedAddress); @@ -60,7 +56,7 @@ public static void setRMWebAppHostnameAndPort(Configuration conf, public static void setNMWebAppHostNameAndPort(Configuration conf, String hostName, int port) { - if (HttpConfig.isSecure()) { + if (YarnConfiguration.useHttps(conf)) { conf.set(YarnConfiguration.NM_WEBAPP_HTTPS_ADDRESS, hostName + ":" + port); } else { @@ -70,16 +66,11 @@ public static void setNMWebAppHostNameAndPort(Configuration conf, } public static String getRMWebAppURLWithScheme(Configuration conf) { - return JOINER.join(HttpConfig.getSchemePrefix(), - HttpConfig.isSecure() ? 
conf.get( - YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS, - YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_ADDRESS) : conf.get( - YarnConfiguration.RM_WEBAPP_ADDRESS, - YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS)); + return getHttpSchemePrefix(conf) + getRMWebAppURLWithoutScheme(conf); } public static String getRMWebAppURLWithoutScheme(Configuration conf) { - if (HttpConfig.isSecure()) { + if (YarnConfiguration.useHttps(conf)) { return conf.get(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS, YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_ADDRESS); }else { @@ -97,13 +88,13 @@ public static String getProxyHostAndPort(Configuration conf) { } public static String getResolvedRMWebAppURLWithScheme(Configuration conf) { - return HttpConfig.getSchemePrefix() + return getHttpSchemePrefix(conf) + getResolvedRMWebAppURLWithoutScheme(conf); } public static String getResolvedRMWebAppURLWithoutScheme(Configuration conf) { return getResolvedRMWebAppURLWithoutScheme(conf, - HttpConfig.isSecure() ? Policy.HTTPS_ONLY : Policy.HTTP_ONLY); + YarnConfiguration.useHttps(conf) ? Policy.HTTPS_ONLY : Policy.HTTP_ONLY); } public static String getResolvedRMWebAppURLWithoutScheme(Configuration conf, @@ -140,7 +131,7 @@ public static String getResolvedRMWebAppURLWithoutScheme(Configuration conf, } public static String getNMWebAppURLWithoutScheme(Configuration conf) { - if (HttpConfig.isSecure()) { + if (YarnConfiguration.useHttps(conf)) { return conf.get(YarnConfiguration.NM_WEBAPP_HTTPS_ADDRESS, YarnConfiguration.DEFAULT_NM_WEBAPP_HTTPS_ADDRESS); } else { @@ -150,7 +141,7 @@ public static String getNMWebAppURLWithoutScheme(Configuration conf) { } public static String getAHSWebAppURLWithoutScheme(Configuration conf) { - if (HttpConfig.isSecure()) { + if (YarnConfiguration.useHttps(conf)) { return conf.get(YarnConfiguration.AHS_WEBAPP_HTTPS_ADDRESS, YarnConfiguration.DEFAULT_AHS_WEBAPP_HTTPS_ADDRESS); } else { @@ -177,8 +168,38 @@ public static String getURLWithScheme(String schemePrefix, String url) { public static String getLogUrl(String nodeHttpAddress, String allocatedNode, ContainerId containerId, String user) { - return join(HttpConfig.getSchemePrefix(), nodeHttpAddress, "/logs", "/", + return join("//", nodeHttpAddress, "/logs", "/", allocatedNode, "/", ConverterUtils.toString(containerId), "/", ConverterUtils.toString(containerId), "/", user); } + + /** + * Choose which scheme (HTTP or HTTPS) to use when generating a URL based on + * the configuration. + * + * @return the scheme (HTTP / HTTPS) + */ + public static String getHttpSchemePrefix(Configuration conf) { + return YarnConfiguration.useHttps(conf) ? "https://" : "http://"; + } + + /** + * Load the SSL keystore / truststore into the HttpServer builder.
+ */ + public static HttpServer2.Builder loadSslConfiguration( + HttpServer2.Builder builder) { + Configuration sslConf = new Configuration(false); + boolean needsClientAuth = YarnConfiguration.YARN_SSL_CLIENT_HTTPS_NEED_AUTH_DEFAULT; + sslConf.addResource(YarnConfiguration.YARN_SSL_SERVER_RESOURCE_DEFAULT); + + return builder + .needsClientAuth(needsClientAuth) + .keyPassword(sslConf.get("ssl.server.keystore.keypassword")) + .keyStore(sslConf.get("ssl.server.keystore.location"), + sslConf.get("ssl.server.keystore.password"), + sslConf.get("ssl.server.keystore.type", "jks")) + .trustStore(sslConf.get("ssl.server.truststore.location"), + sslConf.get("ssl.server.truststore.password"), + sslConf.get("ssl.server.truststore.type", "jks")); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestHAUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestHAUtil.java index 891b434262f..6ced5f26326 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestHAUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestHAUtil.java @@ -54,7 +54,7 @@ public void setUp() { conf.set(YarnConfiguration.RM_HA_IDS, RM_NODE_IDS_UNTRIMMED); conf.set(YarnConfiguration.RM_HA_ID, RM1_NODE_ID_UNTRIMMED); - for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) { + for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) { // configuration key itself cannot contains space/tab/return chars. conf.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS_UNTRIMMED); conf.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS); @@ -95,7 +95,7 @@ public void testVerifyAndSetConfiguration() throws Exception { StringUtils.getStringCollection(RM_NODE_IDS), HAUtil.getRMHAIds(conf)); assertEquals("Should be saved as Trimmed string", RM1_NODE_ID, HAUtil.getRMHAId(conf)); - for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) { + for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) { assertEquals("RPC address not set for " + confKey, RM1_ADDRESS, conf.get(confKey)); } @@ -117,7 +117,7 @@ public void testVerifyAndSetConfiguration() throws Exception { // simulate the case YarnConfiguration.RM_HA_ID is not set conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID); - for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) { + for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) { conf.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS); conf.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS); } @@ -134,7 +134,7 @@ public void testVerifyAndSetConfiguration() throws Exception { conf.set(YarnConfiguration.RM_HA_ID, RM_INVALID_NODE_ID); conf.set(YarnConfiguration.RM_HA_IDS, RM_INVALID_NODE_ID + "," + RM1_NODE_ID); - for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) { + for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) { // simulate xml with invalid node id conf.set(confKey + RM_INVALID_NODE_ID, RM_INVALID_NODE_ID); } @@ -169,7 +169,7 @@ public void testVerifyAndSetConfiguration() throws Exception { conf.clear(); conf.set(YarnConfiguration.RM_HA_IDS, RM2_NODE_ID + "," + RM3_NODE_ID); conf.set(YarnConfiguration.RM_HA_ID, RM1_NODE_ID_UNTRIMMED); - for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) { + for (String confKey : 
YarnConfiguration.getServiceAddressConfKeys(conf)) { conf.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS_UNTRIMMED); conf.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS); conf.set(HAUtil.addSuffix(confKey, RM3_NODE_ID), RM3_ADDRESS); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java index 9a15fe78369..09bb95816ff 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java @@ -163,7 +163,7 @@ protected void render(Block html) { .append(startTime) .append("\",\"") .append( nodeLink == null ? "N/A" : StringEscapeUtils diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java index a169c125a38..d9a540815b2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java @@ -28,8 +28,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.http.HttpConfig; -import org.apache.hadoop.http.HttpConfig.Policy; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.service.CompositeService; @@ -397,15 +395,8 @@ public static void main(String[] args) { StringUtils.startupShutdownMessage(NodeManager.class, args, LOG); NodeManager nodeManager = new NodeManager(); Configuration conf = new YarnConfiguration(); - setHttpPolicy(conf); nodeManager.initAndStartNodeManager(conf, false); } - - private static void setHttpPolicy(Configuration conf) { - HttpConfig.setPolicy(Policy.fromString(conf.get( - YarnConfiguration.YARN_HTTP_POLICY_KEY, - YarnConfiguration.YARN_HTTP_POLICY_DEFAULT))); - } @VisibleForTesting @Private diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java index 424da04d2be..c198ae6b0fe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java @@ -19,7 +19,6 @@ package org.apache.hadoop.yarn.server.nodemanager.webapp; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.http.HttpConfig; import 
org.apache.hadoop.yarn.webapp.YarnWebParams; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 5ef58a74d8e..3aa11c55296 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -1015,7 +1015,6 @@ public static void main(String argv[]) { ShutdownHookManager.get().addShutdownHook( new CompositeServiceShutdownHook(resourceManager), SHUTDOWN_HOOK_PRIORITY); - setHttpPolicy(conf); resourceManager.init(conf); resourceManager.start(); } catch (Throwable t) { @@ -1023,12 +1022,6 @@ public static void main(String argv[]) { System.exit(-1); } } - - private static void setHttpPolicy(Configuration conf) { - HttpConfig.setPolicy(Policy.fromString(conf.get( - YarnConfiguration.YARN_HTTP_POLICY_KEY, - YarnConfiguration.YARN_HTTP_POLICY_DEFAULT))); - } /** * Register the handlers for alwaysOn services diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index 4ca8c28243a..88c9ba5591c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -503,10 +503,11 @@ private String generateProxyUriWithScheme( final String trackingUriWithoutScheme) { this.readLock.lock(); try { + final String scheme = WebAppUtils.getHttpSchemePrefix(conf); URI trackingUri = StringUtils.isEmpty(trackingUriWithoutScheme) ? 
null : - ProxyUriUtils.getUriFromAMUrl(trackingUriWithoutScheme); + ProxyUriUtils.getUriFromAMUrl(scheme, trackingUriWithoutScheme); String proxy = WebAppUtils.getProxyHostAndPort(conf); - URI proxyUri = ProxyUriUtils.getUriFromAMUrl(proxy); + URI proxyUri = ProxyUriUtils.getUriFromAMUrl(scheme, proxy); URI result = ProxyUriUtils.getProxyUri(trackingUri, proxyUri, applicationAttemptId.getApplicationId()); return result.toASCIIString(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index e057e740fb8..e23de7b3e90 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -989,7 +989,13 @@ private synchronized void nodeUpdate(RMNode nm) { private void continuousScheduling() { while (true) { List nodeIdList = new ArrayList(nodes.keySet()); - Collections.sort(nodeIdList, nodeAvailableResourceComparator); + // Sort the nodes by space available on them, so that we offer + // containers on emptier nodes first, facilitating an even spread. This + // requires holding the scheduler lock, so that the space available on a + // node doesn't change during the sort. + synchronized (this) { + Collections.sort(nodeIdList, nodeAvailableResourceComparator); + } // iterate all nodes for (NodeId nodeId : nodeIdList) { @@ -1366,24 +1372,26 @@ public synchronized String moveApplication(ApplicationId appId, throw new YarnException("App to be moved " + appId + " not found."); } FSSchedulerApp attempt = (FSSchedulerApp) app.getCurrentAppAttempt(); - - FSLeafQueue oldQueue = (FSLeafQueue) app.getQueue(); - FSLeafQueue targetQueue = queueMgr.getLeafQueue(queueName, false); - if (targetQueue == null) { - throw new YarnException("Target queue " + queueName - + " not found or is not a leaf queue."); + // To serialize with FairScheduler#allocate, synchronize on app attempt + synchronized (attempt) { + FSLeafQueue oldQueue = (FSLeafQueue) app.getQueue(); + FSLeafQueue targetQueue = queueMgr.getLeafQueue(queueName, false); + if (targetQueue == null) { + throw new YarnException("Target queue " + queueName + + " not found or is not a leaf queue."); + } + if (targetQueue == oldQueue) { + return oldQueue.getQueueName(); + } + + if (oldQueue.getRunnableAppSchedulables().contains( + attempt.getAppSchedulable())) { + verifyMoveDoesNotViolateConstraints(attempt, oldQueue, targetQueue); + } + + executeMove(app, attempt, oldQueue, targetQueue); + return targetQueue.getQueueName(); } - if (targetQueue == oldQueue) { - return oldQueue.getQueueName(); - } - - if (oldQueue.getRunnableAppSchedulables().contains( - attempt.getAppSchedulable())) { - verifyMoveDoesNotViolateConstraints(attempt, oldQueue, targetQueue); - } - - executeMove(app, attempt, oldQueue, targetQueue); - return targetQueue.getQueueName(); } private void verifyMoveDoesNotViolateConstraints(FSSchedulerApp app, @@ -1420,8 +1428,8 @@ private void verifyMoveDoesNotViolateConstraints(FSSchedulerApp app, } /** - * Helper for 
moveApplication, which is synchronized, so all operations will - * be atomic. + * Helper for moveApplication, which has appropriate synchronization, so all + * operations will be atomic. */ private void executeMove(SchedulerApplication app, FSSchedulerApp attempt, FSLeafQueue oldQueue, FSLeafQueue newQueue) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java index 445a5a2ca99..ac8578ec65c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java @@ -27,7 +27,7 @@ import java.util.Collection; -import org.apache.hadoop.http.HttpConfig; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; @@ -46,6 +46,7 @@ import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.util.WebAppUtils; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import org.apache.hadoop.yarn.webapp.view.InfoBlock; @@ -55,13 +56,16 @@ public class AppBlock extends HtmlBlock { private ApplicationACLsManager aclsManager; private QueueACLsManager queueACLsManager; + private final Configuration conf; @Inject AppBlock(ResourceManager rm, ViewContext ctx, - ApplicationACLsManager aclsManager, QueueACLsManager queueACLsManager) { + ApplicationACLsManager aclsManager, QueueACLsManager queueACLsManager, + Configuration conf) { super(ctx); this.aclsManager = aclsManager; this.queueACLsManager = queueACLsManager; + this.conf = conf; } @Override @@ -86,7 +90,7 @@ protected void render(Block html) { puts("Application not found: "+ aid); return; } - AppInfo app = new AppInfo(rmApp, true); + AppInfo app = new AppInfo(rmApp, true, WebAppUtils.getHttpSchemePrefix(conf)); // Check for the authorization. String remoteUser = request().getRemoteUser(); @@ -146,7 +150,7 @@ protected void render(Block html) { table.tr((odd = !odd) ? _ODD : _EVEN). td(String.valueOf(attemptInfo.getAttemptId())). td(Times.format(attemptInfo.getStartTime())). - td().a(".nodelink", url(HttpConfig.getSchemePrefix(), + td().a(".nodelink", url("//", attemptInfo.getNodeHttpAddress()), attemptInfo.getNodeHttpAddress())._(). td().a(".logslink", url(attemptInfo.getLogsLink()), "logs")._(). 
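
Editorial note, not part of the patch: the hunks above replace the removed static HttpConfig state with per-configuration lookups. A minimal sketch of that lookup pattern follows; the class name SchemeLookupSketch is invented for illustration, while the configuration key and helper methods are the ones introduced or touched by this patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;

public class SchemeLookupSketch {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    // yarn.http.policy now drives the scheme choice; HTTPS_ONLY selects "https://".
    conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY, "HTTPS_ONLY");
    // Per-call lookup replaces the removed static HttpConfig.getSchemePrefix().
    String scheme = WebAppUtils.getHttpSchemePrefix(conf);
    // The RM web-app address is resolved through the same policy-aware helpers.
    System.out.println(scheme + WebAppUtils.getRMWebAppURLWithoutScheme(conf));
  }
}
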
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java index cec95ac2288..4f644d1a39d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java @@ -28,6 +28,7 @@ import java.util.concurrent.ConcurrentMap; import org.apache.commons.lang.StringEscapeUtils; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; @@ -36,16 +37,19 @@ import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.util.WebAppUtils; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; class AppsBlock extends HtmlBlock { final ConcurrentMap apps; + private final Configuration conf; -@Inject AppsBlock(RMContext rmContext, ViewContext ctx) { +@Inject AppsBlock(RMContext rmContext, ViewContext ctx, Configuration conf) { super(ctx); apps = rmContext.getRMApps(); + this.conf = conf; } @Override public void render(Block html) { @@ -79,7 +83,7 @@ class AppsBlock extends HtmlBlock { if (reqAppStates != null && !reqAppStates.contains(app.createApplicationState())) { continue; } - AppInfo appInfo = new AppInfo(app, true); + AppInfo appInfo = new AppInfo(app, true, WebAppUtils.getHttpSchemePrefix(conf)); String percent = String.format("%.1f", appInfo.getProgress()); //AppID numerical value parsed by parseHadoopID in yarn.dt.plugins.js appsTableData.append("[\" trackingUriPlugins; private final String rmAppPageUrlBase; + private final transient YarnConfiguration conf; private static class _ implements Hamlet._ { //Empty @@ -90,7 +91,7 @@ public HTML html() { public WebAppProxyServlet() { super(); - YarnConfiguration conf = new YarnConfiguration(); + conf = new YarnConfiguration(); this.trackingUriPlugins = conf.getInstances(YarnConfiguration.YARN_TRACKING_URL_GENERATOR, TrackingUriPlugin.class); @@ -300,7 +301,8 @@ protected void doGet(HttpServletRequest req, HttpServletResponse resp) return; } else { if (ProxyUriUtils.getSchemeFromUrl(original).isEmpty()) { - trackingUri = ProxyUriUtils.getUriFromAMUrl("http", original); + trackingUri = ProxyUriUtils.getUriFromAMUrl( + WebAppUtils.getHttpSchemePrefix(conf), original); } else { trackingUri = new URI(original); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java index 7f81f9ba785..e35ed8410c8 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java @@ -24,7 +24,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.FilterContainer; import org.apache.hadoop.http.FilterInitializer; -import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; @@ -39,7 +38,7 @@ public void initFilter(FilterContainer container, Configuration conf) { String[] parts = proxy.split(":"); params.put(AmIpFilter.PROXY_HOST, parts[0]); params.put(AmIpFilter.PROXY_URI_BASE, - HttpConfig.getSchemePrefix() + proxy + + WebAppUtils.getHttpSchemePrefix(conf) + proxy + System.getenv(ApplicationConstants.APPLICATION_WEB_PROXY_BASE_ENV)); container.addFilter(FILTER_NAME, FILTER_CLASS, params); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java index f39ab3ecd10..c53d098f4b7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java @@ -288,8 +288,9 @@ public void start() { YarnConfiguration.DEFAULT_YARN_ADMIN_ACL)); proxyServer = new HttpServer2.Builder() .setName("proxy") - .addEndpoint(URI.create("http://" + bindAddress + ":0")) - .setFindPort(true) + .addEndpoint( + URI.create(WebAppUtils.getHttpSchemePrefix(conf) + bindAddress + + ":0")).setFindPort(true) .setConf(conf) .setACL(acl) .build();
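
Editorial note, not part of the patch: a minimal sketch, under the assumptions named below, of how a YARN web endpoint is assembled once the scheme comes from yarn.http.policy rather than global HttpConfig state. It combines getHttpSchemePrefix, useHttps and loadSslConfiguration from the changes above; the class and method names SecureEndpointSketch and buildWebServer are illustrative only.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;

public class SecureEndpointSketch {
  // Builds (but does not start) an HttpServer2 whose scheme follows
  // yarn.http.policy instead of a process-wide HTTP policy setting.
  public static HttpServer2 buildWebServer(Configuration conf,
      String bindAddress) throws Exception {
    HttpServer2.Builder builder = new HttpServer2.Builder()
        .setName("sketch")
        .addEndpoint(URI.create(
            WebAppUtils.getHttpSchemePrefix(conf) + bindAddress + ":0"))
        .setFindPort(true)
        .setConf(conf);
    if (YarnConfiguration.useHttps(conf)) {
      // Pulls keystore/truststore settings from ssl-server.xml via the new
      // WebAppUtils.loadSslConfiguration helper added in this patch.
      WebAppUtils.loadSslConfiguration(builder);
    }
    return builder.build();
  }
}
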