Merge r1555021 through r1569522 from trunk.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5535@1569524 13f79535-47bb-0310-9956-ffa450edef68
commit 99b4caa888
@@ -118,6 +118,9 @@ Trunk (Unreleased)

    HADOOP-10325. Improve jenkins javadoc warnings from test-patch.sh (cmccabe)

+    HADOOP-10342. Add a new method to UGI to use a Kerberos login subject to
+    build a new UGI. (Larry McCay via omalley)
+
  BUG FIXES

    HADOOP-9451. Fault single-layer config if node group topology is enabled.
@@ -298,6 +301,18 @@ Trunk (Unreleased)

    HADOOP-8589. ViewFs tests fail when tests and home dirs are nested (sanjay Radia)

+Release 2.5.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.4.0 - UNRELEASED

  INCOMPATIBLE CHANGES
@@ -342,6 +357,8 @@ Release 2.4.0 - UNRELEASED
    HADOOP-10249. LdapGroupsMapping should trim ldap password read from file.
    (Dilli Armugam via suresh)

+    HADOOP-10346. Deadlock while logging tokens (jlowe)
+
 Release 2.3.1 - UNRELEASED

  INCOMPATIBLE CHANGES
@@ -279,7 +279,10 @@ public class CommonConfigurationKeysPublic {
     60;

   // HTTP policies to be used in configuration
+  // Use HttpPolicy.name() instead
+  @Deprecated
   public static final String HTTP_POLICY_HTTP_ONLY = "HTTP_ONLY";
+  @Deprecated
   public static final String HTTP_POLICY_HTTPS_ONLY = "HTTPS_ONLY";
 }

@@ -28,7 +28,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class HttpConfig {
-  private static Policy policy;
   public enum Policy {
     HTTP_ONLY,
     HTTPS_ONLY,
@@ -52,28 +51,4 @@ public class HttpConfig {
       return this == HTTPS_ONLY || this == HTTP_AND_HTTPS;
     }
   }
-
-  static {
-    Configuration conf = new Configuration();
-    boolean sslEnabled = conf.getBoolean(
-        CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_KEY,
-        CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_DEFAULT);
-    policy = sslEnabled ? Policy.HTTPS_ONLY : Policy.HTTP_ONLY;
-  }
-
-  public static void setPolicy(Policy policy) {
-    HttpConfig.policy = policy;
-  }
-
-  public static boolean isSecure() {
-    return policy == Policy.HTTPS_ONLY;
-  }
-
-  public static String getSchemePrefix() {
-    return (isSecure()) ? "https://" : "http://";
-  }
-
-  public static String getScheme(Policy policy) {
-    return policy == Policy.HTTPS_ONLY ? "https://" : "http://";
-  }
 }
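HttpConfig previously initialized a mutable static policy from HADOOP_SSL_ENABLED_KEY and exposed it through isSecure()/getSchemePrefix(); after this change only the Policy enum remains and callers carry the policy explicitly. A minimal sketch of that caller-side style, assuming a hypothetical configuration key (the key name and helper below are illustrative, not APIs introduced by this diff):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpConfig;

public class PolicyLookupExample {
  // Hypothetical key used only for this illustration.
  private static final String POLICY_KEY = "example.http.policy";

  // Resolve the policy from configuration instead of consulting the
  // removed HttpConfig static state.
  static HttpConfig.Policy resolvePolicy(Configuration conf) {
    String name = conf.get(POLICY_KEY, HttpConfig.Policy.HTTP_ONLY.name());
    return HttpConfig.Policy.valueOf(name);
  }

  // Mirrors the behaviour of the removed getScheme(Policy) helper.
  static String schemeFor(HttpConfig.Policy policy) {
    return policy == HttpConfig.Policy.HTTPS_ONLY ? "https://" : "http://";
  }
}
```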
@@ -649,7 +649,7 @@ public class Client {
           // try re-login
           if (UserGroupInformation.isLoginKeytabBased()) {
             UserGroupInformation.getLoginUser().reloginFromKeytab();
-          } else {
+          } else if (UserGroupInformation.isLoginTicketBased()) {
             UserGroupInformation.getLoginUser().reloginFromTicketCache();
           }
           // have granularity of milliseconds
@@ -702,6 +702,35 @@ public class UserGroupInformation {
     }
   }

+  /**
+   * Create a UserGroupInformation from a Subject with Kerberos principal.
+   *
+   * @param user The KerberosPrincipal to use in UGI
+   *
+   * @throws IOException if the kerberos login fails
+   */
+  public static UserGroupInformation getUGIFromSubject(Subject subject)
+      throws IOException {
+    if (subject == null) {
+      throw new IOException("Subject must not be null");
+    }
+
+    if (subject.getPrincipals(KerberosPrincipal.class).isEmpty()) {
+      throw new IOException("Provided Subject must contain a KerberosPrincipal");
+    }
+
+    KerberosPrincipal principal =
+        subject.getPrincipals(KerberosPrincipal.class).iterator().next();
+
+    User ugiUser = new User(principal.getName(),
+        AuthenticationMethod.KERBEROS, null);
+    subject.getPrincipals().add(ugiUser);
+    UserGroupInformation ugi = new UserGroupInformation(subject);
+    ugi.setLogin(null);
+    ugi.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
+    return ugi;
+  }
+
   /**
    * Get the currently logged in user.
    * @return the logged in user
@@ -1101,6 +1130,14 @@ public class UserGroupInformation {
     return getLoginUser().isKeytab;
   }

+  /**
+   * Did the login happen via ticket cache
+   * @return true or false
+   */
+  public static boolean isLoginTicketBased() throws IOException {
+    return getLoginUser().isKrbTkt;
+  }
+
   /**
    * Create a user from a login name. It is intended to be used for remote
    * users in RPC, since it won't have any credentials.
@@ -1619,5 +1656,4 @@ public class UserGroupInformation {
     System.out.println("Keytab " + loginUser.isKeytab);
     }
   }
-
 }
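A minimal sketch of how the new getUGIFromSubject/isLoginTicketBased entry points might be used with an externally managed Kerberos login; the JAAS entry name and the work done inside doAs are assumptions for illustration, not part of this change:

```java
import java.security.PrivilegedExceptionAction;

import javax.security.auth.Subject;
import javax.security.auth.login.LoginContext;

import org.apache.hadoop.security.UserGroupInformation;

public class ExternalKerberosLogin {
  public static void main(String[] args) throws Exception {
    // Obtain a Subject from an existing JAAS Kerberos login; the
    // configuration entry name "ExampleKrbLogin" is an assumption.
    LoginContext login = new LoginContext("ExampleKrbLogin");
    login.login();
    Subject subject = login.getSubject();

    // Wrap the externally managed Subject in a UGI (added by HADOOP-10342).
    UserGroupInformation ugi = UserGroupInformation.getUGIFromSubject(subject);

    // Run work as that user; isLoginTicketBased()/isLoginKeytabBased()
    // report how the login user was established.
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        System.out.println("Running as "
            + UserGroupInformation.getCurrentUser().getUserName());
        return null;
      }
    });
  }
}
```

Note that the Subject must already contain a KerberosPrincipal; otherwise getUGIFromSubject throws an IOException, as the checks added above show.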
@@ -105,18 +105,21 @@ public class Token<T extends TokenIdentifier> implements Writable {
     return identifier;
   }

-  private static synchronized Class<? extends TokenIdentifier>
+  private static Class<? extends TokenIdentifier>
       getClassForIdentifier(Text kind) {
-    if (tokenKindMap == null) {
-      tokenKindMap = Maps.newHashMap();
-      for (TokenIdentifier id : ServiceLoader.load(TokenIdentifier.class)) {
-        tokenKindMap.put(id.getKind(), id.getClass());
+    Class<? extends TokenIdentifier> cls = null;
+    synchronized (Token.class) {
+      if (tokenKindMap == null) {
+        tokenKindMap = Maps.newHashMap();
+        for (TokenIdentifier id : ServiceLoader.load(TokenIdentifier.class)) {
+          tokenKindMap.put(id.getKind(), id.getClass());
+        }
       }
+      cls = tokenKindMap.get(kind);
     }
-    Class<? extends TokenIdentifier> cls = tokenKindMap.get(kind);
     if (cls == null) {
       LOG.warn("Cannot find class for token kind " + kind);
       return null;
     }
     return cls;
   }
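The method-level synchronized keyword is dropped in favour of an explicit block on Token.class that covers both the lazy ServiceLoader scan and the map lookup; this is the HADOOP-10346 deadlock fix listed in CHANGES.txt above. A generic sketch of the lazily initialized registry pattern used here, with illustrative names rather than the Hadoop types:

```java
import java.util.HashMap;
import java.util.Map;

public class LazyRegistry {
  // Populated on first use; guarded by the LazyRegistry.class monitor.
  private static Map<String, Class<?>> registry;

  static Class<?> lookup(String kind) {
    Class<?> cls;
    synchronized (LazyRegistry.class) {
      if (registry == null) {
        registry = new HashMap<String, Class<?>>();
        // The real code fills the map from a ServiceLoader scan here.
        registry.put("example", Object.class);
      }
      cls = registry.get(kind);
    }
    return cls;
  }
}
```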
@@ -90,10 +90,6 @@ public abstract class FSMainOperationsBaseTest extends FileSystemTestHelper {
   public FSMainOperationsBaseTest() {
   }

-  public FSMainOperationsBaseTest(String testRootDir) {
-    super(testRootDir);
-  }
-
   @Before
   public void setUp() throws Exception {
     fSys = createFileSystem();
@@ -49,7 +49,7 @@ public final class FileContextTestHelper {
   /**
    * Create a context with the given test root
    */
-  public FileContextTestHelper(String testRootDir) {
+  private FileContextTestHelper(String testRootDir) {
     this.testRootDir = testRootDir;
   }

@@ -52,7 +52,7 @@ public class FileSystemTestHelper {
   /**
    * Create helper with the specified test root dir
    */
-  public FileSystemTestHelper(String testRootDir) {
+  private FileSystemTestHelper(String testRootDir) {
     this.testRootDir = testRootDir;
   }

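These constructors are made private to support HDFS-3128 ("Unit tests should not use a test root in /tmp"): tests switch to the no-argument helpers, which generate a test root for the run instead of taking a caller-supplied /tmp path. A short sketch of the updated test idiom, assuming a local FileContext purely for illustration:

```java
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileContextTestHelper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class TestRootExample {
  public static void main(String[] args) throws Exception {
    // No explicit "/tmp/..." argument any more; the helper picks the root.
    FileContextTestHelper helper = new FileContextTestHelper();
    FileContext fc = FileContext.getLocalFSFileContext();

    // Test files hang off the generated test root.
    Path root = helper.getTestRootPath(fc);
    fc.mkdir(new Path(root, "target"), FsPermission.getDirDefault(), true);
  }
}
```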
@@ -17,8 +17,14 @@
 package org.apache.hadoop.security;

 import java.io.IOException;
+import java.security.PrivilegedAction;
+import java.util.Set;
+
+import javax.security.auth.kerberos.KerberosPrincipal;
+
 import junit.framework.Assert;
+import static org.junit.Assert.*;


 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
@@ -72,4 +78,40 @@ public class TestUGIWithSecurityOn {
       ex.printStackTrace();
     }
   }
+
+  @Test
+  public void testGetUGIFromKerberosSubject() throws IOException {
+    String user1keyTabFilepath = System.getProperty("kdc.resource.dir")
+        + "/keytabs/user1.keytab";
+
+    UserGroupInformation ugi = UserGroupInformation
+        .loginUserFromKeytabAndReturnUGI("user1@EXAMPLE.COM",
+            user1keyTabFilepath);
+    Set<KerberosPrincipal> principals = ugi.getSubject().getPrincipals(
+        KerberosPrincipal.class);
+    if (principals.isEmpty()) {
+      Assert.fail("There should be a kerberos principal in the subject.");
+    }
+    else {
+      UserGroupInformation ugi2 = UserGroupInformation.getUGIFromSubject(
+          ugi.getSubject());
+      if (ugi2 != null) {
+        ugi2.doAs(new PrivilegedAction<Object>() {
+
+          @Override
+          public Object run() {
+            try {
+              UserGroupInformation ugi3 = UserGroupInformation.getCurrentUser();
+              String doAsUserName = ugi3.getUserName();
+              assertEquals(doAsUserName, "user1@EXAMPLE.COM");
+              System.out.println("DO AS USERNAME: " + doAsUserName);
+            } catch (IOException e) {
+              e.printStackTrace();
+            }
+            return null;
+          }
+        });
+      }
+    }
+  }
 }
@@ -28,6 +28,7 @@ import org.apache.hadoop.util.Shell;
 import org.junit.*;

 import javax.security.auth.Subject;
+import javax.security.auth.kerberos.KerberosPrincipal;
 import javax.security.auth.login.AppConfigurationEntry;
 import javax.security.auth.login.LoginContext;
 import java.io.BufferedReader;
@@ -768,6 +769,16 @@ public class TestUserGroupInformation {
     });
   }

+  @Test (timeout = 30000)
+  public void testGetUGIFromSubject() throws Exception {
+    KerberosPrincipal p = new KerberosPrincipal("guest");
+    Subject subject = new Subject();
+    subject.getPrincipals().add(p);
+    UserGroupInformation ugi = UserGroupInformation.getUGIFromSubject(subject);
+    assertNotNull(ugi);
+    assertEquals("guest@DEFAULT.REALM", ugi.getUserName());
+  }
+
   /** Test hasSufficientTimeElapsed method */
   @Test (timeout = 30000)
   public void testHasSufficientTimeElapsed() throws Exception {
@@ -259,59 +259,17 @@ Trunk (Unreleased)
    HDFS-5794. Fix the inconsistency of layout version number of
    ADD_DATANODE_AND_STORAGE_UUIDS between trunk and branch-2. (jing9)

-  BREAKDOWN OF HDFS-5698 SUBTASKS AND RELATED JIRAS
+Release 2.5.0 - UNRELEASED

-    HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9)
+  INCOMPATIBLE CHANGES

-    HDFS-5738. Serialize INode information in protobuf. (Haohui Mai via jing9)
+  NEW FEATURES

-    HDFS-5772. Serialize under-construction file information in FSImage. (jing9)
+  IMPROVEMENTS

-    HDFS-5783. Compute the digest before loading FSImage. (Haohui Mai via jing9)
+  OPTIMIZATIONS

-    HDFS-5785. Serialize symlink in protobuf. (Haohui Mai via jing9)
+  BUG FIXES

-    HDFS-5793. Optimize the serialization of PermissionStatus. (Haohui Mai via
-    jing9)
-
-    HDFS-5743. Use protobuf to serialize snapshot information. (jing9)
-
-    HDFS-5774. Serialize CachePool directives in protobuf. (Haohui Mai via jing9)
-
-    HDFS-5744. Serialize information for token managers in protobuf. (Haohui Mai
-    via jing9)
-
-    HDFS-5824. Add a Type field in Snapshot DiffEntry's protobuf definition.
-    (jing9)
-
-    HDFS-5808. Implement cancellation when saving FSImage. (Haohui Mai via jing9)
-
-    HDFS-5826. Update the stored edit logs to be consistent with the changes in
-    HDFS-5698 branch. (Haohui Mai via jing9)
-
-    HDFS-5797. Implement offline image viewer. (Haohui Mai via jing9)
-
-    HDFS-5771. Track progress when loading fsimage. (Haohui Mai via cnauroth)
-
-    HDFS-5871. Use PBHelper to serialize CacheDirectiveInfoExpirationProto.
-    (Haohui Mai via jing9)
-
-    HDFS-5884. LoadDelegator should use IOUtils.readFully() to read the magic
-    header. (Haohui Mai via jing9)
-
-    HDFS-5885. Add annotation for repeated fields in the protobuf definition.
-    (Haohui Mai via jing9)
-
-    HDFS-5906. Fixing findbugs and javadoc warnings in the HDFS-5698 branch.
-    (Haohui Mai via jing9)
-
-    HDFS-5911. The id of a CacheDirective instance does not get serialized in
-    the protobuf-fsimage. (Haohui Mai via jing9)
-
-    HDFS-5915. Refactor FSImageFormatProtobuf to simplify cross section reads.
-    (Haohui Mai via cnauroth)
-
-    HDFS-5847. Consolidate INodeReference into a separate section. (jing9)
-
 Release 2.4.0 - UNRELEASED

@@ -319,6 +277,9 @@ Release 2.4.0 - UNRELEASED

  NEW FEATURES

+    HDFS-5698. Use protobuf to serialize / deserialize FSImage. (See breakdown
+    of tasks below for features and contributors)
+
  IMPROVEMENTS

    HDFS-5781. Use an array to record the mapping between FSEditLogOpCode and
@@ -451,6 +412,90 @@ Release 2.4.0 - UNRELEASED
    HDFS-5943. 'dfs.namenode.https-address' property is not loaded from
    configuration in federation setup. (suresh)

+    HDFS-3128. Unit tests should not use a test root in /tmp. (wang)
+
+    HDFS-5948. TestBackupNode flakes with port in use error. (Haohui Mai
+    via Arpit Agarwal)
+
+    HDFS-5949. New Namenode UI when trying to download a file, the browser
+    doesn't know the file name. (Haohui Mai via brandonli)
+
+    HDFS-5716. Allow WebHDFS to use pluggable authentication filter
+    (Haohui Mai via brandonli)
+
+    HDFS-5953. TestBlockReaderFactory fails in trunk. (Akira Ajisaka via wang)
+
+    HDFS-5759. Web UI does not show up during the period of loading FSImage.
+    (Haohui Mai via Arpit Agarwal)
+
+    HDFS-5942. Fix javadoc in OfflineImageViewer. (Akira Ajisaka via cnauroth)
+
+    HDFS-5780. TestRBWBlockInvalidation times out intemittently. (Mit Desai
+    via kihwal)
+
+    HDFS-5803. TestBalancer.testBalancer0 fails. (Chen He via kihwal)
+
+    HDFS-5893. HftpFileSystem.RangeHeaderUrlOpener uses the default
+    URLConnectionFactory which does not import SSL certificates. (Haohui Mai via
+    jing9)
+
+  BREAKDOWN OF HDFS-5698 SUBTASKS AND RELATED JIRAS
+
+    HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9)
+
+    HDFS-5738. Serialize INode information in protobuf. (Haohui Mai via jing9)
+
+    HDFS-5772. Serialize under-construction file information in FSImage. (jing9)
+
+    HDFS-5783. Compute the digest before loading FSImage. (Haohui Mai via jing9)
+
+    HDFS-5785. Serialize symlink in protobuf. (Haohui Mai via jing9)
+
+    HDFS-5793. Optimize the serialization of PermissionStatus. (Haohui Mai via
+    jing9)
+
+    HDFS-5743. Use protobuf to serialize snapshot information. (jing9)
+
+    HDFS-5774. Serialize CachePool directives in protobuf. (Haohui Mai via jing9)
+
+    HDFS-5744. Serialize information for token managers in protobuf. (Haohui Mai
+    via jing9)
+
+    HDFS-5824. Add a Type field in Snapshot DiffEntry's protobuf definition.
+    (jing9)
+
+    HDFS-5808. Implement cancellation when saving FSImage. (Haohui Mai via jing9)
+
+    HDFS-5826. Update the stored edit logs to be consistent with the changes in
+    HDFS-5698 branch. (Haohui Mai via jing9)
+
+    HDFS-5797. Implement offline image viewer. (Haohui Mai via jing9)
+
+    HDFS-5771. Track progress when loading fsimage. (Haohui Mai via cnauroth)
+
+    HDFS-5871. Use PBHelper to serialize CacheDirectiveInfoExpirationProto.
+    (Haohui Mai via jing9)
+
+    HDFS-5884. LoadDelegator should use IOUtils.readFully() to read the magic
+    header. (Haohui Mai via jing9)
+
+    HDFS-5885. Add annotation for repeated fields in the protobuf definition.
+    (Haohui Mai via jing9)
+
+    HDFS-5906. Fixing findbugs and javadoc warnings in the HDFS-5698 branch.
+    (Haohui Mai via jing9)
+
+    HDFS-5911. The id of a CacheDirective instance does not get serialized in
+    the protobuf-fsimage. (Haohui Mai via jing9)
+
+    HDFS-5915. Refactor FSImageFormatProtobuf to simplify cross section reads.
+    (Haohui Mai via cnauroth)
+
+    HDFS-5847. Consolidate INodeReference into a separate section. (jing9)
+
+    HDFS-5959. Fix typo at section name in FSImageFormatProtobuf.java.
+    (Akira Ajisaka via suresh)
+
 Release 2.3.1 - UNRELEASED

  INCOMPATIBLE CHANGES
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
+import org.apache.hadoop.hdfs.web.AuthFilter;
 import org.apache.hadoop.http.HttpConfig;

 /**
@@ -173,6 +174,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT = 2;
   public static final String DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY = "dfs.namenode.replication.max-streams-hard-limit";
   public static final int DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT = 4;
+  public static final String DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY = "dfs.web.authentication.filter";
+  public static final String DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT = AuthFilter.class.getName();
   public static final String DFS_WEBHDFS_ENABLED_KEY = "dfs.webhdfs.enabled";
   public static final boolean DFS_WEBHDFS_ENABLED_DEFAULT = true;
   public static final String DFS_WEBHDFS_USER_PATTERN_KEY = "dfs.webhdfs.user.provider.user.pattern";
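The new key makes the WebHDFS authentication filter pluggable (HDFS-5716), with the SPNEGO-based AuthFilter kept as the default. A hedged sketch of overriding it in configuration; the filter class name passed in is a placeholder, not a real Hadoop class:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class WebHdfsFilterConfigExample {
  static Configuration withCustomWebHdfsFilter(String filterClassName) {
    Configuration conf = new Configuration();
    // "dfs.web.authentication.filter"; defaults to AuthFilter when unset.
    conf.set(DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
        filterClassName);
    return conf;
  }
}
```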
@@ -234,7 +234,7 @@ public final class FSImageFormatProtobuf {
           inodeLoader.loadINodeSection(in);
         }
         break;
-      case INODE_REFRENCE:
+      case INODE_REFERENCE:
         snapshotLoader.loadINodeReferenceSection(in);
         break;
       case INODE_DIR:
@@ -553,7 +553,7 @@ public final class FSImageFormatProtobuf {
     NS_INFO("NS_INFO"),
     STRING_TABLE("STRING_TABLE"),
     INODE("INODE"),
-    INODE_REFRENCE("INODE_REFRENCE"),
+    INODE_REFERENCE("INODE_REFERENCE"),
     SNAPSHOT("SNAPSHOT"),
     INODE_DIR("INODE_DIR"),
     FILES_UNDERCONSTRUCTION("FILES_UNDERCONSTRUCTION"),
@@ -27,7 +27,6 @@ import javax.servlet.http.HttpServletResponse;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -61,18 +60,13 @@ public class FileDataServlet extends DfsServlet {
     } else {
       hostname = host.getIpAddr();
     }
-    int port = host.getInfoPort();
-    if ("https".equals(scheme)) {
-      final Integer portObject = (Integer) getServletContext().getAttribute(
-          DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY);
-      if (portObject != null) {
-        port = portObject;
-      }
-    }
+    int port = "https".equals(scheme) ? host.getInfoSecurePort() : host
+        .getInfoPort();

     String dtParam = "";
     if (dt != null) {
-      dtParam=JspHelper.getDelegationTokenUrlParam(dt);
+      dtParam = JspHelper.getDelegationTokenUrlParam(dt);
     }

     // Add namenode address to the url params
@@ -32,7 +32,6 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
-import org.apache.hadoop.hdfs.web.AuthFilter;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.Param;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
@@ -70,21 +69,27 @@ public class NameNodeHttpServer {
   private void initWebHdfs(Configuration conf) throws IOException {
     if (WebHdfsFileSystem.isEnabled(conf, HttpServer2.LOG)) {
       // set user pattern based on configuration file
-      UserParam.setUserPattern(conf.get(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
+      UserParam.setUserPattern(conf.get(
+          DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
+          DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
+
+      // add authentication filter for webhdfs
+      final String className = conf.get(
+          DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
+          DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
+      final String name = className;

-      // add SPNEGO authentication filter for webhdfs
-      final String name = "SPNEGO";
-      final String classname = AuthFilter.class.getName();
       final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
       Map<String, String> params = getAuthFilterParams(conf);
-      HttpServer2.defineFilter(httpServer.getWebAppContext(), name, classname, params,
-        new String[]{pathSpec});
-      HttpServer2.LOG.info("Added filter '" + name + "' (class=" + classname + ")");
+      HttpServer2.defineFilter(httpServer.getWebAppContext(), name, className,
+          params, new String[] { pathSpec });
+      HttpServer2.LOG.info("Added filter '" + name + "' (class=" + className
+          + ")");

       // add webhdfs packages
-      httpServer.addJerseyResourcePackage(
-        NamenodeWebHdfsMethods.class.getPackage().getName()
-        + ";" + Param.class.getPackage().getName(), pathSpec);
+      httpServer.addJerseyResourcePackage(NamenodeWebHdfsMethods.class
+          .getPackage().getName() + ";" + Param.class.getPackage().getName(),
+          pathSpec);
     }
   }

@@ -383,7 +383,7 @@ public class FSImageFormatPBSnapshot {
         INodeReferenceSection.INodeReference.Builder rb = buildINodeReference(ref);
         rb.build().writeDelimitedTo(out);
       }
-      parent.commitSection(headers, SectionName.INODE_REFRENCE);
+      parent.commitSection(headers, SectionName.INODE_REFERENCE);
     }

     private INodeReferenceSection.INodeReference.Builder buildINodeReference(
@@ -51,28 +51,16 @@ import com.google.common.collect.Maps;
 import com.google.common.io.LimitInputStream;

 /**
- * This is the tool for analyzing file sizes in the namespace image. In order to
- * run the tool one should define a range of integers <tt>[0, maxSize]</tt> by
- * specifying <tt>maxSize</tt> and a <tt>step</tt>. The range of integers is
- * divided into segments of size <tt>step</tt>:
- * <tt>[0, s<sub>1</sub>, ..., s<sub>n-1</sub>, maxSize]</tt>, and the visitor
- * calculates how many files in the system fall into each segment
- * <tt>[s<sub>i-1</sub>, s<sub>i</sub>)</tt>. Note that files larger than
- * <tt>maxSize</tt> always fall into the very last segment.
+ * LsrPBImage displays the blocks of the namespace in a format very similar
+ * to the output of ls/lsr. Entries are marked as directories or not,
+ * permissions listed, replication, username and groupname, along with size,
+ * modification date and full path.
  *
- * <h3>Input.</h3>
- * <ul>
- * <li><tt>filename</tt> specifies the location of the image file;</li>
- * <li><tt>maxSize</tt> determines the range <tt>[0, maxSize]</tt> of files
- * sizes considered by the visitor;</li>
- * <li><tt>step</tt> the range is divided into segments of size step.</li>
- * </ul>
- *
- * <h3>Output.</h3> The output file is formatted as a tab separated two column
- * table: Size and NumFiles. Where Size represents the start of the segment, and
- * numFiles is the number of files form the image which size falls in this
- * segment.
- *
+ * Note: A significant difference between the output of the lsr command
+ * and this image visitor is that this class cannot sort the file entries;
+ * they are listed in the order they are stored within the fsimage file.
+ * Therefore, the output of this class cannot be directly compared to the
+ * output of the lsr command.
  */
 final class LsrPBImage {
   private final Configuration conf;
@@ -127,7 +115,7 @@ final class LsrPBImage {
       case INODE:
         loadINodeSection(is);
         break;
-      case INODE_REFRENCE:
+      case INODE_REFERENCE:
         loadINodeReferenceSection(is);
         break;
       case INODE_DIR:
@@ -55,28 +55,8 @@ import com.google.common.collect.Lists;
 import com.google.common.io.LimitInputStream;

 /**
- * This is the tool for analyzing file sizes in the namespace image. In order to
- * run the tool one should define a range of integers <tt>[0, maxSize]</tt> by
- * specifying <tt>maxSize</tt> and a <tt>step</tt>. The range of integers is
- * divided into segments of size <tt>step</tt>:
- * <tt>[0, s<sub>1</sub>, ..., s<sub>n-1</sub>, maxSize]</tt>, and the visitor
- * calculates how many files in the system fall into each segment
- * <tt>[s<sub>i-1</sub>, s<sub>i</sub>)</tt>. Note that files larger than
- * <tt>maxSize</tt> always fall into the very last segment.
- *
- * <h3>Input.</h3>
- * <ul>
- * <li><tt>filename</tt> specifies the location of the image file;</li>
- * <li><tt>maxSize</tt> determines the range <tt>[0, maxSize]</tt> of files
- * sizes considered by the visitor;</li>
- * <li><tt>step</tt> the range is divided into segments of size step.</li>
- * </ul>
- *
- * <h3>Output.</h3> The output file is formatted as a tab separated two column
- * table: Size and NumFiles. Where Size represents the start of the segment, and
- * numFiles is the number of files form the image which size falls in this
- * segment.
- *
+ * PBImageXmlWriter walks over an fsimage structure and writes out
+ * an equivalent XML document that contains the fsimage's components.
  */
 @InterfaceAudience.Private
 public final class PBImageXmlWriter {
@@ -133,7 +113,7 @@ public final class PBImageXmlWriter {
       case INODE:
         dumpINodeSection(is);
         break;
-      case INODE_REFRENCE:
+      case INODE_REFERENCE:
         dumpINodeReferenceSection(is);
         break;
       case INODE_DIR:
@@ -344,14 +344,15 @@ public class HftpFileSystem extends FileSystem
   }

   static class RangeHeaderUrlOpener extends ByteRangeInputStream.URLOpener {
-    URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_SYSTEM_CONNECTION_FACTORY;
+    private final URLConnectionFactory connFactory;

-    RangeHeaderUrlOpener(final URL url) {
+    RangeHeaderUrlOpener(URLConnectionFactory connFactory, final URL url) {
       super(url);
+      this.connFactory = connFactory;
     }

     protected HttpURLConnection openConnection() throws IOException {
-      return (HttpURLConnection)connectionFactory.openConnection(url);
+      return (HttpURLConnection)connFactory.openConnection(url);
     }

     /** Use HTTP Range header for specifying offset. */
@@ -381,8 +382,9 @@ public class HftpFileSystem extends FileSystem
       super(o, r);
     }

-    RangeHeaderInputStream(final URL url) {
-      this(new RangeHeaderUrlOpener(url), new RangeHeaderUrlOpener(null));
+    RangeHeaderInputStream(URLConnectionFactory connFactory, final URL url) {
+      this(new RangeHeaderUrlOpener(connFactory, url),
+          new RangeHeaderUrlOpener(connFactory, null));
     }

     @Override
@@ -397,7 +399,7 @@ public class HftpFileSystem extends FileSystem
     String path = "/data" + ServletUtil.encodePath(f.toUri().getPath());
     String query = addDelegationTokenParam("ugi=" + getEncodedUgiParameter());
     URL u = getNamenodeURL(path, query);
-    return new FSDataInputStream(new RangeHeaderInputStream(u));
+    return new FSDataInputStream(new RangeHeaderInputStream(connectionFactory, u));
   }

   @Override
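This is the HDFS-5893 fix noted in CHANGES.txt: the file system's own URLConnectionFactory is threaded through the range openers instead of letting them fall back to a shared default, so HTTPS connections pick up the configured SSL settings. A generic sketch of the same constructor-injection idea (the types and names below are illustrative, not the Hadoop classes):

```java
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;

// Stand-in for a factory that carries SSL/socket configuration.
interface ConnectionFactory {
  HttpURLConnection open(URL url) throws IOException;
}

// The opener no longer reaches for a shared default factory; the caller
// injects the factory it was configured with.
class RangeOpener {
  private final ConnectionFactory factory;
  private final URL url;

  RangeOpener(ConnectionFactory factory, URL url) {
    this.factory = factory;
    this.url = url;
  }

  HttpURLConnection openConnection() throws IOException {
    return factory.open(url);
  }
}
```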
@@ -151,7 +151,7 @@
 {/fs}
 </table>

-<div class="page-header"><h1>Namenode Journal Status</h1></div>
+<div class="page-header"><h1>NameNode Journal Status</h1></div>
 <p><b>Current transaction ID:</b> {nn.JournalTransactionInfo.LastAppliedOrWrittenTxId}</p>
 <table class="table" title="NameNode Journals">
   <thead>
@@ -50,24 +50,23 @@
     var data = {};

     // Workarounds for the fact that JMXJsonServlet returns non-standard JSON strings
-    function data_workaround(d) {
-      d.nn.JournalTransactionInfo = JSON.parse(d.nn.JournalTransactionInfo);
-      d.nn.NameJournalStatus = JSON.parse(d.nn.NameJournalStatus);
-      d.nn.NameDirStatuses = JSON.parse(d.nn.NameDirStatuses);
-      d.nn.NodeUsage = JSON.parse(d.nn.NodeUsage);
-      d.nn.CorruptFiles = JSON.parse(d.nn.CorruptFiles);
-      return d;
+    function workaround(nn) {
+      nn.JournalTransactionInfo = JSON.parse(nn.JournalTransactionInfo);
+      nn.NameJournalStatus = JSON.parse(nn.NameJournalStatus);
+      nn.NameDirStatuses = JSON.parse(nn.NameDirStatuses);
+      nn.NodeUsage = JSON.parse(nn.NodeUsage);
+      nn.CorruptFiles = JSON.parse(nn.CorruptFiles);
+      return nn;
     }

     load_json(
       BEANS,
-      function(d) {
+      guard_with_startup_progress(function(d) {
         for (var k in d) {
-          data[k] = d[k].beans[0];
+          data[k] = k === 'nn' ? workaround(d[k].beans[0]) : d[k].beans[0];
         }
-        data = data_workaround(data);
         render();
-      },
+      }),
       function (url, jqxhr, text, err) {
         show_err_msg('<p>Failed to retrieve data from ' + url + ', cause: ' + err + '</p>');
       });
@@ -92,6 +91,19 @@
       show_err_msg('<p>Failed to retrieve data from ' + url + ', cause: ' + err + '</p>');
     }

+    function guard_with_startup_progress(fn) {
+      return function() {
+        try {
+          fn.apply(this, arguments);
+        } catch (err) {
+          if (err instanceof TypeError) {
+            show_err_msg('NameNode is still loading. Redirecting to the Startup Progress page.');
+            load_startup_progress();
+          }
+        }
+      };
+    }
+
     function load_startup_progress() {
       function workaround(r) {
         function rename_property(o, s, d) {
@@ -143,25 +155,29 @@
       return r;
     }

-    $.get('/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo', function (resp) {
-      var data = workaround(resp.beans[0]);
-      dust.render('datanode-info', data, function(err, out) {
-        $('#tab-datanode').html(out);
-        $('#ui-tabs a[href="#tab-datanode"]').tab('show');
-      });
-    }).error(ajax_error_handler);
+    $.get(
+      '/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo',
+      guard_with_startup_progress(function (resp) {
+        var data = workaround(resp.beans[0]);
+        dust.render('datanode-info', data, function(err, out) {
+          $('#tab-datanode').html(out);
+          $('#ui-tabs a[href="#tab-datanode"]').tab('show');
+        });
+      })).error(ajax_error_handler);
   }

   $('a[href="#tab-datanode"]').click(load_datanode_info);

   function load_snapshot_info() {
-    $.get('/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState', function (resp) {
-      var data = JSON.parse(resp.beans[0].SnapshotStats);
-      dust.render('snapshot-info', data, function(err, out) {
-        $('#tab-snapshot').html(out);
-        $('#ui-tabs a[href="#tab-snapshot"]').tab('show');
-      });
-    }).error(ajax_error_handler);
+    $.get(
+      '/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState',
+      guard_with_startup_progress(function (resp) {
+        var data = JSON.parse(resp.beans[0].SnapshotStats);
+        dust.render('snapshot-info', data, function(err, out) {
+          $('#tab-snapshot').html(out);
+          $('#ui-tabs a[href="#tab-snapshot"]').tab('show');
+        });
+      })).error(ajax_error_handler);
   }

   $('#ui-tabs a[href="#tab-snapshot"]').click(load_snapshot_info);
@@ -124,7 +124,7 @@
     $('#file-info-tail').hide();
     $('#file-info-title').text("File information - " + path);

-    var download_url = '/webhdfs/v1' + abs_path + '/?op=OPEN';
+    var download_url = '/webhdfs/v1' + abs_path + '?op=OPEN';

     $('#file-info-download').attr('href', download_url);
     $('#file-info-preview').click(function() {
@@ -40,7 +40,7 @@ public class TestFcHdfsCreateMkdir extends

   @Override
   protected FileContextTestHelper createFileContextHelper() {
-    return new FileContextTestHelper("/tmp/TestFcHdfsCreateMkdir");
+    return new FileContextTestHelper();
   }


@@ -35,7 +35,7 @@ import org.junit.BeforeClass;
 public class TestFcHdfsPermission extends FileContextPermissionBase {

   private static final FileContextTestHelper fileContextTestHelper =
-      new FileContextTestHelper("/tmp/TestFcHdfsPermission");
+      new FileContextTestHelper();
   private static FileContext fc;

   private static MiniDFSCluster cluster;
@@ -43,7 +43,7 @@ import org.junit.Test;
 public class TestFcHdfsSetUMask {

   private static FileContextTestHelper fileContextTestHelper =
-      new FileContextTestHelper("/tmp/TestFcHdfsSetUMask");
+      new FileContextTestHelper();
   private static MiniDFSCluster cluster;
   private static Path defaultWorkingDirectory;
   private static FileContext fc;
@@ -49,7 +49,7 @@ public class TestHDFSFileContextMainOperations extends

   @Override
   protected FileContextTestHelper createFileContextHelper() {
-    return new FileContextTestHelper("/tmp/TestHDFSFileContextMainOperations");
+    return new FileContextTestHelper();
   }

   @BeforeClass
@@ -50,7 +50,7 @@ import org.junit.Test;
  * underlying file system as Hdfs.
  */
 public class TestResolveHdfsSymlink {
-  private static File TEST_ROOT_DIR = PathUtils.getTestDir(TestResolveHdfsSymlink.class);
+  private static FileContextTestHelper helper = new FileContextTestHelper();
   private static MiniDFSCluster cluster = null;

   @BeforeClass
@@ -82,13 +82,14 @@ public class TestResolveHdfsSymlink {
     FileContext fcHdfs = FileContext.getFileContext(cluster.getFileSystem()
         .getUri());

+    final String localTestRoot = helper.getAbsoluteTestRootDir(fcLocal);
     Path alphaLocalPath = new Path(fcLocal.getDefaultFileSystem().getUri()
-        .toString(), new File(TEST_ROOT_DIR, "alpha").getAbsolutePath());
+        .toString(), new File(localTestRoot, "alpha").getAbsolutePath());
     DFSTestUtil.createFile(FileSystem.getLocal(conf), alphaLocalPath, 16,
         (short) 1, 2);

     Path linkTarget = new Path(fcLocal.getDefaultFileSystem().getUri()
-        .toString(), TEST_ROOT_DIR.getAbsolutePath());
+        .toString(), localTestRoot);
     Path hdfsLink = new Path(fcHdfs.getDefaultFileSystem().getUri().toString(),
         "/tmp/link");
     fcHdfs.createSymlink(linkTarget, hdfsLink, true);
@@ -42,8 +42,7 @@ public class TestSymlinkHdfsDisable {
     DistributedFileSystem dfs = cluster.getFileSystem();
     FileContext fc = FileContext.getFileContext(cluster.getURI(0), conf);
     // Create test files/links
-    FileContextTestHelper helper = new FileContextTestHelper(
-        "/tmp/TestSymlinkHdfsDisable");
+    FileContextTestHelper helper = new FileContextTestHelper();
     Path root = helper.getTestRootPath(fc);
     Path target = new Path(root, "target");
     Path link = new Path(root, "link");
@@ -45,7 +45,7 @@ public class TestViewFileSystemAtHdfsRoot extends ViewFileSystemBaseTest {

   @Override
   protected FileSystemTestHelper createFileSystemHelper() {
-    return new FileSystemTestHelper("/tmp/TestViewFileSystemAtHdfsRoot");
+    return new FileSystemTestHelper();
   }

   @BeforeClass
@@ -52,7 +52,7 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {

   @Override
   protected FileSystemTestHelper createFileSystemHelper() {
-    return new FileSystemTestHelper("/tmp/TestViewFileSystemHdfs");
+    return new FileSystemTestHelper();
   }

   @BeforeClass
@@ -46,7 +46,7 @@ public class TestViewFsAtHdfsRoot extends ViewFsBaseTest {

   @Override
   protected FileContextTestHelper createFileContextHelper() {
-    return new FileContextTestHelper("/tmp/TestViewFsAtHdfsRoot");
+    return new FileContextTestHelper();
   }

   @BeforeClass
@@ -42,7 +42,7 @@ public class TestViewFsHdfs extends ViewFsBaseTest {

   @Override
   protected FileContextTestHelper createFileContextHelper() {
-    return new FileContextTestHelper("/tmp/TestViewFsHdfs");
+    return new FileContextTestHelper();
   }


@@ -36,6 +36,7 @@ import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.junit.After;
 import org.junit.Assert;
+import org.junit.Assume;
 import org.junit.Before;
 import org.junit.Test;

@@ -47,6 +48,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC;
+import static org.hamcrest.CoreMatchers.equalTo;

 public class TestBlockReaderFactory {
   static final Log LOG = LogFactory.getLog(TestBlockReaderFactory.class);
@@ -56,6 +58,11 @@ public class TestBlockReaderFactory {
     DomainSocket.disableBindPathValidation();
   }

+  @Before
+  public void before() {
+    Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+  }
+
   @After
   public void cleanup() {
     DFSInputStream.tcpReadsDisabledForTesting = false;
@@ -74,7 +74,7 @@ public class TestBalancer {

   ClientProtocol client;

-  static final long TIMEOUT = 20000L; //msec
+  static final long TIMEOUT = 40000L; //msec
   static final double CAPACITY_ALLOWED_VARIANCE = 0.005; // 0.5%
   static final double BALANCE_ALLOWED_VARIANCE = 0.11; // 10%+delta
   static final int DEFAULT_BLOCK_SIZE = 10;
@@ -66,7 +66,7 @@ public class TestRBWBlockInvalidation {
    * datanode, namenode should ask to invalidate that corrupted block and
    * schedule replication for one more replica for that under replicated block.
    */
-  @Test(timeout=60000)
+  @Test(timeout=600000)
   public void testBlockInvalidationWhenRBWReplicaMissedInDN()
       throws IOException, InterruptedException {
     // This test cannot pass on Windows due to file locking enforcement. It will
@@ -75,7 +75,7 @@ public class TestRBWBlockInvalidation {

     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
-    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 100);
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 300);
     conf.setLong(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
@@ -104,23 +104,24 @@ public class TestRBWBlockInvalidation {
           metaFile.delete());

       out.close();

-      // Check datanode has reported the corrupt block.
-      int corruptReplicas = 0;
+      int liveReplicas = 0;
       while (true) {
-        if ((corruptReplicas = countReplicas(namesystem, blk).corruptReplicas()) > 0) {
+        if ((liveReplicas = countReplicas(namesystem, blk).liveReplicas()) < 2) {
+          // This confirms we have a corrupt replica
+          LOG.info("Live Replicas after corruption: " + liveReplicas);
           break;
         }
         Thread.sleep(100);
       }
-      assertEquals("There should be 1 replica in the corruptReplicasMap", 1,
-          corruptReplicas);
+      assertEquals("There should be less than 2 replicas in the "
+          + "liveReplicasMap", 1, liveReplicas);

-      // Check the block has got replicated to another datanode.
-      blk = DFSTestUtil.getFirstBlock(fs, testPath);
-      int liveReplicas = 0;
       while (true) {
-        if ((liveReplicas = countReplicas(namesystem, blk).liveReplicas()) > 1) {
+        if ((liveReplicas =
+              countReplicas(namesystem, blk).liveReplicas()) > 1) {
+          //Wait till the live replica count becomes equal to Replication Factor
+          LOG.info("Live Replicas after Rereplication: " + liveReplicas);
           break;
         }
         Thread.sleep(100);
@@ -128,9 +129,9 @@ public class TestRBWBlockInvalidation {
       assertEquals("There should be two live replicas", 2,
          liveReplicas);

-      // sleep for 1 second, so that by this time datanode reports the corrupt
+      // sleep for 2 seconds, so that by this time datanode reports the corrupt
       // block after a live replica of block got replicated.
-      Thread.sleep(1000);
+      Thread.sleep(2000);

       // Check that there is no corrupt block in the corruptReplicasMap.
       assertEquals("There should not be any replica in the corruptReplicasMap",
@@ -282,6 +282,7 @@ public class TestBackupNode {
     HAUtil.setAllowStandbyReads(conf, true);
     short replication = (short)conf.getInt("dfs.replication", 3);
     int numDatanodes = Math.max(3, replication);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, "localhost:0");
     conf.set(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, "0");
     conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // disable block scanner
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1);
@@ -97,12 +97,13 @@ public static class MockHttpURLConnection extends HttpURLConnection {

   @Test
   public void testByteRange() throws IOException {
+    URLConnectionFactory factory = mock(URLConnectionFactory.class);
     HftpFileSystem.RangeHeaderUrlOpener ospy = spy(
-        new HftpFileSystem.RangeHeaderUrlOpener(new URL("http://test/")));
+        new HftpFileSystem.RangeHeaderUrlOpener(factory, new URL("http://test/")));
     doReturn(new MockHttpURLConnection(ospy.getURL())).when(ospy)
         .openConnection();
     HftpFileSystem.RangeHeaderUrlOpener rspy = spy(
-        new HftpFileSystem.RangeHeaderUrlOpener((URL) null));
+        new HftpFileSystem.RangeHeaderUrlOpener(factory, (URL) null));
     doReturn(new MockHttpURLConnection(rspy.getURL())).when(rspy)
         .openConnection();
     ByteRangeInputStream is = new HftpFileSystem.RangeHeaderInputStream(ospy, rspy);
@@ -171,12 +172,15 @@ public static class MockHttpURLConnection extends HttpURLConnection {
       assertEquals("Should fail because incorrect response code was sent",
           "HTTP_OK expected, received 206", e.getMessage());
     }
+    is.close();
   }

   @Test
   public void testPropagatedClose() throws IOException {
-    ByteRangeInputStream brs = spy(
-        new HftpFileSystem.RangeHeaderInputStream(new URL("http://test/")));
+    URLConnectionFactory factory = mock(URLConnectionFactory.class);
+
+    ByteRangeInputStream brs = spy(new HftpFileSystem.RangeHeaderInputStream(
+        factory, new URL("http://test/")));

     InputStream mockStream = mock(InputStream.class);
     doReturn(mockStream).when(brs).openInputStream();
@@ -52,7 +52,7 @@ public class TestFSMainOperationsWebHdfs extends FSMainOperationsBaseTest {
   private static FileSystem fileSystem;

   public TestFSMainOperationsWebHdfs() {
-    super("/tmp/TestFSMainOperationsWebHdfs");
+    super();
   }

   @Override
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.web;

 import java.io.File;
 import java.io.InputStream;
+import java.io.OutputStream;
 import java.net.InetSocketAddress;
 import java.net.URI;

@@ -30,6 +31,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -65,9 +67,11 @@ public class TestHttpsFileSystem {

     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
-    cluster.getFileSystem().create(new Path("/test")).close();
+    OutputStream os = cluster.getFileSystem().create(new Path("/test"));
+    os.write(23);
+    os.close();
     InetSocketAddress addr = cluster.getNameNode().getHttpsAddress();
-    nnAddr = addr.getHostName() + ":" + addr.getPort();
+    nnAddr = NetUtils.getHostPortString(addr);
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, nnAddr);
   }

@@ -82,6 +86,9 @@ public class TestHttpsFileSystem {
   public void testHsftpFileSystem() throws Exception {
     FileSystem fs = FileSystem.get(new URI("hsftp://" + nnAddr), conf);
     Assert.assertTrue(fs.exists(new Path("/test")));
+    InputStream is = fs.open(new Path("/test"));
+    Assert.assertEquals(23, is.read());
+    is.close();
     fs.close();
   }

@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.URI;
+
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.net.NetUtils;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestWebHdfsWithAuthenticationFilter {
+  private static boolean authorized = false;
+
+  public static final class CustomizedFilter implements Filter {
+    @Override
+    public void init(FilterConfig filterConfig) throws ServletException {
+    }
+
+    @Override
+    public void doFilter(ServletRequest request, ServletResponse response,
+        FilterChain chain) throws IOException, ServletException {
+      if (authorized) {
+        chain.doFilter(request, response);
+      } else {
+        ((HttpServletResponse) response)
+            .sendError(HttpServletResponse.SC_FORBIDDEN);
+      }
+    }
+
+    @Override
+    public void destroy() {
+    }
+
+  }
+
+  private static Configuration conf;
+  private static MiniDFSCluster cluster;
+  private static FileSystem fs;
+
+  @BeforeClass
+  public static void setUp() throws IOException {
+    conf = new Configuration();
+    conf.set(DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
+        CustomizedFilter.class.getName());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "localhost:0");
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    InetSocketAddress addr = cluster.getNameNode().getHttpAddress();
+    fs = FileSystem.get(
+        URI.create("webhdfs://" + NetUtils.getHostPortString(addr)), conf);
+    cluster.waitActive();
+  }
+
+  @AfterClass
+  public static void tearDown() throws IOException {
+    fs.close();
+    cluster.shutdown();
+  }
+
+  @Test
+  public void testWebHdfsAuthFilter() throws IOException {
+    // getFileStatus() is supposed to pass through with the default filter.
+    authorized = false;
+    try {
+      fs.getFileStatus(new Path("/"));
+      Assert.fail("The filter fails to block the request");
+    } catch (IOException e) {
+    }
+    authorized = true;
+    fs.getFileStatus(new Path("/"));
+  }
+}
@@ -19,6 +19,7 @@ package org.apache.hadoop.test;

 import java.io.File;

+import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.fs.Path;

 public class PathUtils {
@@ -36,7 +37,10 @@ public class PathUtils {
   }

   public static File getTestDir(Class<?> caller, boolean create) {
-    File dir = new File(System.getProperty("test.build.data", "/tmp"), caller.getSimpleName());
+    File dir =
+        new File(System.getProperty("test.build.data", "target/test/data")
+            + "/" + RandomStringUtils.randomAlphanumeric(10),
+            caller.getSimpleName());
     if (create) {
       dir.mkdirs();
     }
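The PathUtils change above gives every caller its own randomized directory under target/test/data. A minimal usage sketch follows; the TestDirDemo class is hypothetical and only illustrates what a test would see, it is not part of this patch:

    import java.io.File;
    import org.apache.hadoop.test.PathUtils;

    public class TestDirDemo {
      public static void main(String[] args) {
        // Resolves to something like target/test/data/<10 random chars>/TestDirDemo,
        // so parallel test runs no longer collide on a single shared /tmp directory.
        File dir = PathUtils.getTestDir(TestDirDemo.class, true);
        System.out.println(dir.getAbsolutePath());
      }
    }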
@@ -139,6 +139,18 @@ Trunk (Unreleased)

     MAPREDUCE-5717. Task pings are interpreted as task progress (jlowe)

+Release 2.5.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.4.0 - UNRELEASED

   INCOMPATIBLE CHANGES
@@ -165,6 +177,9 @@ Release 2.4.0 - UNRELEASED
     MAPREDUCE-5670. CombineFileRecordReader should report progress when moving
     to the next file (Chen He via jlowe)

+    MAPREDUCE-5757. ConcurrentModificationException in JobControl.toList
+    (jlowe)
+
 Release 2.3.1 - UNRELEASED

   INCOMPATIBLE CHANGES
@@ -1387,7 +1387,8 @@ public class MRAppMaster extends CompositeService {
       // RM/NM to issue SSL certificates but definitely not MR-AM as it is
       // running in user-land.
       MRWebAppUtil.initialize(conf);
-      HttpConfig.setPolicy(HttpConfig.Policy.HTTP_ONLY);
+      conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,
+          HttpConfig.Policy.HTTP_ONLY.name());
       // log the system properties
       String systemPropsToLog = MRApps.getSystemPropertiesToLog(conf);
       if (systemPropsToLog != null) {
@@ -21,6 +21,7 @@ package org.apache.hadoop.mapreduce.v2.jobhistory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.http.HttpConfig;

 /**
  * Stores Job History configuration keys that can be set by administrators of
@@ -135,7 +136,7 @@ public class JHAdminConfig {
   public static final String MR_HS_HTTP_POLICY = MR_HISTORY_PREFIX
       + "http.policy";
   public static String DEFAULT_MR_HS_HTTP_POLICY =
-      CommonConfigurationKeysPublic.HTTP_POLICY_HTTP_ONLY;
+      HttpConfig.Policy.HTTP_ONLY.name();

   /**The address the history server webapp is on.*/
   public static final String MR_HISTORY_WEBAPP_ADDRESS =
@@ -71,11 +71,13 @@ public class MRWebAppUtil {
   }

   public static String getYARNWebappScheme() {
-    return HttpConfig.getScheme(httpPolicyInYarn);
+    return httpPolicyInYarn == HttpConfig.Policy.HTTPS_ONLY ? "https://"
+        : "http://";
   }

   public static String getJHSWebappScheme() {
-    return HttpConfig.getScheme(httpPolicyInJHS);
+    return httpPolicyInJHS == HttpConfig.Policy.HTTPS_ONLY ? "https://"
+        : "http://";
   }

   public static void setJHSWebappURLWithoutScheme(Configuration conf,
@@ -45,6 +45,7 @@ import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
 import org.apache.hadoop.mapreduce.util.HostUtil;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;

 /**
  * HistoryViewer is used to parse and view the JobHistory files
@@ -231,7 +232,8 @@ public class HistoryViewer {
         taskList.append("\t");
         taskList.append(attempt.getHostname()).append("\t");
         taskList.append(attempt.getError());
-        String taskLogsUrl = getTaskLogsUrl(attempt);
+        String taskLogsUrl = getTaskLogsUrl(
+            WebAppUtils.getHttpSchemePrefix(fs.getConf()), attempt);
         taskList.append(taskLogsUrl != null ? taskLogsUrl : "n/a");
         System.out.println(taskList.toString());
       }
@@ -446,7 +448,7 @@ public class HistoryViewer {
    * @return the taskLogsUrl. null if http-port or tracker-name or
    *         task-attempt-id are unavailable.
    */
-  public static String getTaskLogsUrl(
+  public static String getTaskLogsUrl(String scheme,
       JobHistoryParser.TaskAttemptInfo attempt) {
     if (attempt.getHttpPort() == -1
         || attempt.getTrackerName().equals("")
@@ -457,7 +459,7 @@ public class HistoryViewer {
     String taskTrackerName =
         HostUtil.convertTrackerNameToHostName(
             attempt.getTrackerName());
-    return HostUtil.getTaskLogUrl(taskTrackerName,
+    return HostUtil.getTaskLogUrl(scheme, taskTrackerName,
         Integer.toString(attempt.getHttpPort()),
         attempt.getAttemptId().toString());
   }
@@ -79,7 +79,7 @@ public class JobControl implements Runnable {
     this.runnerState = ThreadState.READY;
   }

-  synchronized private static List<ControlledJob> toList(
+  private static List<ControlledJob> toList(
       LinkedList<ControlledJob> jobs) {
     ArrayList<ControlledJob> retv = new ArrayList<ControlledJob>();
     for (ControlledJob job : jobs) {
@@ -122,11 +122,11 @@ public class JobControl implements Runnable {
   /**
    * @return the jobs in the success state
    */
-  public List<ControlledJob> getSuccessfulJobList() {
+  synchronized public List<ControlledJob> getSuccessfulJobList() {
     return toList(this.successfulJobs);
   }

-  public List<ControlledJob> getFailedJobList() {
+  synchronized public List<ControlledJob> getFailedJobList() {
     return toList(this.failedJobs);
   }

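The JobControl hunk drops the class-level lock on the static toList helper and instead synchronizes the public accessors on the JobControl instance, so the list copy runs under the same lock that guards the writers; that is the apparent fix for the ConcurrentModificationException noted in the MAPREDUCE-5757 entry above. A minimal sketch of the same pattern, using a hypothetical holder class rather than the real JobControl:

    import java.util.ArrayList;
    import java.util.LinkedList;
    import java.util.List;

    class JobListHolder<T> {
      private final LinkedList<T> successfulJobs = new LinkedList<T>();
      private final LinkedList<T> failedJobs = new LinkedList<T>();

      // Plain static helper: callers are expected to already hold the instance lock.
      private static <T> List<T> toList(LinkedList<T> jobs) {
        return new ArrayList<T>(jobs);
      }

      synchronized void addSuccessful(T job) { successfulJobs.add(job); }
      synchronized void addFailed(T job) { failedJobs.add(job); }

      // Snapshot under the same instance lock that guards the mutators above.
      synchronized List<T> getSuccessfulJobList() { return toList(successfulJobs); }
      synchronized List<T> getFailedJobList() { return toList(failedJobs); }
    }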
@@ -20,7 +20,6 @@ package org.apache.hadoop.mapreduce.util;

 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
-import org.apache.hadoop.http.HttpConfig;

 @Private
 @Unstable
@@ -33,9 +32,9 @@ public class HostUtil {
    * @param taskAttemptID
    * @return the taskLogUrl
    */
-  public static String getTaskLogUrl(String taskTrackerHostName,
+  public static String getTaskLogUrl(String scheme, String taskTrackerHostName,
       String httpPort, String taskAttemptID) {
-    return (HttpConfig.getSchemePrefix() + taskTrackerHostName + ":" +
+    return (scheme + taskTrackerHostName + ":" +
         httpPort + "/tasklog?attemptid=" + taskAttemptID);
   }

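With the scheme now passed in explicitly, callers pick http or https per configuration rather than through the removed static HttpConfig state. A small usage sketch of the new signature; the host, port and attempt id are illustrative values only:

    import org.apache.hadoop.mapreduce.util.HostUtil;

    public class TaskLogUrlDemo {
      public static void main(String[] args) {
        // The scheme would normally come from WebAppUtils.getHttpSchemePrefix(conf),
        // as in the HistoryViewer hunk above.
        String url = HostUtil.getTaskLogUrl("http://", "tt-host.example.com",
            "50060", "attempt_1394000000000_0001_m_000000_0");
        // -> http://tt-host.example.com:50060/tasklog?attemptid=attempt_...
        System.out.println(url);
      }
    }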
@@ -24,7 +24,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.v2.hs.HistoryServerStateStoreService.HistoryServerState;
@@ -121,7 +120,6 @@ public class JobHistoryServer extends CompositeService {

     // This is required for WebApps to use https if enabled.
     MRWebAppUtil.initialize(getConfig());
-    HttpConfig.setPolicy(MRWebAppUtil.getJHSHttpPolicy());
     try {
       doSecureLogin(conf);
     } catch(IOException ie) {
@@ -230,9 +230,9 @@ public class MiniMRYarnCluster extends MiniYARNCluster {
                WebAppUtils.getRMWebAppURLWithoutScheme(getConfig()));
       LOG.info("MiniMRYARN HistoryServer address: " +
                getConfig().get(JHAdminConfig.MR_HISTORY_ADDRESS));
-      LOG.info("MiniMRYARN HistoryServer web address: " +
-               getResolvedMRHistoryWebAppURLWithoutScheme(getConfig(),
-                   HttpConfig.isSecure()));
+      LOG.info("MiniMRYARN HistoryServer web address: "
+          + getResolvedMRHistoryWebAppURLWithoutScheme(getConfig(),
+              MRWebAppUtil.getJHSHttpPolicy() == HttpConfig.Policy.HTTPS_ONLY));
     }

     @Override
@@ -15,6 +15,18 @@ Trunk - Unreleased
     YARN-524 TestYarnVersionInfo failing if generated properties doesn't
     include an SVN URL. (stevel)

+Release 2.5.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.4.0 - UNRELEASED

   INCOMPATIBLE CHANGES
@@ -273,6 +285,16 @@ Release 2.4.0 - UNRELEASED
     at allocation time so as to prevent RM from shelling out containers with
     expired tokens. (Omkar Vinit Joshi and Jian He via vinodkv)

+    YARN-1553. Modified YARN and MR to stop using HttpConfig.isSecure() and
+    instead rely on the http policy framework. And also fix some bugs related
+    to https handling in YARN web-apps. (Haohui Mai via vinodkv)
+
+    YARN-1721. When moving app between queues in Fair Scheduler, grab lock on
+    FSSchedulerApp (Sandy Ryza)
+
+    YARN-1724. Race condition in Fair Scheduler when continuous scheduling is
+    turned on (Sandy Ryza)
+
 Release 2.3.1 - UNRELEASED

   INCOMPATIBLE CHANGES
@@ -100,7 +100,7 @@ public class HAUtil {
     StringBuilder setValue = new StringBuilder();
     for (String id: ids) {
       // verify the RM service addresses configurations for every RMIds
-      for (String prefix : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
+      for (String prefix : YarnConfiguration.getServiceAddressConfKeys(conf)) {
         checkAndSetRMRPCAddress(prefix, id, conf);
       }
       setValue.append(id);
@@ -158,7 +158,7 @@ public class HAUtil {
   }

   public static void verifyAndSetAllServiceAddresses(Configuration conf) {
-    for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
+    for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
       verifyAndSetConfValue(confKey, conf);
     }
   }
@@ -236,7 +236,7 @@ public class HAUtil {
   @InterfaceAudience.Private
   @VisibleForTesting
   static String getConfKeyForRMInstance(String prefix, Configuration conf) {
-    if (!YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS.contains(prefix)) {
+    if (!YarnConfiguration.getServiceAddressConfKeys(conf).contains(prefix)) {
       return prefix;
     } else {
       String RMId = getRMHAId(conf);
@@ -289,7 +289,7 @@ public class HAUtil {
             hostNameConfKey + " or " + addSuffix(prefix, RMId)));
       } else {
         conf.set(addSuffix(prefix, RMId), confVal + ":"
-            + YarnConfiguration.getRMDefaultPortNumber(prefix));
+            + YarnConfiguration.getRMDefaultPortNumber(prefix, conf));
       }
     }
   } catch (IllegalArgumentException iae) {
@@ -26,10 +26,8 @@ import java.util.List;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
-import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -187,6 +185,8 @@ public class YarnConfiguration extends Configuration {
   /** The https address of the RM web application.*/
   public static final String RM_WEBAPP_HTTPS_ADDRESS =
       RM_PREFIX + "webapp.https.address";
+  public static final boolean YARN_SSL_CLIENT_HTTPS_NEED_AUTH_DEFAULT = false;
+  public static final String YARN_SSL_SERVER_RESOURCE_DEFAULT = "ssl-server.xml";

   public static final int DEFAULT_RM_WEBAPP_HTTPS_PORT = 8090;
   public static final String DEFAULT_RM_WEBAPP_HTTPS_ADDRESS = "0.0.0.0:"
@@ -361,15 +361,21 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS =
       "org.apache.hadoop.yarn.LocalConfigurationProvider";

-  @Private
-  public static final List<String> RM_SERVICES_ADDRESS_CONF_KEYS =
+  private static final List<String> RM_SERVICES_ADDRESS_CONF_KEYS_HTTP =
       Collections.unmodifiableList(Arrays.asList(
           RM_ADDRESS,
           RM_SCHEDULER_ADDRESS,
           RM_ADMIN_ADDRESS,
           RM_RESOURCE_TRACKER_ADDRESS,
-          HttpConfig.isSecure() ? RM_WEBAPP_HTTPS_ADDRESS
-              : RM_WEBAPP_ADDRESS));
+          RM_WEBAPP_ADDRESS));
+
+  private static final List<String> RM_SERVICES_ADDRESS_CONF_KEYS_HTTPS =
+      Collections.unmodifiableList(Arrays.asList(
+          RM_ADDRESS,
+          RM_SCHEDULER_ADDRESS,
+          RM_ADMIN_ADDRESS,
+          RM_RESOURCE_TRACKER_ADDRESS,
+          RM_WEBAPP_HTTPS_ADDRESS));

   public static final String AUTO_FAILOVER_PREFIX =
       RM_HA_PREFIX + "automatic-failover.";
@@ -1102,10 +1108,9 @@ public class YarnConfiguration extends Configuration {
       YARN_PREFIX + "client.max-nodemanagers-proxies";
   public static final int DEFAULT_NM_CLIENT_MAX_NM_PROXIES = 500;

-  public static final String YARN_HTTP_POLICY_KEY =
-      YARN_PREFIX + "http.policy";
-  public static final String YARN_HTTP_POLICY_DEFAULT =
-      CommonConfigurationKeysPublic.HTTP_POLICY_HTTP_ONLY;
+  public static final String YARN_HTTP_POLICY_KEY = YARN_PREFIX + "http.policy";
+  public static final String YARN_HTTP_POLICY_DEFAULT = HttpConfig.Policy.HTTP_ONLY
+      .name();

   public YarnConfiguration() {
     super();
@@ -1118,6 +1123,12 @@ public class YarnConfiguration extends Configuration {
     }
   }

+  @Private
+  public static List<String> getServiceAddressConfKeys(Configuration conf) {
+    return useHttps(conf) ? RM_SERVICES_ADDRESS_CONF_KEYS_HTTPS
+        : RM_SERVICES_ADDRESS_CONF_KEYS_HTTP;
+  }
+
   /**
    * Get the socket address for <code>name</code> property as a
    * <code>InetSocketAddress</code>.
@@ -1130,7 +1141,7 @@ public class YarnConfiguration extends Configuration {
   public InetSocketAddress getSocketAddr(
       String name, String defaultAddress, int defaultPort) {
     String address;
-    if (HAUtil.isHAEnabled(this) && RM_SERVICES_ADDRESS_CONF_KEYS.contains(name)) {
+    if (HAUtil.isHAEnabled(this) && getServiceAddressConfKeys(this).contains(name)) {
       address = HAUtil.getConfValueForRMInstance(name, defaultAddress, this);
     } else {
       address = get(name, defaultAddress);
@@ -1149,7 +1160,8 @@ public class YarnConfiguration extends Configuration {
   }

   @Private
-  public static int getRMDefaultPortNumber(String addressPrefix) {
+  public static int getRMDefaultPortNumber(String addressPrefix,
+      Configuration conf) {
     if (addressPrefix.equals(YarnConfiguration.RM_ADDRESS)) {
       return YarnConfiguration.DEFAULT_RM_PORT;
     } else if (addressPrefix.equals(YarnConfiguration.RM_SCHEDULER_ADDRESS)) {
@@ -1167,7 +1179,13 @@ public class YarnConfiguration extends Configuration {
       throw new HadoopIllegalArgumentException(
           "Invalid RM RPC address Prefix: " + addressPrefix
               + ". The valid value should be one of "
-              + YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS);
+              + getServiceAddressConfKeys(conf));
     }
   }

+  public static boolean useHttps(Configuration conf) {
+    return HttpConfig.Policy.HTTPS_ONLY == HttpConfig.Policy.fromString(conf
+        .get(YARN_HTTP_POLICY_KEY,
+            YARN_HTTP_POLICY_DEFAULT));
+  }
 }
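The two new helpers make the HTTP policy a per-Configuration decision instead of static HttpConfig state. A short sketch of how they combine; the HttpPolicyProbe class is hypothetical, and the printed keys are whichever list useHttps selects:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class HttpPolicyProbe {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY, "HTTPS_ONLY");
        // true: HA address verification now iterates the HTTPS key list,
        // i.e. RM_WEBAPP_HTTPS_ADDRESS instead of RM_WEBAPP_ADDRESS.
        System.out.println(YarnConfiguration.useHttps(conf));
        for (String key : YarnConfiguration.getServiceAddressConfKeys(conf)) {
          System.out.println(key);
        }
      }
    }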
@@ -29,7 +29,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities;
 import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntity;
 import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors;
@@ -65,12 +64,17 @@ public class TimelineClientImpl extends TimelineClient {
   }

   protected void serviceInit(Configuration conf) throws Exception {
-    resURI = new URI(JOINER.join(HttpConfig.getSchemePrefix(),
-        HttpConfig.isSecure() ? conf.get(
-            YarnConfiguration.AHS_WEBAPP_HTTPS_ADDRESS,
-            YarnConfiguration.DEFAULT_AHS_WEBAPP_HTTPS_ADDRESS) : conf.get(
-            YarnConfiguration.AHS_WEBAPP_ADDRESS,
-            YarnConfiguration.DEFAULT_AHS_WEBAPP_ADDRESS), RESOURCE_URI_STR));
+    if (YarnConfiguration.useHttps(conf)) {
+      resURI = URI
+          .create(JOINER.join("https://", conf.get(
+              YarnConfiguration.AHS_WEBAPP_HTTPS_ADDRESS,
+              YarnConfiguration.DEFAULT_AHS_WEBAPP_HTTPS_ADDRESS),
+              RESOURCE_URI_STR));
+    } else {
+      resURI = URI.create(JOINER.join("http://", conf.get(
+          YarnConfiguration.AHS_WEBAPP_ADDRESS,
+          YarnConfiguration.DEFAULT_AHS_WEBAPP_ADDRESS), RESOURCE_URI_STR));
+    }
     super.serviceInit(conf);
   }

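For reference, the timeline resource URI now follows the same policy switch. A sketch of the equivalent selection logic outside the client; the "/ws/v1/apptimeline" path is a stand-in for RESOURCE_URI_STR, whose actual value is not shown in this hunk, and the TimelineUriSketch class is hypothetical:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class TimelineUriSketch {
      public static URI resourceUri(Configuration conf) {
        boolean https = YarnConfiguration.useHttps(conf);
        // Pick the HTTPS or HTTP webapp address key based on yarn.http.policy.
        String address = https
            ? conf.get(YarnConfiguration.AHS_WEBAPP_HTTPS_ADDRESS,
                YarnConfiguration.DEFAULT_AHS_WEBAPP_HTTPS_ADDRESS)
            : conf.get(YarnConfiguration.AHS_WEBAPP_ADDRESS,
                YarnConfiguration.DEFAULT_AHS_WEBAPP_ADDRESS);
        return URI.create((https ? "https://" : "http://") + address
            + "/ws/v1/apptimeline");
      }
    }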
@@ -37,7 +37,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.security.AdminACLsManager;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -216,9 +218,11 @@ public class WebApps {
           System.exit(1);
         }
       }
-      HttpServer2.Builder builder = new HttpServer2.Builder().setName(name)
-          .addEndpoint(URI.create("http://" + bindAddress + ":" + port))
-          .setConf(conf).setFindPort(findPort)
+      HttpServer2.Builder builder = new HttpServer2.Builder()
+          .setName(name)
+          .addEndpoint(
+              URI.create(WebAppUtils.getHttpSchemePrefix(conf) + bindAddress
+                  + ":" + port)).setConf(conf).setFindPort(findPort)
           .setACL(new AdminACLsManager(conf).getAdminAcl())
           .setPathSpec(pathList.toArray(new String[0]));

@@ -231,6 +235,11 @@ public class WebApps {
             .setKeytabConfKey(spnegoKeytabKey)
             .setSecurityEnabled(UserGroupInformation.isSecurityEnabled());
       }
+
+      if (YarnConfiguration.useHttps(conf)) {
+        WebAppUtils.loadSslConfiguration(builder);
+      }
+
       HttpServer2 server = builder.build();

       for(ServletStruct struct: servlets) {
@@ -26,20 +26,16 @@ import java.net.UnknownHostException;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpConfig.Policy;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.util.ConverterUtils;

-import com.google.common.base.Joiner;
-
 @Private
 @Evolving
 public class WebAppUtils {
-  private static final Joiner JOINER = Joiner.on("");
-
   public static void setRMWebAppPort(Configuration conf, int port) {
     String hostname = getRMWebAppURLWithoutScheme(conf);
     hostname =
@@ -51,7 +47,7 @@ public class WebAppUtils {
   public static void setRMWebAppHostnameAndPort(Configuration conf,
       String hostname, int port) {
     String resolvedAddress = hostname + ":" + port;
-    if (HttpConfig.isSecure()) {
+    if (YarnConfiguration.useHttps(conf)) {
       conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS, resolvedAddress);
     } else {
       conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, resolvedAddress);
@@ -60,7 +56,7 @@ public class WebAppUtils {

   public static void setNMWebAppHostNameAndPort(Configuration conf,
       String hostName, int port) {
-    if (HttpConfig.isSecure()) {
+    if (YarnConfiguration.useHttps(conf)) {
       conf.set(YarnConfiguration.NM_WEBAPP_HTTPS_ADDRESS,
           hostName + ":" + port);
     } else {
@@ -70,16 +66,11 @@ public class WebAppUtils {
   }

   public static String getRMWebAppURLWithScheme(Configuration conf) {
-    return JOINER.join(HttpConfig.getSchemePrefix(),
-        HttpConfig.isSecure() ? conf.get(
-            YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS,
-            YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_ADDRESS) : conf.get(
-            YarnConfiguration.RM_WEBAPP_ADDRESS,
-            YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS));
+    return getHttpSchemePrefix(conf) + getRMWebAppURLWithoutScheme(conf);
   }

   public static String getRMWebAppURLWithoutScheme(Configuration conf) {
-    if (HttpConfig.isSecure()) {
+    if (YarnConfiguration.useHttps(conf)) {
       return conf.get(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS,
           YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_ADDRESS);
     }else {
@@ -97,13 +88,13 @@ public class WebAppUtils {
   }

   public static String getResolvedRMWebAppURLWithScheme(Configuration conf) {
-    return HttpConfig.getSchemePrefix()
+    return getHttpSchemePrefix(conf)
         + getResolvedRMWebAppURLWithoutScheme(conf);
   }

   public static String getResolvedRMWebAppURLWithoutScheme(Configuration conf) {
     return getResolvedRMWebAppURLWithoutScheme(conf,
-        HttpConfig.isSecure() ? Policy.HTTPS_ONLY : Policy.HTTP_ONLY);
+        YarnConfiguration.useHttps(conf) ? Policy.HTTPS_ONLY : Policy.HTTP_ONLY);
   }

   public static String getResolvedRMWebAppURLWithoutScheme(Configuration conf,
@@ -140,7 +131,7 @@ public class WebAppUtils {
   }

   public static String getNMWebAppURLWithoutScheme(Configuration conf) {
-    if (HttpConfig.isSecure()) {
+    if (YarnConfiguration.useHttps(conf)) {
       return conf.get(YarnConfiguration.NM_WEBAPP_HTTPS_ADDRESS,
           YarnConfiguration.DEFAULT_NM_WEBAPP_HTTPS_ADDRESS);
     } else {
@@ -150,7 +141,7 @@ public class WebAppUtils {
   }

   public static String getAHSWebAppURLWithoutScheme(Configuration conf) {
-    if (HttpConfig.isSecure()) {
+    if (YarnConfiguration.useHttps(conf)) {
       return conf.get(YarnConfiguration.AHS_WEBAPP_HTTPS_ADDRESS,
           YarnConfiguration.DEFAULT_AHS_WEBAPP_HTTPS_ADDRESS);
     } else {
@@ -177,8 +168,38 @@ public class WebAppUtils {

   public static String getLogUrl(String nodeHttpAddress, String allocatedNode,
       ContainerId containerId, String user) {
-    return join(HttpConfig.getSchemePrefix(), nodeHttpAddress, "/logs", "/",
+    return join("//", nodeHttpAddress, "/logs", "/",
         allocatedNode, "/", ConverterUtils.toString(containerId), "/",
         ConverterUtils.toString(containerId), "/", user);
   }
+
+  /**
+   * Choose which scheme (HTTP or HTTPS) to use when generating a URL based on
+   * the configuration.
+   *
+   * @return the schmeme (HTTP / HTTPS)
+   */
+  public static String getHttpSchemePrefix(Configuration conf) {
+    return YarnConfiguration.useHttps(conf) ? "https://" : "http://";
+  }
+
+  /**
+   * Load the SSL keystore / truststore into the HttpServer builder.
+   */
+  public static HttpServer2.Builder loadSslConfiguration(
+      HttpServer2.Builder builder) {
+    Configuration sslConf = new Configuration(false);
+    boolean needsClientAuth = YarnConfiguration.YARN_SSL_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
+    sslConf.addResource(YarnConfiguration.YARN_SSL_SERVER_RESOURCE_DEFAULT);
+
+    return builder
+        .needsClientAuth(needsClientAuth)
+        .keyPassword(sslConf.get("ssl.server.keystore.keypassword"))
+        .keyStore(sslConf.get("ssl.server.keystore.location"),
+            sslConf.get("ssl.server.keystore.password"),
+            sslConf.get("ssl.server.keystore.type", "jks"))
+        .trustStore(sslConf.get("ssl.server.truststore.location"),
+            sslConf.get("ssl.server.truststore.password"),
+            sslConf.get("ssl.server.truststore.type", "jks"));
+  }
 }
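Taken together with the WebApps hunk earlier, the new helpers let a server derive both its endpoint scheme and its SSL material from the same yarn.http.policy setting. A sketch of that wiring; the WebServerSketch class, bind address and port are placeholders, not part of the patch:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.http.HttpServer2;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.webapp.util.WebAppUtils;

    public class WebServerSketch {
      public static HttpServer2 start(Configuration conf) throws Exception {
        HttpServer2.Builder builder = new HttpServer2.Builder()
            .setName("sketch")
            // http:// or https:// depending on yarn.http.policy
            .addEndpoint(URI.create(
                WebAppUtils.getHttpSchemePrefix(conf) + "0.0.0.0:0"))
            .setConf(conf)
            .setFindPort(true);
        if (YarnConfiguration.useHttps(conf)) {
          // Pulls keystore/truststore settings from ssl-server.xml
          WebAppUtils.loadSslConfiguration(builder);
        }
        HttpServer2 server = builder.build();
        server.start();
        return server;
      }
    }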
@@ -54,7 +54,7 @@ public class TestHAUtil {
     conf.set(YarnConfiguration.RM_HA_IDS, RM_NODE_IDS_UNTRIMMED);
     conf.set(YarnConfiguration.RM_HA_ID, RM1_NODE_ID_UNTRIMMED);

-    for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
+    for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
       // configuration key itself cannot contains space/tab/return chars.
       conf.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS_UNTRIMMED);
       conf.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS);
@@ -95,7 +95,7 @@ public class TestHAUtil {
         StringUtils.getStringCollection(RM_NODE_IDS), HAUtil.getRMHAIds(conf));
     assertEquals("Should be saved as Trimmed string",
         RM1_NODE_ID, HAUtil.getRMHAId(conf));
-    for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
+    for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
       assertEquals("RPC address not set for " + confKey,
           RM1_ADDRESS, conf.get(confKey));
     }
@@ -117,7 +117,7 @@ public class TestHAUtil {
     // simulate the case YarnConfiguration.RM_HA_ID is not set
     conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + ","
         + RM2_NODE_ID);
-    for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
+    for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
       conf.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS);
       conf.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS);
     }
@@ -134,7 +134,7 @@ public class TestHAUtil {
     conf.set(YarnConfiguration.RM_HA_ID, RM_INVALID_NODE_ID);
     conf.set(YarnConfiguration.RM_HA_IDS, RM_INVALID_NODE_ID + ","
         + RM1_NODE_ID);
-    for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
+    for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
       // simulate xml with invalid node id
       conf.set(confKey + RM_INVALID_NODE_ID, RM_INVALID_NODE_ID);
     }
@@ -169,7 +169,7 @@ public class TestHAUtil {
     conf.clear();
     conf.set(YarnConfiguration.RM_HA_IDS, RM2_NODE_ID + "," + RM3_NODE_ID);
     conf.set(YarnConfiguration.RM_HA_ID, RM1_NODE_ID_UNTRIMMED);
-    for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
+    for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
       conf.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS_UNTRIMMED);
       conf.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS);
       conf.set(HAUtil.addSuffix(confKey, RM3_NODE_ID), RM3_ADDRESS);
@@ -163,7 +163,7 @@ public class AppBlock extends HtmlBlock {
           .append(startTime)
           .append("\",\"<a href='")
           .append(
-              nodeLink == null ? "#" : url(HttpConfig.getSchemePrefix(), nodeLink))
+              nodeLink == null ? "#" : url("//", nodeLink))
           .append("'>")
           .append(
               nodeLink == null ? "N/A" : StringEscapeUtils
@@ -28,8 +28,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpConfig.Policy;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.service.CompositeService;
@@ -397,15 +395,8 @@ public class NodeManager extends CompositeService
     StringUtils.startupShutdownMessage(NodeManager.class, args, LOG);
     NodeManager nodeManager = new NodeManager();
     Configuration conf = new YarnConfiguration();
-    setHttpPolicy(conf);
     nodeManager.initAndStartNodeManager(conf, false);
   }

-  private static void setHttpPolicy(Configuration conf) {
-    HttpConfig.setPolicy(Policy.fromString(conf.get(
-        YarnConfiguration.YARN_HTTP_POLICY_KEY,
-        YarnConfiguration.YARN_HTTP_POLICY_DEFAULT)));
-  }
-
   @VisibleForTesting
   @Private
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.server.nodemanager.webapp;

 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
@@ -1015,7 +1015,6 @@ public class ResourceManager extends CompositeService implements Recoverable {
       ShutdownHookManager.get().addShutdownHook(
         new CompositeServiceShutdownHook(resourceManager),
         SHUTDOWN_HOOK_PRIORITY);
-      setHttpPolicy(conf);
       resourceManager.init(conf);
       resourceManager.start();
     } catch (Throwable t) {
@@ -1023,12 +1022,6 @@ public class ResourceManager extends CompositeService implements Recoverable {
       System.exit(-1);
     }
   }

-  private static void setHttpPolicy(Configuration conf) {
-    HttpConfig.setPolicy(Policy.fromString(conf.get(
-        YarnConfiguration.YARN_HTTP_POLICY_KEY,
-        YarnConfiguration.YARN_HTTP_POLICY_DEFAULT)));
-  }
-
   /**
    * Register the handlers for alwaysOn services
@@ -503,10 +503,11 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
       final String trackingUriWithoutScheme) {
     this.readLock.lock();
     try {
+      final String scheme = WebAppUtils.getHttpSchemePrefix(conf);
       URI trackingUri = StringUtils.isEmpty(trackingUriWithoutScheme) ? null :
-          ProxyUriUtils.getUriFromAMUrl(trackingUriWithoutScheme);
+          ProxyUriUtils.getUriFromAMUrl(scheme, trackingUriWithoutScheme);
       String proxy = WebAppUtils.getProxyHostAndPort(conf);
-      URI proxyUri = ProxyUriUtils.getUriFromAMUrl(proxy);
+      URI proxyUri = ProxyUriUtils.getUriFromAMUrl(scheme, proxy);
       URI result = ProxyUriUtils.getProxyUri(trackingUri, proxyUri,
           applicationAttemptId.getApplicationId());
       return result.toASCIIString();
@@ -989,7 +989,13 @@ public class FairScheduler extends AbstractYarnScheduler {
   private void continuousScheduling() {
     while (true) {
       List<NodeId> nodeIdList = new ArrayList<NodeId>(nodes.keySet());
-      Collections.sort(nodeIdList, nodeAvailableResourceComparator);
+      // Sort the nodes by space available on them, so that we offer
+      // containers on emptier nodes first, facilitating an even spread. This
+      // requires holding the scheduler lock, so that the space available on a
+      // node doesn't change during the sort.
+      synchronized (this) {
+        Collections.sort(nodeIdList, nodeAvailableResourceComparator);
+      }

       // iterate all nodes
       for (NodeId nodeId : nodeIdList) {
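Note on the hunk above: the sort now runs while holding the scheduler lock so the comparator sees a stable view of per-node available resources. A minimal standalone sketch of that snapshot-then-sort-under-lock pattern, using a hypothetical NodeSnapshotSort class and plain Java collections rather than the FairScheduler types:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class NodeSnapshotSort {
  // nodeId -> available memory (MB); updated concurrently by heartbeats.
  private final Map<String, Integer> available = new ConcurrentHashMap<>();

  // Updates take the same lock as the sort below.
  synchronized void updateAvailable(String nodeId, int mb) {
    available.put(nodeId, mb);
  }

  // Snapshot the node ids, then sort while holding the lock so the values the
  // comparator reads cannot change mid-sort (a changing key set can otherwise
  // make the comparator violate its contract).
  List<String> nodesByEmptiestFirst() {
    List<String> ids = new ArrayList<>(available.keySet());
    synchronized (this) {
      ids.sort(Comparator.comparingInt(
          (String id) -> available.getOrDefault(id, 0)).reversed());
    }
    return ids;
  }

  public static void main(String[] args) {
    NodeSnapshotSort s = new NodeSnapshotSort();
    s.updateAvailable("n1", 2048);
    s.updateAvailable("n2", 8192);
    System.out.println(s.nodesByEmptiestFirst()); // [n2, n1]
  }
}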
@@ -1366,24 +1372,26 @@ public class FairScheduler extends AbstractYarnScheduler {
       throw new YarnException("App to be moved " + appId + " not found.");
     }
     FSSchedulerApp attempt = (FSSchedulerApp) app.getCurrentAppAttempt();
-
-    FSLeafQueue oldQueue = (FSLeafQueue) app.getQueue();
-    FSLeafQueue targetQueue = queueMgr.getLeafQueue(queueName, false);
-    if (targetQueue == null) {
-      throw new YarnException("Target queue " + queueName
-          + " not found or is not a leaf queue.");
-    }
-    if (targetQueue == oldQueue) {
-      return oldQueue.getQueueName();
-    }
-
-    if (oldQueue.getRunnableAppSchedulables().contains(
-        attempt.getAppSchedulable())) {
-      verifyMoveDoesNotViolateConstraints(attempt, oldQueue, targetQueue);
-    }
-
-    executeMove(app, attempt, oldQueue, targetQueue);
-    return targetQueue.getQueueName();
+    // To serialize with FairScheduler#allocate, synchronize on app attempt
+    synchronized (attempt) {
+      FSLeafQueue oldQueue = (FSLeafQueue) app.getQueue();
+      FSLeafQueue targetQueue = queueMgr.getLeafQueue(queueName, false);
+      if (targetQueue == null) {
+        throw new YarnException("Target queue " + queueName
+            + " not found or is not a leaf queue.");
+      }
+      if (targetQueue == oldQueue) {
+        return oldQueue.getQueueName();
+      }
+
+      if (oldQueue.getRunnableAppSchedulables().contains(
+          attempt.getAppSchedulable())) {
+        verifyMoveDoesNotViolateConstraints(attempt, oldQueue, targetQueue);
+      }
+
+      executeMove(app, attempt, oldQueue, targetQueue);
+      return targetQueue.getQueueName();
+    }
   }

   private void verifyMoveDoesNotViolateConstraints(FSSchedulerApp app,
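Note on the hunk above: the queue move is serialized with allocation by locking the application attempt object. A minimal standalone sketch of that per-attempt locking pattern, with hypothetical Attempt and method names standing in for the FairScheduler classes:

// Both operations lock the same attempt object, so a queue move can never
// interleave with an allocation for that attempt.
public class PerAttemptSerialization {
  static class Attempt {
    String queue = "default";
  }

  // Stand-in for FairScheduler#allocate: reads state tied to the attempt.
  static void allocate(Attempt attempt) {
    synchronized (attempt) {
      // ... allocate containers against attempt.queue ...
    }
  }

  // Stand-in for moveApplication: changes the queue atomically w.r.t. allocate.
  static void move(Attempt attempt, String targetQueue) {
    synchronized (attempt) {
      attempt.queue = targetQueue;
    }
  }

  public static void main(String[] args) throws InterruptedException {
    Attempt a = new Attempt();
    Thread t = new Thread(() -> allocate(a));
    t.start();
    move(a, "research");
    t.join();
    System.out.println(a.queue); // research
  }
}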
@@ -1420,8 +1428,8 @@ public class FairScheduler extends AbstractYarnScheduler {
   }

   /**
-   * Helper for moveApplication, which is synchronized, so all operations will
-   * be atomic.
+   * Helper for moveApplication, which has appropriate synchronization, so all
+   * operations will be atomic.
    */
   private void executeMove(SchedulerApplication app, FSSchedulerApp attempt,
       FSLeafQueue oldQueue, FSLeafQueue newQueue) {
@@ -27,7 +27,7 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH;

 import java.util.Collection;

-import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
@@ -46,6 +46,7 @@ import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;

@@ -55,13 +56,16 @@ public class AppBlock extends HtmlBlock {

   private ApplicationACLsManager aclsManager;
   private QueueACLsManager queueACLsManager;
+  private final Configuration conf;

   @Inject
   AppBlock(ResourceManager rm, ViewContext ctx,
-      ApplicationACLsManager aclsManager, QueueACLsManager queueACLsManager) {
+      ApplicationACLsManager aclsManager, QueueACLsManager queueACLsManager,
+      Configuration conf) {
     super(ctx);
     this.aclsManager = aclsManager;
     this.queueACLsManager = queueACLsManager;
+    this.conf = conf;
   }

   @Override
@@ -86,7 +90,7 @@ public class AppBlock extends HtmlBlock {
       puts("Application not found: "+ aid);
       return;
     }
-    AppInfo app = new AppInfo(rmApp, true);
+    AppInfo app = new AppInfo(rmApp, true, WebAppUtils.getHttpSchemePrefix(conf));

     // Check for the authorization.
     String remoteUser = request().getRemoteUser();
@@ -146,7 +150,7 @@ public class AppBlock extends HtmlBlock {
       table.tr((odd = !odd) ? _ODD : _EVEN).
         td(String.valueOf(attemptInfo.getAttemptId())).
         td(Times.format(attemptInfo.getStartTime())).
-        td().a(".nodelink", url(HttpConfig.getSchemePrefix(),
+        td().a(".nodelink", url("//",
           attemptInfo.getNodeHttpAddress()),
           attemptInfo.getNodeHttpAddress())._().
         td().a(".logslink", url(attemptInfo.getLogsLink()), "logs")._().
@@ -28,6 +28,7 @@ import java.util.HashSet;
 import java.util.concurrent.ConcurrentMap;

 import org.apache.commons.lang.StringEscapeUtils;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
@@ -36,16 +37,19 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;

 import com.google.inject.Inject;

 class AppsBlock extends HtmlBlock {
   final ConcurrentMap<ApplicationId, RMApp> apps;
+  private final Configuration conf;

-  @Inject AppsBlock(RMContext rmContext, ViewContext ctx) {
+  @Inject AppsBlock(RMContext rmContext, ViewContext ctx, Configuration conf) {
     super(ctx);
     apps = rmContext.getRMApps();
+    this.conf = conf;
   }

   @Override public void render(Block html) {
@@ -79,7 +83,7 @@ class AppsBlock extends HtmlBlock {
       if (reqAppStates != null && !reqAppStates.contains(app.createApplicationState())) {
         continue;
       }
-      AppInfo appInfo = new AppInfo(app, true);
+      AppInfo appInfo = new AppInfo(app, true, WebAppUtils.getHttpSchemePrefix(conf));
       String percent = String.format("%.1f", appInfo.getProgress());
       //AppID numerical value parsed by parseHadoopID in yarn.dt.plugins.js
       appsTableData.append("[\"<a href='")
@@ -28,6 +28,7 @@ import java.util.HashSet;
 import java.util.concurrent.ConcurrentMap;

 import org.apache.commons.lang.StringEscapeUtils;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
@@ -40,6 +41,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FairSchedulerInf
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;

 import com.google.inject.Inject;
@@ -51,13 +53,15 @@ import com.google.inject.Inject;
 public class FairSchedulerAppsBlock extends HtmlBlock {
   final ConcurrentMap<ApplicationId, RMApp> apps;
   final FairSchedulerInfo fsinfo;
+  final Configuration conf;

   @Inject public FairSchedulerAppsBlock(RMContext rmContext,
-      ResourceManager rm, ViewContext ctx) {
+      ResourceManager rm, ViewContext ctx, Configuration conf) {
     super(ctx);
     FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
     fsinfo = new FairSchedulerInfo(scheduler);
     apps = rmContext.getRMApps();
+    this.conf = conf;
   }

   @Override public void render(Block html) {
@@ -91,7 +95,7 @@ public class FairSchedulerAppsBlock extends HtmlBlock {
       if (reqAppStates != null && !reqAppStates.contains(app.getState())) {
         continue;
       }
-      AppInfo appInfo = new AppInfo(app, true);
+      AppInfo appInfo = new AppInfo(app, true, WebAppUtils.getHttpSchemePrefix(conf));
       String percent = String.format("%.1f", appInfo.getProgress());
       ApplicationAttemptId attemptId = app.getCurrentAppAttempt().getAppAttemptId();
       int fairShare = fsinfo.getAppFairShare(attemptId);
@@ -26,7 +26,6 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;

 import java.util.Collection;

-import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
@@ -119,7 +118,7 @@ class NodesPage extends RmView {
         row.td()._("N/A")._();
       } else {
         String httpAddress = info.getNodeHTTPAddress();
-        row.td().a(HttpConfig.getSchemePrefix() + httpAddress,
+        row.td().a("//" + httpAddress,
             httpAddress)._();
       }
       row.td().br().$title(String.valueOf(info.getLastHealthUpdate()))._().
@@ -41,6 +41,7 @@ import javax.ws.rs.core.MediaType;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
@@ -85,6 +86,7 @@ import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
 import org.apache.hadoop.yarn.webapp.NotFoundException;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;

 import com.google.inject.Inject;
 import com.google.inject.Singleton;
@@ -101,16 +103,18 @@ public class RMWebServices {
       .getRecordFactory(null);
   private final ApplicationACLsManager aclsManager;
   private final QueueACLsManager queueACLsManager;
+  private final Configuration conf;
   private @Context HttpServletResponse response;

   @Inject
   public RMWebServices(final ResourceManager rm,
       final ApplicationACLsManager aclsManager,
-      final QueueACLsManager queueACLsManager) {
+      final QueueACLsManager queueACLsManager,
+      Configuration conf) {
     this.rm = rm;
     this.aclsManager = aclsManager;
     this.queueACLsManager = queueACLsManager;
+    this.conf = conf;
   }

   protected Boolean hasAccess(RMApp app, HttpServletRequest hsr) {
@@ -415,7 +419,8 @@ public class RMWebServices {
        }
      }

-      AppInfo app = new AppInfo(rmapp, hasAccess(rmapp, hsr));
+      AppInfo app = new AppInfo(rmapp, hasAccess(rmapp, hsr),
+          WebAppUtils.getHttpSchemePrefix(conf));
       allApps.add(app);
     }
     return allApps;
@@ -555,7 +560,7 @@ public class RMWebServices {
     if (app == null) {
       throw new NotFoundException("app with id: " + appId + " not found");
     }
-    return new AppInfo(app, hasAccess(app, hsr));
+    return new AppInfo(app, hasAccess(app, hsr), hsr.getScheme() + "://");
   }

   @GET
@@ -23,7 +23,6 @@ import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlRootElement;

-import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.util.ConverterUtils;
@@ -56,7 +55,7 @@ public class AppAttemptInfo {
       this.containerId = masterContainer.getId().toString();
       this.nodeHttpAddress = masterContainer.getNodeHttpAddress();
       this.nodeId = masterContainer.getNodeId().toString();
-      this.logsLink = join(HttpConfig.getSchemePrefix(),
+      this.logsLink = join("//",
           masterContainer.getNodeHttpAddress(),
           "/node", "/containerlogs/",
           ConverterUtils.toString(masterContainer.getId()), "/", user);
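Note on the hunk above: dropping the configured prefix in favor of "//" makes the log link scheme-relative, so the browser reuses whatever scheme the web page itself was served over. A standalone sketch of the resulting link shape, using a hypothetical helper rather than the YARN join() utility:

// Builds a scheme-relative log link like the one produced above:
// "//host:port/node/containerlogs/<containerId>/<user>".
// The browser resolves "//" against the scheme of the page it came from.
public class SchemeRelativeLink {
  static String logsLink(String nodeHttpAddress, String containerId, String user) {
    return "//" + nodeHttpAddress + "/node/containerlogs/" + containerId + "/" + user;
  }

  public static void main(String[] args) {
    System.out.println(logsLink("host:3425", "container_1_0001_01_000001", "user"));
    // -> //host:3425/node/containerlogs/container_1_0001_01_000001/user
  }
}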
@@ -25,7 +25,6 @@ import javax.xml.bind.annotation.XmlRootElement;
 import javax.xml.bind.annotation.XmlTransient;

 import com.google.common.base.Joiner;
-import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.Container;
@@ -53,6 +52,8 @@ public class AppInfo {
   protected boolean amContainerLogsExist = false;
   @XmlTransient
   protected ApplicationId applicationId;
+  @XmlTransient
+  private String schemePrefix;

   // these are ok for any user to see
   protected String id;
@@ -82,12 +83,8 @@ public class AppInfo {
   public AppInfo() {
   } // JAXB needs this

-  public AppInfo(RMApp app, Boolean hasAccess, String host) {
-    this(app, hasAccess);
-  }
-
-  public AppInfo(RMApp app, Boolean hasAccess) {
-
+  public AppInfo(RMApp app, Boolean hasAccess, String schemePrefix) {
+    this.schemePrefix = schemePrefix;
     if (app != null) {
       String trackingUrl = app.getTrackingUrl();
       this.state = app.createApplicationState();
@@ -100,7 +97,7 @@ public class AppInfo {
           .getFinishTime() == 0 ? "ApplicationMaster" : "History");
       if (!trackingUrlIsNotReady) {
         this.trackingUrl =
-            WebAppUtils.getURLWithScheme(HttpConfig.getSchemePrefix(),
+            WebAppUtils.getURLWithScheme(schemePrefix,
                 trackingUrl);
         this.trackingUrlPretty = this.trackingUrl;
       } else {
@@ -134,7 +131,7 @@ public class AppInfo {
         Container masterContainer = attempt.getMasterContainer();
         if (masterContainer != null) {
           this.amContainerLogsExist = true;
-          String url = join(HttpConfig.getSchemePrefix(),
+          String url = join(schemePrefix,
               masterContainer.getNodeHttpAddress(),
               "/node", "/containerlogs/",
               ConverterUtils.toString(masterContainer.getId()),
@@ -63,8 +63,10 @@ public class TestRMHA {
   @Before
   public void setUp() throws Exception {
     configuration.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
-    configuration.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
-    for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
+    configuration.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + ","
+        + RM2_NODE_ID);
+    for (String confKey : YarnConfiguration
+        .getServiceAddressConfKeys(configuration)) {
       configuration.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS);
       configuration.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS);
       configuration.set(HAUtil.addSuffix(confKey, RM3_NODE_ID), RM3_ADDRESS);
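Note on the hunk above: the test now asks the configuration for the relevant service-address keys and sets one value per RM id by suffixing each key. A standalone sketch of the key-suffixing pattern, using a plain map in place of Configuration and an assumed (illustrative) list of key names:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class HaAddressKeys {
  // Append the RM id to a base key, e.g.
  // "yarn.resourcemanager.webapp.address" + ".rm1".
  static String addSuffix(String key, String rmId) {
    return key + "." + rmId;
  }

  public static void main(String[] args) {
    List<String> serviceAddressKeys = List.of(
        "yarn.resourcemanager.address",
        "yarn.resourcemanager.scheduler.address",
        "yarn.resourcemanager.webapp.address");
    Map<String, String> conf = new HashMap<>();
    for (String key : serviceAddressKeys) {
      for (String rmId : List.of("rm1", "rm2")) {
        conf.put(addSuffix(key, rmId), "localhost:0");
      }
    }
    conf.forEach((k, v) -> System.out.println(k + " = " + v));
  }
}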
@@ -329,7 +331,7 @@ public class TestRMHA {
     Configuration conf = new YarnConfiguration(configuration);
     rm = new MockRM(conf);
     rm.init(conf);
-    for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
+    for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
       assertEquals("RPC address not set for " + confKey,
           RM1_ADDRESS, conf.get(HAUtil.addSuffix(confKey, RM1_NODE_ID)));
       assertEquals("RPC address not set for " + confKey,
@@ -134,7 +134,9 @@ public class TestZKRMStateStore extends RMStateStoreTestBase {
     conf.set(YarnConfiguration.RM_ZK_ADDRESS, hostPort);
     conf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS, ZK_TIMEOUT_MS);
     conf.set(YarnConfiguration.RM_HA_ID, rmId);
-    for (String rpcAddress : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
+    conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, "localhost:0");
+
+    for (String rpcAddress : YarnConfiguration.getServiceAddressConfKeys(conf)) {
       for (String id : HAUtil.getRMHAIds(conf)) {
         conf.set(HAUtil.addSuffix(rpcAddress, id), "localhost:0");
       }
@@ -285,13 +285,14 @@ public class TestRMAppAttemptTransitions {

   private String getProxyUrl(RMAppAttempt appAttempt) {
     String url = null;
+    final String scheme = WebAppUtils.getHttpSchemePrefix(conf);
     try {
       URI trackingUri =
           StringUtils.isEmpty(appAttempt.getOriginalTrackingUrl()) ? null :
               ProxyUriUtils
-                  .getUriFromAMUrl(appAttempt.getOriginalTrackingUrl());
+                  .getUriFromAMUrl(scheme, appAttempt.getOriginalTrackingUrl());
       String proxy = WebAppUtils.getProxyHostAndPort(conf);
-      URI proxyUri = ProxyUriUtils.getUriFromAMUrl(proxy);
+      URI proxyUri = ProxyUriUtils.getUriFromAMUrl(scheme, proxy);
       URI result = ProxyUriUtils.getProxyUri(trackingUri, proxyUri,
           appAttempt.getAppAttemptId().getApplicationId());
       url = result.toASCIIString();
@@ -105,7 +105,7 @@ public class TestRMContainerImpl {
     drainDispatcher.await();
     assertEquals(RMContainerState.RUNNING, rmContainer.getState());
     assertEquals(
-        "http://host:3465/logs/host:3425/container_1_0001_01_000001/container_1_0001_01_000001/user",
+        "//host:3465/logs/host:3425/container_1_0001_01_000001/container_1_0001_01_000001/user",
         rmContainer.getLogURL());

     // In RUNNING state. Verify RELEASED and associated actions.
@@ -192,7 +192,7 @@ public class TestRMContainerImpl {
     drainDispatcher.await();
     assertEquals(RMContainerState.RUNNING, rmContainer.getState());
     assertEquals(
-        "http://host:3465/logs/host:3425/container_1_0001_01_000001/container_1_0001_01_000001/user",
+        "//host:3465/logs/host:3425/container_1_0001_01_000001/container_1_0001_01_000001/user",
         rmContainer.getLogURL());

     // In RUNNING state. Verify EXPIRE and associated actions.
@@ -1606,8 +1606,7 @@ public class TestRMWebServicesApps extends JerseyTest {
         .getMasterContainer().getNodeHttpAddress(), nodeHttpAddress);
     WebServicesTestUtils.checkStringMatch("nodeId", appAttempt
         .getMasterContainer().getNodeId().toString(), nodeId);
-    assertTrue("logsLink doesn't match",
-        logsLink.startsWith("http://"));
+    assertTrue("logsLink doesn't match", logsLink.startsWith("//"));
     assertTrue(
         "logsLink doesn't contain user info", logsLink.endsWith("/"
             + user));
@@ -253,7 +253,7 @@ public class MiniYARNCluster extends CompositeService {

   private void setHARMConfiguration(final int index, Configuration conf) {
     String hostname = MiniYARNCluster.getHostname();
-    for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
+    for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
       conf.set(HAUtil.addSuffix(confKey, rmIds[index]), hostname + ":0");
     }
   }
@@ -18,23 +18,17 @@

 package org.apache.hadoop.yarn.server;

-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ha.HAServiceProtocol;
-import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.server.resourcemanager.AdminService;
-import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
-import org.junit.Before;
-import org.junit.Test;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;

 import java.io.IOException;

-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotSame;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.junit.Before;
+import org.junit.Test;

 public class TestMiniYARNClusterForHA {
   MiniYARNCluster cluster;
@@ -43,6 +37,8 @@ public class TestMiniYARNClusterForHA {
   public void setup() throws IOException, InterruptedException {
     Configuration conf = new YarnConfiguration();
     conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
+    conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, "localhost:0");
+
     cluster = new MiniYARNCluster(TestMiniYARNClusterForHA.class.getName(),
         2, 1, 1, 1);
     cluster.init(conf);
@@ -135,27 +135,6 @@ public class ProxyUriUtils {
     }
   }

-  /**
-   * Create a URI form a no scheme Url, such as is returned by the AM.
-   * @param url the URL format returned by an AM. This may or may not contain
-   * scheme.
-   * @return a URI with an http scheme
-   * @throws URISyntaxException if the url is not formatted correctly.
-   */
-  public static URI getUriFromAMUrl(String url)
-      throws URISyntaxException {
-    if (getSchemeFromUrl(url).isEmpty()) {
-      /*
-       * check is made to make sure if AM reports with scheme then it will be
-       * used by default otherwise it will default to the one configured using
-       * "yarn.http.policy".
-       */
-      return new URI(HttpConfig.getSchemePrefix() + url);
-    } else {
-      return new URI(url);
-    }
-  }
-
   /**
    * Create a URI form a no scheme Url, such as is returned by the AM.
    * @param noSchemeUrl the URL formate returned by an AM
@@ -170,7 +149,7 @@ public class ProxyUriUtils {
        * used by default otherwise it will default to the one configured using
        * "yarn.http.policy".
        */
-      return new URI(scheme + "://" + noSchemeUrl);
+      return new URI(scheme + noSchemeUrl);
     } else {
       return new URI(noSchemeUrl);
     }
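Note on the two hunks above: the single-argument getUriFromAMUrl(String) that consulted the static HttpConfig policy is gone, and the remaining variant expects a caller-supplied prefix that already carries "://" (e.g. "http://" or "https://"), applying it only when the AM-reported URL has no scheme of its own. A standalone sketch of that behavior with plain java.net.URI and a simplified scheme check, not the ProxyUriUtils class:

import java.net.URI;
import java.net.URISyntaxException;

public class AmUrlToUri {
  // AM tracking URLs are typically "host:port/path"; treat the URL as
  // scheme-less unless it explicitly contains "://", and only then apply the
  // configured prefix.
  static URI fromAmUrl(String schemePrefix, String url) throws URISyntaxException {
    return url.contains("://") ? new URI(url) : new URI(schemePrefix + url);
  }

  public static void main(String[] args) throws URISyntaxException {
    System.out.println(fromAmUrl("https://", "host:8088/proxy"));        // https://host:8088/proxy
    System.out.println(fromAmUrl("https://", "http://host:8088/proxy")); // unchanged
  }
}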
@@ -90,14 +90,22 @@ public class WebAppProxy extends AbstractService {
   @Override
   protected void serviceStart() throws Exception {
     try {
-      proxyServer = new HttpServer2.Builder().setName("proxy")
-          .addEndpoint(URI.create("http://" + bindAddress + ":" + port))
-          .setFindPort(port == 0)
-          .setConf(getConfig()).setACL(acl).build();
-      proxyServer.addServlet(ProxyUriUtils.PROXY_SERVLET_NAME,
+      Configuration conf = getConfig();
+      HttpServer2.Builder b = new HttpServer2.Builder()
+          .setName("proxy")
+          .addEndpoint(
+              URI.create(WebAppUtils.getHttpSchemePrefix(conf) + bindAddress
+                  + ":" + port)).setFindPort(port == 0).setConf(getConfig())
+          .setACL(acl);
+      if (YarnConfiguration.useHttps(conf)) {
+        WebAppUtils.loadSslConfiguration(b);
+      }
+      proxyServer = b.build();
+      proxyServer.addServlet(ProxyUriUtils.PROXY_SERVLET_NAME,
          ProxyUriUtils.PROXY_PATH_SPEC, WebAppProxyServlet.class);
       proxyServer.setAttribute(FETCHER_ATTRIBUTE, fetcher);
-      proxyServer.setAttribute(IS_SECURITY_ENABLED_ATTRIBUTE, isSecurityEnabled);
+      proxyServer
+          .setAttribute(IS_SECURITY_ENABLED_ATTRIBUTE, isSecurityEnabled);
       proxyServer.setAttribute(PROXY_HOST_ATTRIBUTE, proxyHost);
       proxyServer.start();
     } catch (IOException e) {
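Note on the hunk above: the proxy now derives its endpoint scheme from configuration instead of hard-coding "http://", and loads SSL settings only when HTTPS is in effect. A standalone sketch of the endpoint-selection logic, using a plain map and a hypothetical policy key rather than the HttpServer2 builder:

import java.net.URI;
import java.util.HashMap;
import java.util.Map;

public class ProxyEndpointSelection {
  // Hypothetical stand-in for a "yarn.http.policy"-style setting.
  static final String POLICY_KEY = "http.policy";

  static String schemePrefix(Map<String, String> conf) {
    return "HTTPS_ONLY".equals(conf.get(POLICY_KEY)) ? "https://" : "http://";
  }

  static URI endpoint(Map<String, String> conf, String bindAddress, int port) {
    if (schemePrefix(conf).startsWith("https")) {
      // In the real service this is where keystore/truststore settings
      // would be loaded before building the server.
    }
    return URI.create(schemePrefix(conf) + bindAddress + ":" + port);
  }

  public static void main(String[] args) {
    Map<String, String> conf = new HashMap<>();
    conf.put(POLICY_KEY, "HTTPS_ONLY");
    System.out.println(endpoint(conf, "0.0.0.0", 8089)); // https://0.0.0.0:8089
  }
}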
@@ -69,6 +69,7 @@ public class WebAppProxyServlet extends HttpServlet {

   private final List<TrackingUriPlugin> trackingUriPlugins;
   private final String rmAppPageUrlBase;
+  private final transient YarnConfiguration conf;

   private static class _ implements Hamlet._ {
     //Empty
@@ -90,7 +91,7 @@ public class WebAppProxyServlet extends HttpServlet {
   public WebAppProxyServlet()
   {
     super();
-    YarnConfiguration conf = new YarnConfiguration();
+    conf = new YarnConfiguration();
     this.trackingUriPlugins =
         conf.getInstances(YarnConfiguration.YARN_TRACKING_URL_GENERATOR,
             TrackingUriPlugin.class);
@@ -300,7 +301,8 @@ public class WebAppProxyServlet extends HttpServlet {
         return;
       } else {
         if (ProxyUriUtils.getSchemeFromUrl(original).isEmpty()) {
-          trackingUri = ProxyUriUtils.getUriFromAMUrl("http", original);
+          trackingUri = ProxyUriUtils.getUriFromAMUrl(
+              WebAppUtils.getHttpSchemePrefix(conf), original);
         } else {
           trackingUri = new URI(original);
         }
@@ -24,7 +24,6 @@ import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
 import org.apache.hadoop.http.FilterInitializer;
-import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;

@@ -39,7 +38,7 @@ public class AmFilterInitializer extends FilterInitializer {
     String[] parts = proxy.split(":");
     params.put(AmIpFilter.PROXY_HOST, parts[0]);
     params.put(AmIpFilter.PROXY_URI_BASE,
-        HttpConfig.getSchemePrefix() + proxy +
+        WebAppUtils.getHttpSchemePrefix(conf) + proxy +
         System.getenv(ApplicationConstants.APPLICATION_WEB_PROXY_BASE_ENV));
     container.addFilter(FILTER_NAME, FILTER_CLASS, params);
   }
@@ -288,8 +288,9 @@ public class TestWebAppProxyServlet {
         YarnConfiguration.DEFAULT_YARN_ADMIN_ACL));
     proxyServer = new HttpServer2.Builder()
         .setName("proxy")
-        .addEndpoint(URI.create("http://" + bindAddress + ":0"))
-        .setFindPort(true)
+        .addEndpoint(
+            URI.create(WebAppUtils.getHttpSchemePrefix(conf) + bindAddress
+                + ":0")).setFindPort(true)
         .setConf(conf)
         .setACL(acl)
         .build();