Merge r1555021 through r1569522 from trunk.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5535@1569524 13f79535-47bb-0310-9956-ffa450edef68
commit 99b4caa888
@@ -118,6 +118,9 @@ Trunk (Unreleased)

HADOOP-10325. Improve jenkins javadoc warnings from test-patch.sh (cmccabe)

HADOOP-10342. Add a new method to UGI to use a Kerberos login subject to
build a new UGI. (Larry McCay via omalley)

BUG FIXES

HADOOP-9451. Fault single-layer config if node group topology is enabled.
@@ -298,6 +301,18 @@ Trunk (Unreleased)

HADOOP-8589. ViewFs tests fail when tests and home dirs are nested (sanjay Radia)

Release 2.5.0 - UNRELEASED

INCOMPATIBLE CHANGES

NEW FEATURES

IMPROVEMENTS

OPTIMIZATIONS

BUG FIXES

Release 2.4.0 - UNRELEASED

INCOMPATIBLE CHANGES
@@ -342,6 +357,8 @@ Release 2.4.0 - UNRELEASED
HADOOP-10249. LdapGroupsMapping should trim ldap password read from file.
(Dilli Armugam via suresh)

HADOOP-10346. Deadlock while logging tokens (jlowe)

Release 2.3.1 - UNRELEASED

INCOMPATIBLE CHANGES

@@ -279,7 +279,10 @@ public class CommonConfigurationKeysPublic {
60;

// HTTP policies to be used in configuration
// Use HttpPolicy.name() instead
@Deprecated
public static final String HTTP_POLICY_HTTP_ONLY = "HTTP_ONLY";
@Deprecated
public static final String HTTP_POLICY_HTTPS_ONLY = "HTTPS_ONLY";
}

@@ -28,7 +28,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class HttpConfig {
private static Policy policy;
public enum Policy {
HTTP_ONLY,
HTTPS_ONLY,
@@ -52,28 +51,4 @@ public class HttpConfig {
return this == HTTPS_ONLY || this == HTTP_AND_HTTPS;
}
}

static {
Configuration conf = new Configuration();
boolean sslEnabled = conf.getBoolean(
CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_KEY,
CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_DEFAULT);
policy = sslEnabled ? Policy.HTTPS_ONLY : Policy.HTTP_ONLY;
}

public static void setPolicy(Policy policy) {
HttpConfig.policy = policy;
}

public static boolean isSecure() {
return policy == Policy.HTTPS_ONLY;
}

public static String getSchemePrefix() {
return (isSecure()) ? "https://" : "http://";
}

public static String getScheme(Policy policy) {
return policy == Policy.HTTPS_ONLY ? "https://" : "http://";
}
}

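Note: the hunk above removes HttpConfig's global state (the static policy, setPolicy(), isSecure() and getSchemePrefix()); callers now resolve an HttpConfig.Policy themselves and derive the scheme from it, as the YARN/MR changes later in this diff do. A minimal sketch of that pattern, assuming only the Policy enum shown above (the helper class name is illustrative):

import org.apache.hadoop.http.HttpConfig;

public final class SchemeUtil {
  private SchemeUtil() {}

  // Derive the scheme prefix from an explicitly supplied policy instead of
  // consulting the static HttpConfig state removed in this change.
  public static String schemePrefix(HttpConfig.Policy policy) {
    return policy == HttpConfig.Policy.HTTPS_ONLY ? "https://" : "http://";
  }
}
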
@@ -649,7 +649,7 @@ public class Client {
// try re-login
if (UserGroupInformation.isLoginKeytabBased()) {
UserGroupInformation.getLoginUser().reloginFromKeytab();
} else {
} else if (UserGroupInformation.isLoginTicketBased()) {
UserGroupInformation.getLoginUser().reloginFromTicketCache();
}
// have granularity of milliseconds

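With isLoginTicketBased() added to UserGroupInformation, the RPC client's re-login path above now distinguishes keytab logins from ticket-cache logins. A hedged sketch of the same decision in isolation (the surrounding retry loop is omitted; the helper class name is illustrative):

import java.io.IOException;
import org.apache.hadoop.security.UserGroupInformation;

public final class ReloginHelper {
  private ReloginHelper() {}

  // Re-login with whichever Kerberos credential source was used originally;
  // do nothing if the login was neither keytab- nor ticket-cache-based.
  public static void relogin() throws IOException {
    if (UserGroupInformation.isLoginKeytabBased()) {
      UserGroupInformation.getLoginUser().reloginFromKeytab();
    } else if (UserGroupInformation.isLoginTicketBased()) {
      UserGroupInformation.getLoginUser().reloginFromTicketCache();
    }
  }
}
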
@@ -702,6 +702,35 @@ public class UserGroupInformation {
}
}

/**
* Create a UserGroupInformation from a Subject with Kerberos principal.
*
* @param user The KerberosPrincipal to use in UGI
*
* @throws IOException if the kerberos login fails
*/
public static UserGroupInformation getUGIFromSubject(Subject subject)
throws IOException {
if (subject == null) {
throw new IOException("Subject must not be null");
}

if (subject.getPrincipals(KerberosPrincipal.class).isEmpty()) {
throw new IOException("Provided Subject must contain a KerberosPrincipal");
}

KerberosPrincipal principal =
subject.getPrincipals(KerberosPrincipal.class).iterator().next();

User ugiUser = new User(principal.getName(),
AuthenticationMethod.KERBEROS, null);
subject.getPrincipals().add(ugiUser);
UserGroupInformation ugi = new UserGroupInformation(subject);
ugi.setLogin(null);
ugi.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
return ugi;
}

/**
* Get the currently logged in user.
* @return the logged in user
@@ -1101,6 +1130,14 @@ public class UserGroupInformation {
return getLoginUser().isKeytab;
}

/**
* Did the login happen via ticket cache
* @return true or false
*/
public static boolean isLoginTicketBased() throws IOException {
return getLoginUser().isKrbTkt;
}

/**
* Create a user from a login name. It is intended to be used for remote
* users in RPC, since it won't have any credentials.
@@ -1619,5 +1656,4 @@ public class UserGroupInformation {
System.out.println("Keytab " + loginUser.isKeytab);
}
}

}

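A minimal sketch of how a caller holding an externally established Kerberos Subject (for example from a JAAS LoginContext) might use the new getUGIFromSubject(); the JAAS configuration entry name "ExternalKerberosLogin" and the listed path are illustrative assumptions, not part of this commit:

import java.security.PrivilegedExceptionAction;
import javax.security.auth.Subject;
import javax.security.auth.login.LoginContext;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class ExternalSubjectExample {
  public static void main(String[] args) throws Exception {
    // Authenticate outside Hadoop; the Subject ends up holding a KerberosPrincipal.
    LoginContext login = new LoginContext("ExternalKerberosLogin");  // illustrative JAAS entry
    login.login();
    Subject subject = login.getSubject();

    // Wrap the pre-authenticated Subject in a UGI without triggering a new login.
    UserGroupInformation ugi = UserGroupInformation.getUGIFromSubject(subject);

    // Perform Hadoop operations as that principal.
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        System.out.println(fs.listStatus(new Path("/")).length + " entries under /");
        return null;
      }
    });
  }
}
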
@@ -105,15 +105,18 @@ public class Token<T extends TokenIdentifier> implements Writable {
return identifier;
}

private static synchronized Class<? extends TokenIdentifier>
private static Class<? extends TokenIdentifier>
getClassForIdentifier(Text kind) {
Class<? extends TokenIdentifier> cls = null;
synchronized (Token.class) {
if (tokenKindMap == null) {
tokenKindMap = Maps.newHashMap();
for (TokenIdentifier id : ServiceLoader.load(TokenIdentifier.class)) {
tokenKindMap.put(id.getKind(), id.getClass());
}
}
Class<? extends TokenIdentifier> cls = tokenKindMap.get(kind);
cls = tokenKindMap.get(kind);
}
if (cls == null) {
LOG.warn("Cannot find class for token kind " + kind);
return null;

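The HADOOP-10346 change above narrows the lock: class-level synchronization now covers only the lazy initialization and lookup of tokenKindMap, so the map access no longer holds the monitor during unrelated work. A generic sketch of the pattern under that assumption (names are illustrative, not Hadoop APIs):

import java.util.HashMap;
import java.util.Map;

public class LazyRegistry {
  private static Map<String, Class<?>> registry;

  // Confine the lock to building and reading the shared map; the looked-up
  // value is then used outside the synchronized block.
  static Class<?> lookup(String kind) {
    Class<?> cls;
    synchronized (LazyRegistry.class) {
      if (registry == null) {
        registry = new HashMap<String, Class<?>>();
        // ... populate registry, e.g. via a ServiceLoader scan ...
      }
      cls = registry.get(kind);
    }
    return cls;
  }
}
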
@@ -90,10 +90,6 @@ public abstract class FSMainOperationsBaseTest extends FileSystemTestHelper {
public FSMainOperationsBaseTest() {
}

public FSMainOperationsBaseTest(String testRootDir) {
super(testRootDir);
}

@Before
public void setUp() throws Exception {
fSys = createFileSystem();

@@ -49,7 +49,7 @@ public final class FileContextTestHelper {
/**
* Create a context with the given test root
*/
public FileContextTestHelper(String testRootDir) {
private FileContextTestHelper(String testRootDir) {
this.testRootDir = testRootDir;
}

@@ -52,7 +52,7 @@ public class FileSystemTestHelper {
/**
* Create helper with the specified test root dir
*/
public FileSystemTestHelper(String testRootDir) {
private FileSystemTestHelper(String testRootDir) {
this.testRootDir = testRootDir;
}

@@ -17,8 +17,14 @@
package org.apache.hadoop.security;

import java.io.IOException;
import java.security.PrivilegedAction;
import java.util.Set;

import javax.security.auth.kerberos.KerberosPrincipal;

import junit.framework.Assert;
import static org.junit.Assert.*;


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
@@ -72,4 +78,40 @@ public class TestUGIWithSecurityOn {
ex.printStackTrace();
}
}

@Test
public void testGetUGIFromKerberosSubject() throws IOException {
String user1keyTabFilepath = System.getProperty("kdc.resource.dir")
+ "/keytabs/user1.keytab";

UserGroupInformation ugi = UserGroupInformation
.loginUserFromKeytabAndReturnUGI("user1@EXAMPLE.COM",
user1keyTabFilepath);
Set<KerberosPrincipal> principals = ugi.getSubject().getPrincipals(
KerberosPrincipal.class);
if (principals.isEmpty()) {
Assert.fail("There should be a kerberos principal in the subject.");
}
else {
UserGroupInformation ugi2 = UserGroupInformation.getUGIFromSubject(
ugi.getSubject());
if (ugi2 != null) {
ugi2.doAs(new PrivilegedAction<Object>() {

@Override
public Object run() {
try {
UserGroupInformation ugi3 = UserGroupInformation.getCurrentUser();
String doAsUserName = ugi3.getUserName();
assertEquals(doAsUserName, "user1@EXAMPLE.COM");
System.out.println("DO AS USERNAME: " + doAsUserName);
} catch (IOException e) {
e.printStackTrace();
}
return null;
}
});
}
}
}
}

@@ -28,6 +28,7 @@ import org.apache.hadoop.util.Shell;
import org.junit.*;

import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.LoginContext;
import java.io.BufferedReader;
@@ -768,6 +769,16 @@ public class TestUserGroupInformation {
});
}

@Test (timeout = 30000)
public void testGetUGIFromSubject() throws Exception {
KerberosPrincipal p = new KerberosPrincipal("guest");
Subject subject = new Subject();
subject.getPrincipals().add(p);
UserGroupInformation ugi = UserGroupInformation.getUGIFromSubject(subject);
assertNotNull(ugi);
assertEquals("guest@DEFAULT.REALM", ugi.getUserName());
}

/** Test hasSufficientTimeElapsed method */
@Test (timeout = 30000)
public void testHasSufficientTimeElapsed() throws Exception {

@@ -259,59 +259,17 @@ Trunk (Unreleased)
HDFS-5794. Fix the inconsistency of layout version number of
ADD_DATANODE_AND_STORAGE_UUIDS between trunk and branch-2. (jing9)

BREAKDOWN OF HDFS-5698 SUBTASKS AND RELATED JIRAS
Release 2.5.0 - UNRELEASED

HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9)
INCOMPATIBLE CHANGES

HDFS-5738. Serialize INode information in protobuf. (Haohui Mai via jing9)
NEW FEATURES

HDFS-5772. Serialize under-construction file information in FSImage. (jing9)
IMPROVEMENTS

HDFS-5783. Compute the digest before loading FSImage. (Haohui Mai via jing9)
OPTIMIZATIONS

HDFS-5785. Serialize symlink in protobuf. (Haohui Mai via jing9)

HDFS-5793. Optimize the serialization of PermissionStatus. (Haohui Mai via
jing9)

HDFS-5743. Use protobuf to serialize snapshot information. (jing9)

HDFS-5774. Serialize CachePool directives in protobuf. (Haohui Mai via jing9)

HDFS-5744. Serialize information for token managers in protobuf. (Haohui Mai
via jing9)

HDFS-5824. Add a Type field in Snapshot DiffEntry's protobuf definition.
(jing9)

HDFS-5808. Implement cancellation when saving FSImage. (Haohui Mai via jing9)

HDFS-5826. Update the stored edit logs to be consistent with the changes in
HDFS-5698 branch. (Haohui Mai via jing9)

HDFS-5797. Implement offline image viewer. (Haohui Mai via jing9)

HDFS-5771. Track progress when loading fsimage. (Haohui Mai via cnauroth)

HDFS-5871. Use PBHelper to serialize CacheDirectiveInfoExpirationProto.
(Haohui Mai via jing9)

HDFS-5884. LoadDelegator should use IOUtils.readFully() to read the magic
header. (Haohui Mai via jing9)

HDFS-5885. Add annotation for repeated fields in the protobuf definition.
(Haohui Mai via jing9)

HDFS-5906. Fixing findbugs and javadoc warnings in the HDFS-5698 branch.
(Haohui Mai via jing9)

HDFS-5911. The id of a CacheDirective instance does not get serialized in
the protobuf-fsimage. (Haohui Mai via jing9)

HDFS-5915. Refactor FSImageFormatProtobuf to simplify cross section reads.
(Haohui Mai via cnauroth)

HDFS-5847. Consolidate INodeReference into a separate section. (jing9)
BUG FIXES

Release 2.4.0 - UNRELEASED

@@ -319,6 +277,9 @@ Release 2.4.0 - UNRELEASED

NEW FEATURES

HDFS-5698. Use protobuf to serialize / deserialize FSImage. (See breakdown
of tasks below for features and contributors)

IMPROVEMENTS

HDFS-5781. Use an array to record the mapping between FSEditLogOpCode and
@@ -451,6 +412,90 @@ Release 2.4.0 - UNRELEASED
HDFS-5943. 'dfs.namenode.https-address' property is not loaded from
configuration in federation setup. (suresh)

HDFS-3128. Unit tests should not use a test root in /tmp. (wang)

HDFS-5948. TestBackupNode flakes with port in use error. (Haohui Mai
via Arpit Agarwal)

HDFS-5949. New Namenode UI when trying to download a file, the browser
doesn't know the file name. (Haohui Mai via brandonli)

HDFS-5716. Allow WebHDFS to use pluggable authentication filter
(Haohui Mai via brandonli)

HDFS-5953. TestBlockReaderFactory fails in trunk. (Akira Ajisaka via wang)

HDFS-5759. Web UI does not show up during the period of loading FSImage.
(Haohui Mai via Arpit Agarwal)

HDFS-5942. Fix javadoc in OfflineImageViewer. (Akira Ajisaka via cnauroth)

HDFS-5780. TestRBWBlockInvalidation times out intemittently. (Mit Desai
via kihwal)

HDFS-5803. TestBalancer.testBalancer0 fails. (Chen He via kihwal)

HDFS-5893. HftpFileSystem.RangeHeaderUrlOpener uses the default
URLConnectionFactory which does not import SSL certificates. (Haohui Mai via
jing9)

BREAKDOWN OF HDFS-5698 SUBTASKS AND RELATED JIRAS

HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9)

HDFS-5738. Serialize INode information in protobuf. (Haohui Mai via jing9)

HDFS-5772. Serialize under-construction file information in FSImage. (jing9)

HDFS-5783. Compute the digest before loading FSImage. (Haohui Mai via jing9)

HDFS-5785. Serialize symlink in protobuf. (Haohui Mai via jing9)

HDFS-5793. Optimize the serialization of PermissionStatus. (Haohui Mai via
jing9)

HDFS-5743. Use protobuf to serialize snapshot information. (jing9)

HDFS-5774. Serialize CachePool directives in protobuf. (Haohui Mai via jing9)

HDFS-5744. Serialize information for token managers in protobuf. (Haohui Mai
via jing9)

HDFS-5824. Add a Type field in Snapshot DiffEntry's protobuf definition.
(jing9)

HDFS-5808. Implement cancellation when saving FSImage. (Haohui Mai via jing9)

HDFS-5826. Update the stored edit logs to be consistent with the changes in
HDFS-5698 branch. (Haohui Mai via jing9)

HDFS-5797. Implement offline image viewer. (Haohui Mai via jing9)

HDFS-5771. Track progress when loading fsimage. (Haohui Mai via cnauroth)

HDFS-5871. Use PBHelper to serialize CacheDirectiveInfoExpirationProto.
(Haohui Mai via jing9)

HDFS-5884. LoadDelegator should use IOUtils.readFully() to read the magic
header. (Haohui Mai via jing9)

HDFS-5885. Add annotation for repeated fields in the protobuf definition.
(Haohui Mai via jing9)

HDFS-5906. Fixing findbugs and javadoc warnings in the HDFS-5698 branch.
(Haohui Mai via jing9)

HDFS-5911. The id of a CacheDirective instance does not get serialized in
the protobuf-fsimage. (Haohui Mai via jing9)

HDFS-5915. Refactor FSImageFormatProtobuf to simplify cross section reads.
(Haohui Mai via cnauroth)

HDFS-5847. Consolidate INodeReference into a separate section. (jing9)

HDFS-5959. Fix typo at section name in FSImageFormatProtobuf.java.
(Akira Ajisaka via suresh)

Release 2.3.1 - UNRELEASED

INCOMPATIBLE CHANGES

@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
import org.apache.hadoop.hdfs.web.AuthFilter;
import org.apache.hadoop.http.HttpConfig;

/**
@@ -173,6 +174,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final int DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT = 2;
public static final String DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY = "dfs.namenode.replication.max-streams-hard-limit";
public static final int DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT = 4;
public static final String DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY = "dfs.web.authentication.filter";
public static final String DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT = AuthFilter.class.getName();
public static final String DFS_WEBHDFS_ENABLED_KEY = "dfs.webhdfs.enabled";
public static final boolean DFS_WEBHDFS_ENABLED_DEFAULT = true;
public static final String DFS_WEBHDFS_USER_PATTERN_KEY = "dfs.webhdfs.user.provider.user.pattern";

@@ -234,7 +234,7 @@ public final class FSImageFormatProtobuf {
inodeLoader.loadINodeSection(in);
}
break;
case INODE_REFRENCE:
case INODE_REFERENCE:
snapshotLoader.loadINodeReferenceSection(in);
break;
case INODE_DIR:
@@ -553,7 +553,7 @@ public final class FSImageFormatProtobuf {
NS_INFO("NS_INFO"),
STRING_TABLE("STRING_TABLE"),
INODE("INODE"),
INODE_REFRENCE("INODE_REFRENCE"),
INODE_REFERENCE("INODE_REFERENCE"),
SNAPSHOT("SNAPSHOT"),
INODE_DIR("INODE_DIR"),
FILES_UNDERCONSTRUCTION("FILES_UNDERCONSTRUCTION"),

@@ -27,7 +27,6 @@ import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -61,14 +60,9 @@ public class FileDataServlet extends DfsServlet {
} else {
hostname = host.getIpAddr();
}
int port = host.getInfoPort();
if ("https".equals(scheme)) {
final Integer portObject = (Integer) getServletContext().getAttribute(
DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY);
if (portObject != null) {
port = portObject;
}
}

int port = "https".equals(scheme) ? host.getInfoSecurePort() : host
.getInfoPort();

String dtParam = "";
if (dt != null) {

@@ -32,7 +32,6 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
import org.apache.hadoop.hdfs.web.AuthFilter;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.resources.Param;
import org.apache.hadoop.hdfs.web.resources.UserParam;
@@ -70,21 +69,27 @@ public class NameNodeHttpServer {
private void initWebHdfs(Configuration conf) throws IOException {
if (WebHdfsFileSystem.isEnabled(conf, HttpServer2.LOG)) {
// set user pattern based on configuration file
UserParam.setUserPattern(conf.get(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
UserParam.setUserPattern(conf.get(
DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

// add authentication filter for webhdfs
final String className = conf.get(
DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
final String name = className;

// add SPNEGO authentication filter for webhdfs
final String name = "SPNEGO";
final String classname = AuthFilter.class.getName();
final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
Map<String, String> params = getAuthFilterParams(conf);
HttpServer2.defineFilter(httpServer.getWebAppContext(), name, classname, params,
new String[]{pathSpec});
HttpServer2.LOG.info("Added filter '" + name + "' (class=" + classname + ")");
HttpServer2.defineFilter(httpServer.getWebAppContext(), name, className,
params, new String[] { pathSpec });
HttpServer2.LOG.info("Added filter '" + name + "' (class=" + className
+ ")");

// add webhdfs packages
httpServer.addJerseyResourcePackage(
NamenodeWebHdfsMethods.class.getPackage().getName()
+ ";" + Param.class.getPackage().getName(), pathSpec);
httpServer.addJerseyResourcePackage(NamenodeWebHdfsMethods.class
.getPackage().getName() + ";" + Param.class.getPackage().getName(),
pathSpec);
}
}

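With dfs.web.authentication.filter introduced above (defaulting to AuthFilter, the SPNEGO filter), a deployment can swap in its own servlet Filter for WebHDFS on the NameNode. A hedged sketch of wiring a custom filter class in, assuming a user-supplied class that implements javax.servlet.Filter (the class names here are illustrative; the new TestWebHdfsWithAuthenticationFilter later in this diff shows the same idea end to end):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class WebHdfsFilterSetup {
  public static Configuration withCustomFilter(Configuration conf) {
    // Equivalent to setting dfs.web.authentication.filter in hdfs-site.xml.
    conf.set(DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
        "com.example.MyWebHdfsAuthFilter");  // illustrative class name
    return conf;
  }
}
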
@@ -383,7 +383,7 @@ public class FSImageFormatPBSnapshot {
INodeReferenceSection.INodeReference.Builder rb = buildINodeReference(ref);
rb.build().writeDelimitedTo(out);
}
parent.commitSection(headers, SectionName.INODE_REFRENCE);
parent.commitSection(headers, SectionName.INODE_REFERENCE);
}

private INodeReferenceSection.INodeReference.Builder buildINodeReference(

@@ -51,28 +51,16 @@ import com.google.common.collect.Maps;
import com.google.common.io.LimitInputStream;

/**
* This is the tool for analyzing file sizes in the namespace image. In order to
* run the tool one should define a range of integers <tt>[0, maxSize]</tt> by
* specifying <tt>maxSize</tt> and a <tt>step</tt>. The range of integers is
* divided into segments of size <tt>step</tt>:
* <tt>[0, s<sub>1</sub>, ..., s<sub>n-1</sub>, maxSize]</tt>, and the visitor
* calculates how many files in the system fall into each segment
* <tt>[s<sub>i-1</sub>, s<sub>i</sub>)</tt>. Note that files larger than
* <tt>maxSize</tt> always fall into the very last segment.
*
* <h3>Input.</h3>
* <ul>
* <li><tt>filename</tt> specifies the location of the image file;</li>
* <li><tt>maxSize</tt> determines the range <tt>[0, maxSize]</tt> of files
* sizes considered by the visitor;</li>
* <li><tt>step</tt> the range is divided into segments of size step.</li>
* </ul>
*
* <h3>Output.</h3> The output file is formatted as a tab separated two column
* table: Size and NumFiles. Where Size represents the start of the segment, and
* numFiles is the number of files form the image which size falls in this
* segment.
* LsrPBImage displays the blocks of the namespace in a format very similar
* to the output of ls/lsr. Entries are marked as directories or not,
* permissions listed, replication, username and groupname, along with size,
* modification date and full path.
*
* Note: A significant difference between the output of the lsr command
* and this image visitor is that this class cannot sort the file entries;
* they are listed in the order they are stored within the fsimage file.
* Therefore, the output of this class cannot be directly compared to the
* output of the lsr command.
*/
final class LsrPBImage {
private final Configuration conf;
@@ -127,7 +115,7 @@ final class LsrPBImage {
case INODE:
loadINodeSection(is);
break;
case INODE_REFRENCE:
case INODE_REFERENCE:
loadINodeReferenceSection(is);
break;
case INODE_DIR:

@@ -55,28 +55,8 @@ import com.google.common.collect.Lists;
import com.google.common.io.LimitInputStream;

/**
* This is the tool for analyzing file sizes in the namespace image. In order to
* run the tool one should define a range of integers <tt>[0, maxSize]</tt> by
* specifying <tt>maxSize</tt> and a <tt>step</tt>. The range of integers is
* divided into segments of size <tt>step</tt>:
* <tt>[0, s<sub>1</sub>, ..., s<sub>n-1</sub>, maxSize]</tt>, and the visitor
* calculates how many files in the system fall into each segment
* <tt>[s<sub>i-1</sub>, s<sub>i</sub>)</tt>. Note that files larger than
* <tt>maxSize</tt> always fall into the very last segment.
*
* <h3>Input.</h3>
* <ul>
* <li><tt>filename</tt> specifies the location of the image file;</li>
* <li><tt>maxSize</tt> determines the range <tt>[0, maxSize]</tt> of files
* sizes considered by the visitor;</li>
* <li><tt>step</tt> the range is divided into segments of size step.</li>
* </ul>
*
* <h3>Output.</h3> The output file is formatted as a tab separated two column
* table: Size and NumFiles. Where Size represents the start of the segment, and
* numFiles is the number of files form the image which size falls in this
* segment.
*
* PBImageXmlWriter walks over an fsimage structure and writes out
* an equivalent XML document that contains the fsimage's components.
*/
@InterfaceAudience.Private
public final class PBImageXmlWriter {
@@ -133,7 +113,7 @@ public final class PBImageXmlWriter {
case INODE:
dumpINodeSection(is);
break;
case INODE_REFRENCE:
case INODE_REFERENCE:
dumpINodeReferenceSection(is);
break;
case INODE_DIR:

@@ -344,14 +344,15 @@ public class HftpFileSystem extends FileSystem
}

static class RangeHeaderUrlOpener extends ByteRangeInputStream.URLOpener {
URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_SYSTEM_CONNECTION_FACTORY;
private final URLConnectionFactory connFactory;

RangeHeaderUrlOpener(final URL url) {
RangeHeaderUrlOpener(URLConnectionFactory connFactory, final URL url) {
super(url);
this.connFactory = connFactory;
}

protected HttpURLConnection openConnection() throws IOException {
return (HttpURLConnection)connectionFactory.openConnection(url);
return (HttpURLConnection)connFactory.openConnection(url);
}

/** Use HTTP Range header for specifying offset. */
@@ -381,8 +382,9 @@ public class HftpFileSystem extends FileSystem
super(o, r);
}

RangeHeaderInputStream(final URL url) {
this(new RangeHeaderUrlOpener(url), new RangeHeaderUrlOpener(null));
RangeHeaderInputStream(URLConnectionFactory connFactory, final URL url) {
this(new RangeHeaderUrlOpener(connFactory, url),
new RangeHeaderUrlOpener(connFactory, null));
}

@Override
@@ -397,7 +399,7 @@ public class HftpFileSystem extends FileSystem
String path = "/data" + ServletUtil.encodePath(f.toUri().getPath());
String query = addDelegationTokenParam("ugi=" + getEncodedUgiParameter());
URL u = getNamenodeURL(path, query);
return new FSDataInputStream(new RangeHeaderInputStream(u));
return new FSDataInputStream(new RangeHeaderInputStream(connectionFactory, u));
}

@Override

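Passing the URLConnectionFactory into RangeHeaderUrlOpener and RangeHeaderInputStream (rather than always using DEFAULT_SYSTEM_CONNECTION_FACTORY) lets HftpFileSystem hand its SSL-aware factory to the opener and lets tests substitute a mock, as TestByteRangeInputStream later in this diff does. A minimal standalone sketch of the same constructor-injection idea, with illustrative names only:

import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.net.URLConnection;

public class RangeOpener {
  // The connection factory is supplied by the caller, so an SSL-aware or a
  // mock implementation can be injected without touching this class.
  public interface ConnectionFactory {
    URLConnection open(URL url) throws IOException;
  }

  private final ConnectionFactory factory;
  private final URL url;

  public RangeOpener(ConnectionFactory factory, URL url) {
    this.factory = factory;
    this.url = url;
  }

  public InputStream openAt(long offset) throws IOException {
    URLConnection conn = factory.open(url);
    conn.setRequestProperty("Range", "bytes=" + offset + "-");
    return conn.getInputStream();
  }
}
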
@@ -151,7 +151,7 @@
{/fs}
</table>

<div class="page-header"><h1>Namenode Journal Status</h1></div>
<div class="page-header"><h1>NameNode Journal Status</h1></div>
<p><b>Current transaction ID:</b> {nn.JournalTransactionInfo.LastAppliedOrWrittenTxId}</p>
<table class="table" title="NameNode Journals">
<thead>

@@ -50,24 +50,23 @@
var data = {};

// Workarounds for the fact that JMXJsonServlet returns non-standard JSON strings
function data_workaround(d) {
d.nn.JournalTransactionInfo = JSON.parse(d.nn.JournalTransactionInfo);
d.nn.NameJournalStatus = JSON.parse(d.nn.NameJournalStatus);
d.nn.NameDirStatuses = JSON.parse(d.nn.NameDirStatuses);
d.nn.NodeUsage = JSON.parse(d.nn.NodeUsage);
d.nn.CorruptFiles = JSON.parse(d.nn.CorruptFiles);
return d;
function workaround(nn) {
nn.JournalTransactionInfo = JSON.parse(nn.JournalTransactionInfo);
nn.NameJournalStatus = JSON.parse(nn.NameJournalStatus);
nn.NameDirStatuses = JSON.parse(nn.NameDirStatuses);
nn.NodeUsage = JSON.parse(nn.NodeUsage);
nn.CorruptFiles = JSON.parse(nn.CorruptFiles);
return nn;
}

load_json(
BEANS,
function(d) {
guard_with_startup_progress(function(d) {
for (var k in d) {
data[k] = d[k].beans[0];
data[k] = k === 'nn' ? workaround(d[k].beans[0]) : d[k].beans[0];
}
data = data_workaround(data);
render();
},
}),
function (url, jqxhr, text, err) {
show_err_msg('<p>Failed to retrieve data from ' + url + ', cause: ' + err + '</p>');
});
@@ -92,6 +91,19 @@
show_err_msg('<p>Failed to retrieve data from ' + url + ', cause: ' + err + '</p>');
}

function guard_with_startup_progress(fn) {
return function() {
try {
fn.apply(this, arguments);
} catch (err) {
if (err instanceof TypeError) {
show_err_msg('NameNode is still loading. Redirecting to the Startup Progress page.');
load_startup_progress();
}
}
};
}

function load_startup_progress() {
function workaround(r) {
function rename_property(o, s, d) {
@@ -143,25 +155,29 @@
return r;
}

$.get('/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo', function (resp) {
$.get(
'/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo',
guard_with_startup_progress(function (resp) {
var data = workaround(resp.beans[0]);
dust.render('datanode-info', data, function(err, out) {
$('#tab-datanode').html(out);
$('#ui-tabs a[href="#tab-datanode"]').tab('show');
});
}).error(ajax_error_handler);
})).error(ajax_error_handler);
}

$('a[href="#tab-datanode"]').click(load_datanode_info);

function load_snapshot_info() {
$.get('/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState', function (resp) {
$.get(
'/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState',
guard_with_startup_progress(function (resp) {
var data = JSON.parse(resp.beans[0].SnapshotStats);
dust.render('snapshot-info', data, function(err, out) {
$('#tab-snapshot').html(out);
$('#ui-tabs a[href="#tab-snapshot"]').tab('show');
});
}).error(ajax_error_handler);
})).error(ajax_error_handler);
}

$('#ui-tabs a[href="#tab-snapshot"]').click(load_snapshot_info);

@@ -124,7 +124,7 @@
$('#file-info-tail').hide();
$('#file-info-title').text("File information - " + path);

var download_url = '/webhdfs/v1' + abs_path + '/?op=OPEN';
var download_url = '/webhdfs/v1' + abs_path + '?op=OPEN';

$('#file-info-download').attr('href', download_url);
$('#file-info-preview').click(function() {

@@ -40,7 +40,7 @@ public class TestFcHdfsCreateMkdir extends

@Override
protected FileContextTestHelper createFileContextHelper() {
return new FileContextTestHelper("/tmp/TestFcHdfsCreateMkdir");
return new FileContextTestHelper();
}


@@ -35,7 +35,7 @@ import org.junit.BeforeClass;
public class TestFcHdfsPermission extends FileContextPermissionBase {

private static final FileContextTestHelper fileContextTestHelper =
new FileContextTestHelper("/tmp/TestFcHdfsPermission");
new FileContextTestHelper();
private static FileContext fc;

private static MiniDFSCluster cluster;

@@ -43,7 +43,7 @@ import org.junit.Test;
public class TestFcHdfsSetUMask {

private static FileContextTestHelper fileContextTestHelper =
new FileContextTestHelper("/tmp/TestFcHdfsSetUMask");
new FileContextTestHelper();
private static MiniDFSCluster cluster;
private static Path defaultWorkingDirectory;
private static FileContext fc;

@@ -49,7 +49,7 @@ public class TestHDFSFileContextMainOperations extends

@Override
protected FileContextTestHelper createFileContextHelper() {
return new FileContextTestHelper("/tmp/TestHDFSFileContextMainOperations");
return new FileContextTestHelper();
}

@BeforeClass

@@ -50,7 +50,7 @@ import org.junit.Test;
* underlying file system as Hdfs.
*/
public class TestResolveHdfsSymlink {
private static File TEST_ROOT_DIR = PathUtils.getTestDir(TestResolveHdfsSymlink.class);
private static FileContextTestHelper helper = new FileContextTestHelper();
private static MiniDFSCluster cluster = null;

@BeforeClass
@@ -82,13 +82,14 @@ public class TestResolveHdfsSymlink {
FileContext fcHdfs = FileContext.getFileContext(cluster.getFileSystem()
.getUri());

final String localTestRoot = helper.getAbsoluteTestRootDir(fcLocal);
Path alphaLocalPath = new Path(fcLocal.getDefaultFileSystem().getUri()
.toString(), new File(TEST_ROOT_DIR, "alpha").getAbsolutePath());
.toString(), new File(localTestRoot, "alpha").getAbsolutePath());
DFSTestUtil.createFile(FileSystem.getLocal(conf), alphaLocalPath, 16,
(short) 1, 2);

Path linkTarget = new Path(fcLocal.getDefaultFileSystem().getUri()
.toString(), TEST_ROOT_DIR.getAbsolutePath());
.toString(), localTestRoot);
Path hdfsLink = new Path(fcHdfs.getDefaultFileSystem().getUri().toString(),
"/tmp/link");
fcHdfs.createSymlink(linkTarget, hdfsLink, true);

@@ -42,8 +42,7 @@ public class TestSymlinkHdfsDisable {
DistributedFileSystem dfs = cluster.getFileSystem();
FileContext fc = FileContext.getFileContext(cluster.getURI(0), conf);
// Create test files/links
FileContextTestHelper helper = new FileContextTestHelper(
"/tmp/TestSymlinkHdfsDisable");
FileContextTestHelper helper = new FileContextTestHelper();
Path root = helper.getTestRootPath(fc);
Path target = new Path(root, "target");
Path link = new Path(root, "link");

@@ -45,7 +45,7 @@ public class TestViewFileSystemAtHdfsRoot extends ViewFileSystemBaseTest {

@Override
protected FileSystemTestHelper createFileSystemHelper() {
return new FileSystemTestHelper("/tmp/TestViewFileSystemAtHdfsRoot");
return new FileSystemTestHelper();
}

@BeforeClass

@@ -52,7 +52,7 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {

@Override
protected FileSystemTestHelper createFileSystemHelper() {
return new FileSystemTestHelper("/tmp/TestViewFileSystemHdfs");
return new FileSystemTestHelper();
}

@BeforeClass

@@ -46,7 +46,7 @@ public class TestViewFsAtHdfsRoot extends ViewFsBaseTest {

@Override
protected FileContextTestHelper createFileContextHelper() {
return new FileContextTestHelper("/tmp/TestViewFsAtHdfsRoot");
return new FileContextTestHelper();
}

@BeforeClass

@@ -42,7 +42,7 @@ public class TestViewFsHdfs extends ViewFsBaseTest {

@Override
protected FileContextTestHelper createFileContextHelper() {
return new FileContextTestHelper("/tmp/TestViewFsHdfs");
return new FileContextTestHelper();
}


@@ -36,6 +36,7 @@ import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
import org.junit.After;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;

@@ -47,6 +48,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC;
import static org.hamcrest.CoreMatchers.equalTo;

public class TestBlockReaderFactory {
static final Log LOG = LogFactory.getLog(TestBlockReaderFactory.class);
@@ -56,6 +58,11 @@ public class TestBlockReaderFactory {
DomainSocket.disableBindPathValidation();
}

@Before
public void before() {
Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
}

@After
public void cleanup() {
DFSInputStream.tcpReadsDisabledForTesting = false;

@@ -74,7 +74,7 @@ public class TestBalancer {

ClientProtocol client;

static final long TIMEOUT = 20000L; //msec
static final long TIMEOUT = 40000L; //msec
static final double CAPACITY_ALLOWED_VARIANCE = 0.005; // 0.5%
static final double BALANCE_ALLOWED_VARIANCE = 0.11; // 10%+delta
static final int DEFAULT_BLOCK_SIZE = 10;

@@ -66,7 +66,7 @@ public class TestRBWBlockInvalidation {
* datanode, namenode should ask to invalidate that corrupted block and
* schedule replication for one more replica for that under replicated block.
*/
@Test(timeout=60000)
@Test(timeout=600000)
public void testBlockInvalidationWhenRBWReplicaMissedInDN()
throws IOException, InterruptedException {
// This test cannot pass on Windows due to file locking enforcement. It will
@@ -75,7 +75,7 @@ public class TestRBWBlockInvalidation {

Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 100);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 300);
conf.setLong(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
@@ -105,22 +105,23 @@ public class TestRBWBlockInvalidation {

out.close();

// Check datanode has reported the corrupt block.
int corruptReplicas = 0;
int liveReplicas = 0;
while (true) {
if ((corruptReplicas = countReplicas(namesystem, blk).corruptReplicas()) > 0) {
if ((liveReplicas = countReplicas(namesystem, blk).liveReplicas()) < 2) {
// This confirms we have a corrupt replica
LOG.info("Live Replicas after corruption: " + liveReplicas);
break;
}
Thread.sleep(100);
}
assertEquals("There should be 1 replica in the corruptReplicasMap", 1,
corruptReplicas);
assertEquals("There should be less than 2 replicas in the "
+ "liveReplicasMap", 1, liveReplicas);

// Check the block has got replicated to another datanode.
blk = DFSTestUtil.getFirstBlock(fs, testPath);
int liveReplicas = 0;
while (true) {
if ((liveReplicas = countReplicas(namesystem, blk).liveReplicas()) > 1) {
if ((liveReplicas =
countReplicas(namesystem, blk).liveReplicas()) > 1) {
//Wait till the live replica count becomes equal to Replication Factor
LOG.info("Live Replicas after Rereplication: " + liveReplicas);
break;
}
Thread.sleep(100);
@@ -128,9 +129,9 @@ public class TestRBWBlockInvalidation {
assertEquals("There should be two live replicas", 2,
liveReplicas);

// sleep for 1 second, so that by this time datanode reports the corrupt
// sleep for 2 seconds, so that by this time datanode reports the corrupt
// block after a live replica of block got replicated.
Thread.sleep(1000);
Thread.sleep(2000);

// Check that there is no corrupt block in the corruptReplicasMap.
assertEquals("There should not be any replica in the corruptReplicasMap",

@@ -282,6 +282,7 @@ public class TestBackupNode {
HAUtil.setAllowStandbyReads(conf, true);
short replication = (short)conf.getInt("dfs.replication", 3);
int numDatanodes = Math.max(3, replication);
conf.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, "localhost:0");
conf.set(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, "0");
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // disable block scanner
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1);

@@ -97,12 +97,13 @@ public static class MockHttpURLConnection extends HttpURLConnection {

@Test
public void testByteRange() throws IOException {
URLConnectionFactory factory = mock(URLConnectionFactory.class);
HftpFileSystem.RangeHeaderUrlOpener ospy = spy(
new HftpFileSystem.RangeHeaderUrlOpener(new URL("http://test/")));
new HftpFileSystem.RangeHeaderUrlOpener(factory, new URL("http://test/")));
doReturn(new MockHttpURLConnection(ospy.getURL())).when(ospy)
.openConnection();
HftpFileSystem.RangeHeaderUrlOpener rspy = spy(
new HftpFileSystem.RangeHeaderUrlOpener((URL) null));
new HftpFileSystem.RangeHeaderUrlOpener(factory, (URL) null));
doReturn(new MockHttpURLConnection(rspy.getURL())).when(rspy)
.openConnection();
ByteRangeInputStream is = new HftpFileSystem.RangeHeaderInputStream(ospy, rspy);
@@ -171,12 +172,15 @@ public static class MockHttpURLConnection extends HttpURLConnection {
assertEquals("Should fail because incorrect response code was sent",
"HTTP_OK expected, received 206", e.getMessage());
}
is.close();
}

@Test
public void testPropagatedClose() throws IOException {
ByteRangeInputStream brs = spy(
new HftpFileSystem.RangeHeaderInputStream(new URL("http://test/")));
URLConnectionFactory factory = mock(URLConnectionFactory.class);

ByteRangeInputStream brs = spy(new HftpFileSystem.RangeHeaderInputStream(
factory, new URL("http://test/")));

InputStream mockStream = mock(InputStream.class);
doReturn(mockStream).when(brs).openInputStream();

@@ -52,7 +52,7 @@ public class TestFSMainOperationsWebHdfs extends FSMainOperationsBaseTest {
private static FileSystem fileSystem;

public TestFSMainOperationsWebHdfs() {
super("/tmp/TestFSMainOperationsWebHdfs");
super();
}

@Override

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.web;

import java.io.File;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.net.URI;

@@ -30,6 +31,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.junit.AfterClass;
import org.junit.Assert;
@@ -65,9 +67,11 @@ public class TestHttpsFileSystem {

cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
cluster.getFileSystem().create(new Path("/test")).close();
OutputStream os = cluster.getFileSystem().create(new Path("/test"));
os.write(23);
os.close();
InetSocketAddress addr = cluster.getNameNode().getHttpsAddress();
nnAddr = addr.getHostName() + ":" + addr.getPort();
nnAddr = NetUtils.getHostPortString(addr);
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, nnAddr);
}

@@ -82,6 +86,9 @@ public class TestHttpsFileSystem {
public void testHsftpFileSystem() throws Exception {
FileSystem fs = FileSystem.get(new URI("hsftp://" + nnAddr), conf);
Assert.assertTrue(fs.exists(new Path("/test")));
InputStream is = fs.open(new Path("/test"));
Assert.assertEquals(23, is.read());
is.close();
fs.close();
}

@@ -0,0 +1,103 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;

import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletResponse;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.net.NetUtils;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

public class TestWebHdfsWithAuthenticationFilter {
private static boolean authorized = false;

public static final class CustomizedFilter implements Filter {
@Override
public void init(FilterConfig filterConfig) throws ServletException {
}

@Override
public void doFilter(ServletRequest request, ServletResponse response,
FilterChain chain) throws IOException, ServletException {
if (authorized) {
chain.doFilter(request, response);
} else {
((HttpServletResponse) response)
.sendError(HttpServletResponse.SC_FORBIDDEN);
}
}

@Override
public void destroy() {
}

}

private static Configuration conf;
private static MiniDFSCluster cluster;
private static FileSystem fs;

@BeforeClass
public static void setUp() throws IOException {
conf = new Configuration();
conf.set(DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
CustomizedFilter.class.getName());
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "localhost:0");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
InetSocketAddress addr = cluster.getNameNode().getHttpAddress();
fs = FileSystem.get(
URI.create("webhdfs://" + NetUtils.getHostPortString(addr)), conf);
cluster.waitActive();
}

@AfterClass
public static void tearDown() throws IOException {
fs.close();
cluster.shutdown();
}

@Test
public void testWebHdfsAuthFilter() throws IOException {
// getFileStatus() is supposed to pass through with the default filter.
authorized = false;
try {
fs.getFileStatus(new Path("/"));
Assert.fail("The filter fails to block the request");
} catch (IOException e) {
}
authorized = true;
fs.getFileStatus(new Path("/"));
}
}

@@ -19,6 +19,7 @@ package org.apache.hadoop.test;

import java.io.File;

import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.fs.Path;

public class PathUtils {
@@ -36,7 +37,10 @@ public class PathUtils {
}

public static File getTestDir(Class<?> caller, boolean create) {
File dir = new File(System.getProperty("test.build.data", "/tmp"), caller.getSimpleName());
File dir =
new File(System.getProperty("test.build.data", "target/test/data")
+ "/" + RandomStringUtils.randomAlphanumeric(10),
caller.getSimpleName());
if (create) {
dir.mkdirs();
}

@@ -139,6 +139,18 @@ Trunk (Unreleased)

MAPREDUCE-5717. Task pings are interpreted as task progress (jlowe)

Release 2.5.0 - UNRELEASED

INCOMPATIBLE CHANGES

NEW FEATURES

IMPROVEMENTS

OPTIMIZATIONS

BUG FIXES

Release 2.4.0 - UNRELEASED

INCOMPATIBLE CHANGES
@@ -165,6 +177,9 @@ Release 2.4.0 - UNRELEASED
MAPREDUCE-5670. CombineFileRecordReader should report progress when moving
to the next file (Chen He via jlowe)

MAPREDUCE-5757. ConcurrentModificationException in JobControl.toList
(jlowe)

Release 2.3.1 - UNRELEASED

INCOMPATIBLE CHANGES

@@ -1387,7 +1387,8 @@ public class MRAppMaster extends CompositeService {
// RM/NM to issue SSL certificates but definitely not MR-AM as it is
// running in user-land.
MRWebAppUtil.initialize(conf);
HttpConfig.setPolicy(HttpConfig.Policy.HTTP_ONLY);
conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,
HttpConfig.Policy.HTTP_ONLY.name());
// log the system properties
String systemPropsToLog = MRApps.getSystemPropertiesToLog(conf);
if (systemPropsToLog != null) {

@@ -21,6 +21,7 @@ package org.apache.hadoop.mapreduce.v2.jobhistory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.http.HttpConfig;

/**
* Stores Job History configuration keys that can be set by administrators of
@@ -135,7 +136,7 @@ public class JHAdminConfig {
public static final String MR_HS_HTTP_POLICY = MR_HISTORY_PREFIX
+ "http.policy";
public static String DEFAULT_MR_HS_HTTP_POLICY =
CommonConfigurationKeysPublic.HTTP_POLICY_HTTP_ONLY;
HttpConfig.Policy.HTTP_ONLY.name();

/**The address the history server webapp is on.*/
public static final String MR_HISTORY_WEBAPP_ADDRESS =

@@ -71,11 +71,13 @@ public class MRWebAppUtil {
}

public static String getYARNWebappScheme() {
return HttpConfig.getScheme(httpPolicyInYarn);
return httpPolicyInYarn == HttpConfig.Policy.HTTPS_ONLY ? "https://"
: "http://";
}

public static String getJHSWebappScheme() {
return HttpConfig.getScheme(httpPolicyInJHS);
return httpPolicyInJHS == HttpConfig.Policy.HTTPS_ONLY ? "https://"
: "http://";
}

public static void setJHSWebappURLWithoutScheme(Configuration conf,

@@ -45,6 +45,7 @@ import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
import org.apache.hadoop.mapreduce.util.HostUtil;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;

/**
* HistoryViewer is used to parse and view the JobHistory files
@@ -231,7 +232,8 @@ public class HistoryViewer {
taskList.append("\t");
taskList.append(attempt.getHostname()).append("\t");
taskList.append(attempt.getError());
String taskLogsUrl = getTaskLogsUrl(attempt);
String taskLogsUrl = getTaskLogsUrl(
WebAppUtils.getHttpSchemePrefix(fs.getConf()), attempt);
taskList.append(taskLogsUrl != null ? taskLogsUrl : "n/a");
System.out.println(taskList.toString());
}
@@ -446,7 +448,7 @@ public class HistoryViewer {
* @return the taskLogsUrl. null if http-port or tracker-name or
* task-attempt-id are unavailable.
*/
public static String getTaskLogsUrl(
public static String getTaskLogsUrl(String scheme,
JobHistoryParser.TaskAttemptInfo attempt) {
if (attempt.getHttpPort() == -1
|| attempt.getTrackerName().equals("")
@@ -457,7 +459,7 @@ public class HistoryViewer {
String taskTrackerName =
HostUtil.convertTrackerNameToHostName(
attempt.getTrackerName());
return HostUtil.getTaskLogUrl(taskTrackerName,
return HostUtil.getTaskLogUrl(scheme, taskTrackerName,
Integer.toString(attempt.getHttpPort()),
attempt.getAttemptId().toString());
}

@@ -79,7 +79,7 @@ public class JobControl implements Runnable {
this.runnerState = ThreadState.READY;
}

synchronized private static List<ControlledJob> toList(
private static List<ControlledJob> toList(
LinkedList<ControlledJob> jobs) {
ArrayList<ControlledJob> retv = new ArrayList<ControlledJob>();
for (ControlledJob job : jobs) {
@@ -122,11 +122,11 @@ public class JobControl implements Runnable {
/**
* @return the jobs in the success state
*/
public List<ControlledJob> getSuccessfulJobList() {
synchronized public List<ControlledJob> getSuccessfulJobList() {
return toList(this.successfulJobs);
}

public List<ControlledJob> getFailedJobList() {
synchronized public List<ControlledJob> getFailedJobList() {
return toList(this.failedJobs);
}

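Moving synchronization from the static toList() helper onto the public accessors above means each list snapshot is taken while holding the JobControl instance monitor that writers use, which is what MAPREDUCE-5757 needs to avoid ConcurrentModificationException. A generic sketch of the pattern under that assumption (names are illustrative):

import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;

public class JobListHolder {
  private final LinkedList<String> finished = new LinkedList<String>();

  // Copy while holding the same lock that writers use, so iteration never
  // races with additions to the underlying list.
  public synchronized List<String> getFinishedSnapshot() {
    return new ArrayList<String>(finished);
  }

  public synchronized void markFinished(String jobId) {
    finished.add(jobId);
  }
}
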
@@ -20,7 +20,6 @@ package org.apache.hadoop.mapreduce.util;

import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.http.HttpConfig;

@Private
@Unstable
@@ -33,9 +32,9 @@ public class HostUtil {
* @param taskAttemptID
* @return the taskLogUrl
*/
public static String getTaskLogUrl(String taskTrackerHostName,
public static String getTaskLogUrl(String scheme, String taskTrackerHostName,
String httpPort, String taskAttemptID) {
return (HttpConfig.getSchemePrefix() + taskTrackerHostName + ":" +
return (scheme + taskTrackerHostName + ":" +
httpPort + "/tasklog?attemptid=" + taskAttemptID);
}

@@ -24,7 +24,6 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.v2.hs.HistoryServerStateStoreService.HistoryServerState;
@@ -121,7 +120,6 @@ public class JobHistoryServer extends CompositeService {

// This is required for WebApps to use https if enabled.
MRWebAppUtil.initialize(getConfig());
HttpConfig.setPolicy(MRWebAppUtil.getJHSHttpPolicy());
try {
doSecureLogin(conf);
} catch(IOException ie) {

@ -230,9 +230,9 @@ public class MiniMRYarnCluster extends MiniYARNCluster {
|
|||
WebAppUtils.getRMWebAppURLWithoutScheme(getConfig()));
|
||||
LOG.info("MiniMRYARN HistoryServer address: " +
|
||||
getConfig().get(JHAdminConfig.MR_HISTORY_ADDRESS));
|
||||
LOG.info("MiniMRYARN HistoryServer web address: " +
|
||||
getResolvedMRHistoryWebAppURLWithoutScheme(getConfig(),
|
||||
HttpConfig.isSecure()));
|
||||
LOG.info("MiniMRYARN HistoryServer web address: "
|
||||
+ getResolvedMRHistoryWebAppURLWithoutScheme(getConfig(),
|
||||
MRWebAppUtil.getJHSHttpPolicy() == HttpConfig.Policy.HTTPS_ONLY));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -15,6 +15,18 @@ Trunk - Unreleased
|
|||
YARN-524 TestYarnVersionInfo failing if generated properties doesn't
|
||||
include an SVN URL. (stevel)
|
||||
|
||||
Release 2.5.0 - UNRELEASED
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
||||
NEW FEATURES
|
||||
|
||||
IMPROVEMENTS
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
BUG FIXES
|
||||
|
||||
Release 2.4.0 - UNRELEASED
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
@ -273,6 +285,16 @@ Release 2.4.0 - UNRELEASED
|
|||
at allocation time so as to prevent RM from shelling out containers with
|
||||
expired tokens. (Omkar Vinit Joshi and Jian He via vinodkv)
|
||||
|
||||
YARN-1553. Modified YARN and MR to stop using HttpConfig.isSecure() and
|
||||
instead rely on the http policy framework. And also fix some bugs related
|
||||
to https handling in YARN web-apps. (Haohui Mai via vinodkv)
|
||||
|
||||
YARN-1721. When moving app between queues in Fair Scheduler, grab lock on
|
||||
FSSchedulerApp (Sandy Ryza)
|
||||
|
||||
YARN-1724. Race condition in Fair Scheduler when continuous scheduling is
|
||||
turned on (Sandy Ryza)
|
||||
|
||||
Release 2.3.1 - UNRELEASED
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
|
|
@ -100,7 +100,7 @@ public class HAUtil {
|
|||
StringBuilder setValue = new StringBuilder();
|
||||
for (String id: ids) {
|
||||
// verify the RM service address configurations for every RM id
|
||||
for (String prefix : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
|
||||
for (String prefix : YarnConfiguration.getServiceAddressConfKeys(conf)) {
|
||||
checkAndSetRMRPCAddress(prefix, id, conf);
|
||||
}
|
||||
setValue.append(id);
|
||||
|
@ -158,7 +158,7 @@ public class HAUtil {
|
|||
}
|
||||
|
||||
public static void verifyAndSetAllServiceAddresses(Configuration conf) {
|
||||
for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
|
||||
for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
|
||||
verifyAndSetConfValue(confKey, conf);
|
||||
}
|
||||
}
|
||||
|
@ -236,7 +236,7 @@ public class HAUtil {
|
|||
@InterfaceAudience.Private
|
||||
@VisibleForTesting
|
||||
static String getConfKeyForRMInstance(String prefix, Configuration conf) {
|
||||
if (!YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS.contains(prefix)) {
|
||||
if (!YarnConfiguration.getServiceAddressConfKeys(conf).contains(prefix)) {
|
||||
return prefix;
|
||||
} else {
|
||||
String RMId = getRMHAId(conf);
|
||||
|
@ -289,7 +289,7 @@ public class HAUtil {
|
|||
hostNameConfKey + " or " + addSuffix(prefix, RMId)));
|
||||
} else {
|
||||
conf.set(addSuffix(prefix, RMId), confVal + ":"
|
||||
+ YarnConfiguration.getRMDefaultPortNumber(prefix));
|
||||
+ YarnConfiguration.getRMDefaultPortNumber(prefix, conf));
|
||||
}
|
||||
}
|
||||
} catch (IllegalArgumentException iae) {
|
||||
|
|
|
@ -26,10 +26,8 @@ import java.util.List;
|
|||
import org.apache.hadoop.HadoopIllegalArgumentException;
|
||||
import org.apache.hadoop.classification.InterfaceAudience.Private;
|
||||
import org.apache.hadoop.classification.InterfaceAudience.Public;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.classification.InterfaceStability.Evolving;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
|
||||
import org.apache.hadoop.http.HttpConfig;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
|
@ -187,6 +185,8 @@ public class YarnConfiguration extends Configuration {
|
|||
/** The https address of the RM web application.*/
|
||||
public static final String RM_WEBAPP_HTTPS_ADDRESS =
|
||||
RM_PREFIX + "webapp.https.address";
|
||||
public static final boolean YARN_SSL_CLIENT_HTTPS_NEED_AUTH_DEFAULT = false;
|
||||
public static final String YARN_SSL_SERVER_RESOURCE_DEFAULT = "ssl-server.xml";
|
||||
|
||||
public static final int DEFAULT_RM_WEBAPP_HTTPS_PORT = 8090;
|
||||
public static final String DEFAULT_RM_WEBAPP_HTTPS_ADDRESS = "0.0.0.0:"
|
||||
|
@ -361,15 +361,21 @@ public class YarnConfiguration extends Configuration {
|
|||
public static final String DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS =
|
||||
"org.apache.hadoop.yarn.LocalConfigurationProvider";
|
||||
|
||||
@Private
|
||||
public static final List<String> RM_SERVICES_ADDRESS_CONF_KEYS =
|
||||
private static final List<String> RM_SERVICES_ADDRESS_CONF_KEYS_HTTP =
|
||||
Collections.unmodifiableList(Arrays.asList(
|
||||
RM_ADDRESS,
|
||||
RM_SCHEDULER_ADDRESS,
|
||||
RM_ADMIN_ADDRESS,
|
||||
RM_RESOURCE_TRACKER_ADDRESS,
|
||||
HttpConfig.isSecure() ? RM_WEBAPP_HTTPS_ADDRESS
|
||||
: RM_WEBAPP_ADDRESS));
|
||||
RM_WEBAPP_ADDRESS));
|
||||
|
||||
private static final List<String> RM_SERVICES_ADDRESS_CONF_KEYS_HTTPS =
|
||||
Collections.unmodifiableList(Arrays.asList(
|
||||
RM_ADDRESS,
|
||||
RM_SCHEDULER_ADDRESS,
|
||||
RM_ADMIN_ADDRESS,
|
||||
RM_RESOURCE_TRACKER_ADDRESS,
|
||||
RM_WEBAPP_HTTPS_ADDRESS));
|
||||
|
||||
public static final String AUTO_FAILOVER_PREFIX =
|
||||
RM_HA_PREFIX + "automatic-failover.";
|
||||
|
@ -1102,10 +1108,9 @@ public class YarnConfiguration extends Configuration {
|
|||
YARN_PREFIX + "client.max-nodemanagers-proxies";
|
||||
public static final int DEFAULT_NM_CLIENT_MAX_NM_PROXIES = 500;
|
||||
|
||||
public static final String YARN_HTTP_POLICY_KEY =
|
||||
YARN_PREFIX + "http.policy";
|
||||
public static final String YARN_HTTP_POLICY_DEFAULT =
|
||||
CommonConfigurationKeysPublic.HTTP_POLICY_HTTP_ONLY;
|
||||
public static final String YARN_HTTP_POLICY_KEY = YARN_PREFIX + "http.policy";
|
||||
public static final String YARN_HTTP_POLICY_DEFAULT = HttpConfig.Policy.HTTP_ONLY
|
||||
.name();
|
||||
|
||||
public YarnConfiguration() {
|
||||
super();
|
||||
|
@ -1118,6 +1123,12 @@ public class YarnConfiguration extends Configuration {
|
|||
}
|
||||
}
|
||||
|
||||
@Private
|
||||
public static List<String> getServiceAddressConfKeys(Configuration conf) {
|
||||
return useHttps(conf) ? RM_SERVICES_ADDRESS_CONF_KEYS_HTTPS
|
||||
: RM_SERVICES_ADDRESS_CONF_KEYS_HTTP;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the socket address for <code>name</code> property as a
|
||||
* <code>InetSocketAddress</code>.
|
||||
|
@ -1130,7 +1141,7 @@ public class YarnConfiguration extends Configuration {
|
|||
public InetSocketAddress getSocketAddr(
|
||||
String name, String defaultAddress, int defaultPort) {
|
||||
String address;
|
||||
if (HAUtil.isHAEnabled(this) && RM_SERVICES_ADDRESS_CONF_KEYS.contains(name)) {
|
||||
if (HAUtil.isHAEnabled(this) && getServiceAddressConfKeys(this).contains(name)) {
|
||||
address = HAUtil.getConfValueForRMInstance(name, defaultAddress, this);
|
||||
} else {
|
||||
address = get(name, defaultAddress);
|
||||
|
@ -1149,7 +1160,8 @@ public class YarnConfiguration extends Configuration {
|
|||
}
|
||||
|
||||
@Private
|
||||
public static int getRMDefaultPortNumber(String addressPrefix) {
|
||||
public static int getRMDefaultPortNumber(String addressPrefix,
|
||||
Configuration conf) {
|
||||
if (addressPrefix.equals(YarnConfiguration.RM_ADDRESS)) {
|
||||
return YarnConfiguration.DEFAULT_RM_PORT;
|
||||
} else if (addressPrefix.equals(YarnConfiguration.RM_SCHEDULER_ADDRESS)) {
|
||||
|
@ -1167,7 +1179,13 @@ public class YarnConfiguration extends Configuration {
|
|||
throw new HadoopIllegalArgumentException(
|
||||
"Invalid RM RPC address Prefix: " + addressPrefix
|
||||
+ ". The valid value should be one of "
|
||||
+ YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS);
|
||||
+ getServiceAddressConfKeys(conf));
|
||||
}
|
||||
}
|
||||
|
||||
public static boolean useHttps(Configuration conf) {
|
||||
return HttpConfig.Policy.HTTPS_ONLY == HttpConfig.Policy.fromString(conf
|
||||
.get(YARN_HTTP_POLICY_KEY,
|
||||
YARN_HTTP_POLICY_DEFAULT));
|
||||
}
|
||||
}
|
||||
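Editor's note: a minimal sketch of how the reworked policy plumbing above is meant to be consumed. The per-Configuration useHttps() check and the policy-dependent service-address key list replace the old static HttpConfig.isSecure(). Setting yarn.http.policy programmatically here is only for illustration; in a deployment it normally comes from yarn-site.xml.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class HttpPolicyExample {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    // Illustrative override of yarn.http.policy.
    conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,
        HttpConfig.Policy.HTTPS_ONLY.name());
    // Per-configuration check instead of static HttpConfig.isSecure().
    System.out.println("https enabled: " + YarnConfiguration.useHttps(conf));
    // The key list now follows the policy (HTTPS list includes the https webapp address).
    System.out.println("service address keys: "
        + YarnConfiguration.getServiceAddressConfKeys(conf));
  }
}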
|
|
|
@ -29,7 +29,6 @@ import org.apache.commons.logging.LogFactory;
|
|||
import org.apache.hadoop.classification.InterfaceAudience.Private;
|
||||
import org.apache.hadoop.classification.InterfaceStability.Unstable;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.http.HttpConfig;
|
||||
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities;
|
||||
import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntity;
|
||||
import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors;
|
||||
|
@ -65,12 +64,17 @@ public class TimelineClientImpl extends TimelineClient {
|
|||
}
|
||||
|
||||
protected void serviceInit(Configuration conf) throws Exception {
|
||||
resURI = new URI(JOINER.join(HttpConfig.getSchemePrefix(),
|
||||
HttpConfig.isSecure() ? conf.get(
|
||||
if (YarnConfiguration.useHttps(conf)) {
|
||||
resURI = URI
|
||||
.create(JOINER.join("https://", conf.get(
|
||||
YarnConfiguration.AHS_WEBAPP_HTTPS_ADDRESS,
|
||||
YarnConfiguration.DEFAULT_AHS_WEBAPP_HTTPS_ADDRESS) : conf.get(
|
||||
YarnConfiguration.DEFAULT_AHS_WEBAPP_HTTPS_ADDRESS),
|
||||
RESOURCE_URI_STR));
|
||||
} else {
|
||||
resURI = URI.create(JOINER.join("http://", conf.get(
|
||||
YarnConfiguration.AHS_WEBAPP_ADDRESS,
|
||||
YarnConfiguration.DEFAULT_AHS_WEBAPP_ADDRESS), RESOURCE_URI_STR));
|
||||
}
|
||||
super.serviceInit(conf);
|
||||
}
|
||||
|
||||
|
|
|
@ -37,7 +37,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
|
|||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.http.HttpServer2;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.hadoop.yarn.security.AdminACLsManager;
|
||||
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
|
@ -216,9 +218,11 @@ public class WebApps {
|
|||
System.exit(1);
|
||||
}
|
||||
}
|
||||
HttpServer2.Builder builder = new HttpServer2.Builder().setName(name)
|
||||
.addEndpoint(URI.create("http://" + bindAddress + ":" + port))
|
||||
.setConf(conf).setFindPort(findPort)
|
||||
HttpServer2.Builder builder = new HttpServer2.Builder()
|
||||
.setName(name)
|
||||
.addEndpoint(
|
||||
URI.create(WebAppUtils.getHttpSchemePrefix(conf) + bindAddress
|
||||
+ ":" + port)).setConf(conf).setFindPort(findPort)
|
||||
.setACL(new AdminACLsManager(conf).getAdminAcl())
|
||||
.setPathSpec(pathList.toArray(new String[0]));
|
||||
|
||||
|
@ -231,6 +235,11 @@ public class WebApps {
|
|||
.setKeytabConfKey(spnegoKeytabKey)
|
||||
.setSecurityEnabled(UserGroupInformation.isSecurityEnabled());
|
||||
}
|
||||
|
||||
if (YarnConfiguration.useHttps(conf)) {
|
||||
WebAppUtils.loadSslConfiguration(builder);
|
||||
}
|
||||
|
||||
HttpServer2 server = builder.build();
|
||||
|
||||
for(ServletStruct struct: servlets) {
|
||||
|
|
|
@ -26,20 +26,16 @@ import java.net.UnknownHostException;
|
|||
import org.apache.hadoop.classification.InterfaceAudience.Private;
|
||||
import org.apache.hadoop.classification.InterfaceStability.Evolving;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.http.HttpConfig;
|
||||
import org.apache.hadoop.http.HttpConfig.Policy;
|
||||
import org.apache.hadoop.http.HttpServer2;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
import org.apache.hadoop.yarn.api.records.ContainerId;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.hadoop.yarn.util.ConverterUtils;
|
||||
|
||||
import com.google.common.base.Joiner;
|
||||
|
||||
@Private
|
||||
@Evolving
|
||||
public class WebAppUtils {
|
||||
private static final Joiner JOINER = Joiner.on("");
|
||||
|
||||
public static void setRMWebAppPort(Configuration conf, int port) {
|
||||
String hostname = getRMWebAppURLWithoutScheme(conf);
|
||||
hostname =
|
||||
|
@ -51,7 +47,7 @@ public class WebAppUtils {
|
|||
public static void setRMWebAppHostnameAndPort(Configuration conf,
|
||||
String hostname, int port) {
|
||||
String resolvedAddress = hostname + ":" + port;
|
||||
if (HttpConfig.isSecure()) {
|
||||
if (YarnConfiguration.useHttps(conf)) {
|
||||
conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS, resolvedAddress);
|
||||
} else {
|
||||
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, resolvedAddress);
|
||||
|
@ -60,7 +56,7 @@ public class WebAppUtils {
|
|||
|
||||
public static void setNMWebAppHostNameAndPort(Configuration conf,
|
||||
String hostName, int port) {
|
||||
if (HttpConfig.isSecure()) {
|
||||
if (YarnConfiguration.useHttps(conf)) {
|
||||
conf.set(YarnConfiguration.NM_WEBAPP_HTTPS_ADDRESS,
|
||||
hostName + ":" + port);
|
||||
} else {
|
||||
|
@ -70,16 +66,11 @@ public class WebAppUtils {
|
|||
}
|
||||
|
||||
public static String getRMWebAppURLWithScheme(Configuration conf) {
|
||||
return JOINER.join(HttpConfig.getSchemePrefix(),
|
||||
HttpConfig.isSecure() ? conf.get(
|
||||
YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS,
|
||||
YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_ADDRESS) : conf.get(
|
||||
YarnConfiguration.RM_WEBAPP_ADDRESS,
|
||||
YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS));
|
||||
return getHttpSchemePrefix(conf) + getRMWebAppURLWithoutScheme(conf);
|
||||
}
|
||||
|
||||
public static String getRMWebAppURLWithoutScheme(Configuration conf) {
|
||||
if (HttpConfig.isSecure()) {
|
||||
if (YarnConfiguration.useHttps(conf)) {
|
||||
return conf.get(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS,
|
||||
YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_ADDRESS);
|
||||
} else {
|
||||
|
@ -97,13 +88,13 @@ public class WebAppUtils {
|
|||
}
|
||||
|
||||
public static String getResolvedRMWebAppURLWithScheme(Configuration conf) {
|
||||
return HttpConfig.getSchemePrefix()
|
||||
return getHttpSchemePrefix(conf)
|
||||
+ getResolvedRMWebAppURLWithoutScheme(conf);
|
||||
}
|
||||
|
||||
public static String getResolvedRMWebAppURLWithoutScheme(Configuration conf) {
|
||||
return getResolvedRMWebAppURLWithoutScheme(conf,
|
||||
HttpConfig.isSecure() ? Policy.HTTPS_ONLY : Policy.HTTP_ONLY);
|
||||
YarnConfiguration.useHttps(conf) ? Policy.HTTPS_ONLY : Policy.HTTP_ONLY);
|
||||
}
|
||||
|
||||
public static String getResolvedRMWebAppURLWithoutScheme(Configuration conf,
|
||||
|
@ -140,7 +131,7 @@ public class WebAppUtils {
|
|||
}
|
||||
|
||||
public static String getNMWebAppURLWithoutScheme(Configuration conf) {
|
||||
if (HttpConfig.isSecure()) {
|
||||
if (YarnConfiguration.useHttps(conf)) {
|
||||
return conf.get(YarnConfiguration.NM_WEBAPP_HTTPS_ADDRESS,
|
||||
YarnConfiguration.DEFAULT_NM_WEBAPP_HTTPS_ADDRESS);
|
||||
} else {
|
||||
|
@ -150,7 +141,7 @@ public class WebAppUtils {
|
|||
}
|
||||
|
||||
public static String getAHSWebAppURLWithoutScheme(Configuration conf) {
|
||||
if (HttpConfig.isSecure()) {
|
||||
if (YarnConfiguration.useHttps(conf)) {
|
||||
return conf.get(YarnConfiguration.AHS_WEBAPP_HTTPS_ADDRESS,
|
||||
YarnConfiguration.DEFAULT_AHS_WEBAPP_HTTPS_ADDRESS);
|
||||
} else {
|
||||
|
@ -177,8 +168,38 @@ public class WebAppUtils {
|
|||
|
||||
public static String getLogUrl(String nodeHttpAddress, String allocatedNode,
|
||||
ContainerId containerId, String user) {
|
||||
return join(HttpConfig.getSchemePrefix(), nodeHttpAddress, "/logs", "/",
|
||||
return join("//", nodeHttpAddress, "/logs", "/",
|
||||
allocatedNode, "/", ConverterUtils.toString(containerId), "/",
|
||||
ConverterUtils.toString(containerId), "/", user);
|
||||
}
|
||||
|
||||
/**
|
||||
* Choose which scheme (HTTP or HTTPS) to use when generating a URL based on
|
||||
* the configuration.
|
||||
*
|
||||
* @return the scheme (HTTP / HTTPS)
|
||||
*/
|
||||
public static String getHttpSchemePrefix(Configuration conf) {
|
||||
return YarnConfiguration.useHttps(conf) ? "https://" : "http://";
|
||||
}
|
||||
|
||||
/**
|
||||
* Load the SSL keystore / truststore into the HttpServer builder.
|
||||
*/
|
||||
public static HttpServer2.Builder loadSslConfiguration(
|
||||
HttpServer2.Builder builder) {
|
||||
Configuration sslConf = new Configuration(false);
|
||||
boolean needsClientAuth = YarnConfiguration.YARN_SSL_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
|
||||
sslConf.addResource(YarnConfiguration.YARN_SSL_SERVER_RESOURCE_DEFAULT);
|
||||
|
||||
return builder
|
||||
.needsClientAuth(needsClientAuth)
|
||||
.keyPassword(sslConf.get("ssl.server.keystore.keypassword"))
|
||||
.keyStore(sslConf.get("ssl.server.keystore.location"),
|
||||
sslConf.get("ssl.server.keystore.password"),
|
||||
sslConf.get("ssl.server.keystore.type", "jks"))
|
||||
.trustStore(sslConf.get("ssl.server.truststore.location"),
|
||||
sslConf.get("ssl.server.truststore.password"),
|
||||
sslConf.get("ssl.server.truststore.type", "jks"));
|
||||
}
|
||||
}
|
||||
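Editor's note: a rough usage sketch of the two helpers added above. getHttpSchemePrefix() picks "http://" or "https://" from yarn.http.policy; loadSslConfiguration() is not exercised here because it expects an ssl-server.xml resource on the classpath. The RM address below resolves from configuration defaults.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;

public class SchemePrefixExample {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    // "http://" with the default policy, "https://" under HTTPS_ONLY.
    String prefix = WebAppUtils.getHttpSchemePrefix(conf);
    // e.g. http://0.0.0.0:8088 when no RM web address is configured.
    System.out.println(prefix + WebAppUtils.getRMWebAppURLWithoutScheme(conf));
  }
}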
|
|
|
@ -54,7 +54,7 @@ public class TestHAUtil {
|
|||
conf.set(YarnConfiguration.RM_HA_IDS, RM_NODE_IDS_UNTRIMMED);
|
||||
conf.set(YarnConfiguration.RM_HA_ID, RM1_NODE_ID_UNTRIMMED);
|
||||
|
||||
for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
|
||||
for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
|
||||
// configuration key itself cannot contain space/tab/return chars.
|
||||
conf.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS_UNTRIMMED);
|
||||
conf.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS);
|
||||
|
@ -95,7 +95,7 @@ public class TestHAUtil {
|
|||
StringUtils.getStringCollection(RM_NODE_IDS), HAUtil.getRMHAIds(conf));
|
||||
assertEquals("Should be saved as Trimmed string",
|
||||
RM1_NODE_ID, HAUtil.getRMHAId(conf));
|
||||
for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
|
||||
for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
|
||||
assertEquals("RPC address not set for " + confKey,
|
||||
RM1_ADDRESS, conf.get(confKey));
|
||||
}
|
||||
|
@ -117,7 +117,7 @@ public class TestHAUtil {
|
|||
// simulate the case YarnConfiguration.RM_HA_ID is not set
|
||||
conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + ","
|
||||
+ RM2_NODE_ID);
|
||||
for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
|
||||
for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
|
||||
conf.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS);
|
||||
conf.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS);
|
||||
}
|
||||
|
@ -134,7 +134,7 @@ public class TestHAUtil {
|
|||
conf.set(YarnConfiguration.RM_HA_ID, RM_INVALID_NODE_ID);
|
||||
conf.set(YarnConfiguration.RM_HA_IDS, RM_INVALID_NODE_ID + ","
|
||||
+ RM1_NODE_ID);
|
||||
for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
|
||||
for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
|
||||
// simulate xml with invalid node id
|
||||
conf.set(confKey + RM_INVALID_NODE_ID, RM_INVALID_NODE_ID);
|
||||
}
|
||||
|
@ -169,7 +169,7 @@ public class TestHAUtil {
|
|||
conf.clear();
|
||||
conf.set(YarnConfiguration.RM_HA_IDS, RM2_NODE_ID + "," + RM3_NODE_ID);
|
||||
conf.set(YarnConfiguration.RM_HA_ID, RM1_NODE_ID_UNTRIMMED);
|
||||
for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
|
||||
for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
|
||||
conf.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS_UNTRIMMED);
|
||||
conf.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS);
|
||||
conf.set(HAUtil.addSuffix(confKey, RM3_NODE_ID), RM3_ADDRESS);
|
||||
|
|
|
@ -163,7 +163,7 @@ public class AppBlock extends HtmlBlock {
|
|||
.append(startTime)
|
||||
.append("\",\"<a href='")
|
||||
.append(
|
||||
nodeLink == null ? "#" : url(HttpConfig.getSchemePrefix(), nodeLink))
|
||||
nodeLink == null ? "#" : url("//", nodeLink))
|
||||
.append("'>")
|
||||
.append(
|
||||
nodeLink == null ? "N/A" : StringEscapeUtils
|
||||
|
|
|
@ -28,8 +28,6 @@ import org.apache.commons.logging.Log;
|
|||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.classification.InterfaceAudience.Private;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.http.HttpConfig;
|
||||
import org.apache.hadoop.http.HttpConfig.Policy;
|
||||
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
|
||||
import org.apache.hadoop.security.SecurityUtil;
|
||||
import org.apache.hadoop.service.CompositeService;
|
||||
|
@ -397,16 +395,9 @@ public class NodeManager extends CompositeService
|
|||
StringUtils.startupShutdownMessage(NodeManager.class, args, LOG);
|
||||
NodeManager nodeManager = new NodeManager();
|
||||
Configuration conf = new YarnConfiguration();
|
||||
setHttpPolicy(conf);
|
||||
nodeManager.initAndStartNodeManager(conf, false);
|
||||
}
|
||||
|
||||
private static void setHttpPolicy(Configuration conf) {
|
||||
HttpConfig.setPolicy(Policy.fromString(conf.get(
|
||||
YarnConfiguration.YARN_HTTP_POLICY_KEY,
|
||||
YarnConfiguration.YARN_HTTP_POLICY_DEFAULT)));
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
@Private
|
||||
public NodeStatusUpdater getNodeStatusUpdater() {
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
package org.apache.hadoop.yarn.server.nodemanager.webapp;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.http.HttpConfig;
|
||||
import org.apache.hadoop.yarn.webapp.YarnWebParams;
|
||||
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
|
||||
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
|
||||
|
|
|
@ -1015,7 +1015,6 @@ public class ResourceManager extends CompositeService implements Recoverable {
|
|||
ShutdownHookManager.get().addShutdownHook(
|
||||
new CompositeServiceShutdownHook(resourceManager),
|
||||
SHUTDOWN_HOOK_PRIORITY);
|
||||
setHttpPolicy(conf);
|
||||
resourceManager.init(conf);
|
||||
resourceManager.start();
|
||||
} catch (Throwable t) {
|
||||
|
@ -1024,12 +1023,6 @@ public class ResourceManager extends CompositeService implements Recoverable {
|
|||
}
|
||||
}
|
||||
|
||||
private static void setHttpPolicy(Configuration conf) {
|
||||
HttpConfig.setPolicy(Policy.fromString(conf.get(
|
||||
YarnConfiguration.YARN_HTTP_POLICY_KEY,
|
||||
YarnConfiguration.YARN_HTTP_POLICY_DEFAULT)));
|
||||
}
|
||||
|
||||
/**
|
||||
* Register the handlers for alwaysOn services
|
||||
*/
|
||||
|
|
|
@ -503,10 +503,11 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
|
|||
final String trackingUriWithoutScheme) {
|
||||
this.readLock.lock();
|
||||
try {
|
||||
final String scheme = WebAppUtils.getHttpSchemePrefix(conf);
|
||||
URI trackingUri = StringUtils.isEmpty(trackingUriWithoutScheme) ? null :
|
||||
ProxyUriUtils.getUriFromAMUrl(trackingUriWithoutScheme);
|
||||
ProxyUriUtils.getUriFromAMUrl(scheme, trackingUriWithoutScheme);
|
||||
String proxy = WebAppUtils.getProxyHostAndPort(conf);
|
||||
URI proxyUri = ProxyUriUtils.getUriFromAMUrl(proxy);
|
||||
URI proxyUri = ProxyUriUtils.getUriFromAMUrl(scheme, proxy);
|
||||
URI result = ProxyUriUtils.getProxyUri(trackingUri, proxyUri,
|
||||
applicationAttemptId.getApplicationId());
|
||||
return result.toASCIIString();
|
||||
|
|
|
@ -989,7 +989,13 @@ public class FairScheduler extends AbstractYarnScheduler {
|
|||
private void continuousScheduling() {
|
||||
while (true) {
|
||||
List<NodeId> nodeIdList = new ArrayList<NodeId>(nodes.keySet());
|
||||
// Sort the nodes by space available on them, so that we offer
|
||||
// containers on emptier nodes first, facilitating an even spread. This
|
||||
// requires holding the scheduler lock, so that the space available on a
|
||||
// node doesn't change during the sort.
|
||||
synchronized (this) {
|
||||
Collections.sort(nodeIdList, nodeAvailableResourceComparator);
|
||||
}
|
||||
|
||||
// iterate all nodes
|
||||
for (NodeId nodeId : nodeIdList) {
|
||||
|
@ -1366,7 +1372,8 @@ public class FairScheduler extends AbstractYarnScheduler {
|
|||
throw new YarnException("App to be moved " + appId + " not found.");
|
||||
}
|
||||
FSSchedulerApp attempt = (FSSchedulerApp) app.getCurrentAppAttempt();
|
||||
|
||||
// To serialize with FairScheduler#allocate, synchronize on app attempt
|
||||
synchronized (attempt) {
|
||||
FSLeafQueue oldQueue = (FSLeafQueue) app.getQueue();
|
||||
FSLeafQueue targetQueue = queueMgr.getLeafQueue(queueName, false);
|
||||
if (targetQueue == null) {
|
||||
|
@ -1385,6 +1392,7 @@ public class FairScheduler extends AbstractYarnScheduler {
|
|||
executeMove(app, attempt, oldQueue, targetQueue);
|
||||
return targetQueue.getQueueName();
|
||||
}
|
||||
}
|
||||
|
||||
private void verifyMoveDoesNotViolateConstraints(FSSchedulerApp app,
|
||||
FSLeafQueue oldQueue, FSLeafQueue targetQueue) throws YarnException {
|
||||
|
@ -1420,8 +1428,8 @@ public class FairScheduler extends AbstractYarnScheduler {
|
|||
}
|
||||
|
||||
/**
|
||||
* Helper for moveApplication, which is synchronized, so all operations will
|
||||
* be atomic.
|
||||
* Helper for moveApplication, which has appropriate synchronization, so all
|
||||
* operations will be atomic.
|
||||
*/
|
||||
private void executeMove(SchedulerApplication app, FSSchedulerApp attempt,
|
||||
FSLeafQueue oldQueue, FSLeafQueue newQueue) {
|
||||
|
|
|
@ -27,7 +27,7 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH;
|
|||
|
||||
import java.util.Collection;
|
||||
|
||||
import org.apache.hadoop.http.HttpConfig;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
|
||||
|
@ -46,6 +46,7 @@ import org.apache.hadoop.yarn.util.Times;
|
|||
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
|
||||
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
|
||||
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
|
||||
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
|
||||
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
|
||||
import org.apache.hadoop.yarn.webapp.view.InfoBlock;
|
||||
|
||||
|
@ -55,13 +56,16 @@ public class AppBlock extends HtmlBlock {
|
|||
|
||||
private ApplicationACLsManager aclsManager;
|
||||
private QueueACLsManager queueACLsManager;
|
||||
private final Configuration conf;
|
||||
|
||||
@Inject
|
||||
AppBlock(ResourceManager rm, ViewContext ctx,
|
||||
ApplicationACLsManager aclsManager, QueueACLsManager queueACLsManager) {
|
||||
ApplicationACLsManager aclsManager, QueueACLsManager queueACLsManager,
|
||||
Configuration conf) {
|
||||
super(ctx);
|
||||
this.aclsManager = aclsManager;
|
||||
this.queueACLsManager = queueACLsManager;
|
||||
this.conf = conf;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -86,7 +90,7 @@ public class AppBlock extends HtmlBlock {
|
|||
puts("Application not found: "+ aid);
|
||||
return;
|
||||
}
|
||||
AppInfo app = new AppInfo(rmApp, true);
|
||||
AppInfo app = new AppInfo(rmApp, true, WebAppUtils.getHttpSchemePrefix(conf));
|
||||
|
||||
// Check for the authorization.
|
||||
String remoteUser = request().getRemoteUser();
|
||||
|
@ -146,7 +150,7 @@ public class AppBlock extends HtmlBlock {
|
|||
table.tr((odd = !odd) ? _ODD : _EVEN).
|
||||
td(String.valueOf(attemptInfo.getAttemptId())).
|
||||
td(Times.format(attemptInfo.getStartTime())).
|
||||
td().a(".nodelink", url(HttpConfig.getSchemePrefix(),
|
||||
td().a(".nodelink", url("//",
|
||||
attemptInfo.getNodeHttpAddress()),
|
||||
attemptInfo.getNodeHttpAddress())._().
|
||||
td().a(".logslink", url(attemptInfo.getLogsLink()), "logs")._().
|
||||
|
|
|
@ -28,6 +28,7 @@ import java.util.HashSet;
|
|||
import java.util.concurrent.ConcurrentMap;
|
||||
|
||||
import org.apache.commons.lang.StringEscapeUtils;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationId;
|
||||
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
|
||||
|
@ -36,16 +37,19 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
|
|||
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
|
||||
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
|
||||
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
|
||||
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
|
||||
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
|
||||
|
||||
import com.google.inject.Inject;
|
||||
|
||||
class AppsBlock extends HtmlBlock {
|
||||
final ConcurrentMap<ApplicationId, RMApp> apps;
|
||||
private final Configuration conf;
|
||||
|
||||
@Inject AppsBlock(RMContext rmContext, ViewContext ctx) {
|
||||
@Inject AppsBlock(RMContext rmContext, ViewContext ctx, Configuration conf) {
|
||||
super(ctx);
|
||||
apps = rmContext.getRMApps();
|
||||
this.conf = conf;
|
||||
}
|
||||
|
||||
@Override public void render(Block html) {
|
||||
|
@ -79,7 +83,7 @@ class AppsBlock extends HtmlBlock {
|
|||
if (reqAppStates != null && !reqAppStates.contains(app.createApplicationState())) {
|
||||
continue;
|
||||
}
|
||||
AppInfo appInfo = new AppInfo(app, true);
|
||||
AppInfo appInfo = new AppInfo(app, true, WebAppUtils.getHttpSchemePrefix(conf));
|
||||
String percent = String.format("%.1f", appInfo.getProgress());
|
||||
//AppID numerical value parsed by parseHadoopID in yarn.dt.plugins.js
|
||||
appsTableData.append("[\"<a href='")
|
||||
|
|
|
@ -28,6 +28,7 @@ import java.util.HashSet;
|
|||
import java.util.concurrent.ConcurrentMap;
|
||||
|
||||
import org.apache.commons.lang.StringEscapeUtils;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationId;
|
||||
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
|
||||
|
@ -40,6 +41,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FairSchedulerInf
|
|||
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
|
||||
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
|
||||
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
|
||||
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
|
||||
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
|
||||
|
||||
import com.google.inject.Inject;
|
||||
|
@ -51,13 +53,15 @@ import com.google.inject.Inject;
|
|||
public class FairSchedulerAppsBlock extends HtmlBlock {
|
||||
final ConcurrentMap<ApplicationId, RMApp> apps;
|
||||
final FairSchedulerInfo fsinfo;
|
||||
final Configuration conf;
|
||||
|
||||
@Inject public FairSchedulerAppsBlock(RMContext rmContext,
|
||||
ResourceManager rm, ViewContext ctx) {
|
||||
ResourceManager rm, ViewContext ctx, Configuration conf) {
|
||||
super(ctx);
|
||||
FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
|
||||
fsinfo = new FairSchedulerInfo(scheduler);
|
||||
apps = rmContext.getRMApps();
|
||||
this.conf = conf;
|
||||
}
|
||||
|
||||
@Override public void render(Block html) {
|
||||
|
@ -91,7 +95,7 @@ public class FairSchedulerAppsBlock extends HtmlBlock {
|
|||
if (reqAppStates != null && !reqAppStates.contains(app.getState())) {
|
||||
continue;
|
||||
}
|
||||
AppInfo appInfo = new AppInfo(app, true);
|
||||
AppInfo appInfo = new AppInfo(app, true, WebAppUtils.getHttpSchemePrefix(conf));
|
||||
String percent = String.format("%.1f", appInfo.getProgress());
|
||||
ApplicationAttemptId attemptId = app.getCurrentAppAttempt().getAppAttemptId();
|
||||
int fairShare = fsinfo.getAppFairShare(attemptId);
|
||||
|
|
|
@ -26,7 +26,6 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
|
|||
|
||||
import java.util.Collection;
|
||||
|
||||
import org.apache.hadoop.http.HttpConfig;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
import org.apache.hadoop.yarn.api.records.NodeState;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
|
||||
|
@ -119,7 +118,7 @@ class NodesPage extends RmView {
|
|||
row.td()._("N/A")._();
|
||||
} else {
|
||||
String httpAddress = info.getNodeHTTPAddress();
|
||||
row.td().a(HttpConfig.getSchemePrefix() + httpAddress,
|
||||
row.td().a("//" + httpAddress,
|
||||
httpAddress)._();
|
||||
}
|
||||
row.td().br().$title(String.valueOf(info.getLastHealthUpdate()))._().
|
||||
|
|
|
@ -41,6 +41,7 @@ import javax.ws.rs.core.MediaType;
|
|||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
|
||||
|
@ -85,6 +86,7 @@ import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
|
|||
import org.apache.hadoop.yarn.util.ConverterUtils;
|
||||
import org.apache.hadoop.yarn.webapp.BadRequestException;
|
||||
import org.apache.hadoop.yarn.webapp.NotFoundException;
|
||||
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
|
||||
|
||||
import com.google.inject.Inject;
|
||||
import com.google.inject.Singleton;
|
||||
|
@ -101,16 +103,18 @@ public class RMWebServices {
|
|||
.getRecordFactory(null);
|
||||
private final ApplicationACLsManager aclsManager;
|
||||
private final QueueACLsManager queueACLsManager;
|
||||
|
||||
private final Configuration conf;
|
||||
private @Context HttpServletResponse response;
|
||||
|
||||
@Inject
|
||||
public RMWebServices(final ResourceManager rm,
|
||||
final ApplicationACLsManager aclsManager,
|
||||
final QueueACLsManager queueACLsManager) {
|
||||
final QueueACLsManager queueACLsManager,
|
||||
Configuration conf) {
|
||||
this.rm = rm;
|
||||
this.aclsManager = aclsManager;
|
||||
this.queueACLsManager = queueACLsManager;
|
||||
this.conf = conf;
|
||||
}
|
||||
|
||||
protected Boolean hasAccess(RMApp app, HttpServletRequest hsr) {
|
||||
|
@ -415,7 +419,8 @@ public class RMWebServices {
|
|||
}
|
||||
}
|
||||
|
||||
AppInfo app = new AppInfo(rmapp, hasAccess(rmapp, hsr));
|
||||
AppInfo app = new AppInfo(rmapp, hasAccess(rmapp, hsr),
|
||||
WebAppUtils.getHttpSchemePrefix(conf));
|
||||
allApps.add(app);
|
||||
}
|
||||
return allApps;
|
||||
|
@ -555,7 +560,7 @@ public class RMWebServices {
|
|||
if (app == null) {
|
||||
throw new NotFoundException("app with id: " + appId + " not found");
|
||||
}
|
||||
return new AppInfo(app, hasAccess(app, hsr));
|
||||
return new AppInfo(app, hasAccess(app, hsr), hsr.getScheme() + "://");
|
||||
}
|
||||
|
||||
@GET
|
||||
|
|
|
@ -23,7 +23,6 @@ import javax.xml.bind.annotation.XmlAccessType;
|
|||
import javax.xml.bind.annotation.XmlAccessorType;
|
||||
import javax.xml.bind.annotation.XmlRootElement;
|
||||
|
||||
import org.apache.hadoop.http.HttpConfig;
|
||||
import org.apache.hadoop.yarn.api.records.Container;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
|
||||
import org.apache.hadoop.yarn.util.ConverterUtils;
|
||||
|
@ -56,7 +55,7 @@ public class AppAttemptInfo {
|
|||
this.containerId = masterContainer.getId().toString();
|
||||
this.nodeHttpAddress = masterContainer.getNodeHttpAddress();
|
||||
this.nodeId = masterContainer.getNodeId().toString();
|
||||
this.logsLink = join(HttpConfig.getSchemePrefix(),
|
||||
this.logsLink = join("//",
|
||||
masterContainer.getNodeHttpAddress(),
|
||||
"/node", "/containerlogs/",
|
||||
ConverterUtils.toString(masterContainer.getId()), "/", user);
|
||||
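Editor's note: the UI links above switch to scheme-relative form, so the browser reuses whatever scheme the embedding page was served over instead of a hard-coded http://. A tiny sketch of the resulting shape, using plain concatenation in place of the join() helper and made-up host/container/user values:

public class SchemeRelativeLinkExample {
  public static void main(String[] args) {
    String nodeHttpAddress = "nm-host.example.com:8042";            // made up
    String containerId = "container_1394000000000_0001_01_000001";  // made up
    // Leading "//" means: keep the scheme of the page that renders this link.
    String logsLink = "//" + nodeHttpAddress + "/node/containerlogs/"
        + containerId + "/" + "alice";
    System.out.println(logsLink);
  }
}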
|
|
|
@ -25,7 +25,6 @@ import javax.xml.bind.annotation.XmlRootElement;
|
|||
import javax.xml.bind.annotation.XmlTransient;
|
||||
|
||||
import com.google.common.base.Joiner;
|
||||
import org.apache.hadoop.http.HttpConfig;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationId;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
|
||||
import org.apache.hadoop.yarn.api.records.Container;
|
||||
|
@ -53,6 +52,8 @@ public class AppInfo {
|
|||
protected boolean amContainerLogsExist = false;
|
||||
@XmlTransient
|
||||
protected ApplicationId applicationId;
|
||||
@XmlTransient
|
||||
private String schemePrefix;
|
||||
|
||||
// these are ok for any user to see
|
||||
protected String id;
|
||||
|
@ -82,12 +83,8 @@ public class AppInfo {
|
|||
public AppInfo() {
|
||||
} // JAXB needs this
|
||||
|
||||
public AppInfo(RMApp app, Boolean hasAccess, String host) {
|
||||
this(app, hasAccess);
|
||||
}
|
||||
|
||||
public AppInfo(RMApp app, Boolean hasAccess) {
|
||||
|
||||
public AppInfo(RMApp app, Boolean hasAccess, String schemePrefix) {
|
||||
this.schemePrefix = schemePrefix;
|
||||
if (app != null) {
|
||||
String trackingUrl = app.getTrackingUrl();
|
||||
this.state = app.createApplicationState();
|
||||
|
@ -100,7 +97,7 @@ public class AppInfo {
|
|||
.getFinishTime() == 0 ? "ApplicationMaster" : "History");
|
||||
if (!trackingUrlIsNotReady) {
|
||||
this.trackingUrl =
|
||||
WebAppUtils.getURLWithScheme(HttpConfig.getSchemePrefix(),
|
||||
WebAppUtils.getURLWithScheme(schemePrefix,
|
||||
trackingUrl);
|
||||
this.trackingUrlPretty = this.trackingUrl;
|
||||
} else {
|
||||
|
@ -134,7 +131,7 @@ public class AppInfo {
|
|||
Container masterContainer = attempt.getMasterContainer();
|
||||
if (masterContainer != null) {
|
||||
this.amContainerLogsExist = true;
|
||||
String url = join(HttpConfig.getSchemePrefix(),
|
||||
String url = join(schemePrefix,
|
||||
masterContainer.getNodeHttpAddress(),
|
||||
"/node", "/containerlogs/",
|
||||
ConverterUtils.toString(masterContainer.getId()),
|
||||
|
|
|
@ -63,8 +63,10 @@ public class TestRMHA {
|
|||
@Before
|
||||
public void setUp() throws Exception {
|
||||
configuration.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
|
||||
configuration.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
|
||||
for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
|
||||
configuration.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + ","
|
||||
+ RM2_NODE_ID);
|
||||
for (String confKey : YarnConfiguration
|
||||
.getServiceAddressConfKeys(configuration)) {
|
||||
configuration.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS);
|
||||
configuration.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS);
|
||||
configuration.set(HAUtil.addSuffix(confKey, RM3_NODE_ID), RM3_ADDRESS);
|
||||
|
@ -329,7 +331,7 @@ public class TestRMHA {
|
|||
Configuration conf = new YarnConfiguration(configuration);
|
||||
rm = new MockRM(conf);
|
||||
rm.init(conf);
|
||||
for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
|
||||
for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
|
||||
assertEquals("RPC address not set for " + confKey,
|
||||
RM1_ADDRESS, conf.get(HAUtil.addSuffix(confKey, RM1_NODE_ID)));
|
||||
assertEquals("RPC address not set for " + confKey,
|
||||
|
|
|
@ -134,7 +134,9 @@ public class TestZKRMStateStore extends RMStateStoreTestBase {
|
|||
conf.set(YarnConfiguration.RM_ZK_ADDRESS, hostPort);
|
||||
conf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS, ZK_TIMEOUT_MS);
|
||||
conf.set(YarnConfiguration.RM_HA_ID, rmId);
|
||||
for (String rpcAddress : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
|
||||
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, "localhost:0");
|
||||
|
||||
for (String rpcAddress : YarnConfiguration.getServiceAddressConfKeys(conf)) {
|
||||
for (String id : HAUtil.getRMHAIds(conf)) {
|
||||
conf.set(HAUtil.addSuffix(rpcAddress, id), "localhost:0");
|
||||
}
|
||||
|
|
|
@ -285,13 +285,14 @@ public class TestRMAppAttemptTransitions {
|
|||
|
||||
private String getProxyUrl(RMAppAttempt appAttempt) {
|
||||
String url = null;
|
||||
final String scheme = WebAppUtils.getHttpSchemePrefix(conf);
|
||||
try {
|
||||
URI trackingUri =
|
||||
StringUtils.isEmpty(appAttempt.getOriginalTrackingUrl()) ? null :
|
||||
ProxyUriUtils
|
||||
.getUriFromAMUrl(appAttempt.getOriginalTrackingUrl());
|
||||
.getUriFromAMUrl(scheme, appAttempt.getOriginalTrackingUrl());
|
||||
String proxy = WebAppUtils.getProxyHostAndPort(conf);
|
||||
URI proxyUri = ProxyUriUtils.getUriFromAMUrl(proxy);
|
||||
URI proxyUri = ProxyUriUtils.getUriFromAMUrl(scheme, proxy);
|
||||
URI result = ProxyUriUtils.getProxyUri(trackingUri, proxyUri,
|
||||
appAttempt.getAppAttemptId().getApplicationId());
|
||||
url = result.toASCIIString();
|
||||
|
|
|
@ -105,7 +105,7 @@ public class TestRMContainerImpl {
|
|||
drainDispatcher.await();
|
||||
assertEquals(RMContainerState.RUNNING, rmContainer.getState());
|
||||
assertEquals(
|
||||
"http://host:3465/logs/host:3425/container_1_0001_01_000001/container_1_0001_01_000001/user",
|
||||
"//host:3465/logs/host:3425/container_1_0001_01_000001/container_1_0001_01_000001/user",
|
||||
rmContainer.getLogURL());
|
||||
|
||||
// In RUNNING state. Verify RELEASED and associated actions.
|
||||
|
@ -192,7 +192,7 @@ public class TestRMContainerImpl {
|
|||
drainDispatcher.await();
|
||||
assertEquals(RMContainerState.RUNNING, rmContainer.getState());
|
||||
assertEquals(
|
||||
"http://host:3465/logs/host:3425/container_1_0001_01_000001/container_1_0001_01_000001/user",
|
||||
"//host:3465/logs/host:3425/container_1_0001_01_000001/container_1_0001_01_000001/user",
|
||||
rmContainer.getLogURL());
|
||||
|
||||
// In RUNNING state. Verify EXPIRE and associated actions.
|
||||
|
|
|
@ -1606,8 +1606,7 @@ public class TestRMWebServicesApps extends JerseyTest {
|
|||
.getMasterContainer().getNodeHttpAddress(), nodeHttpAddress);
|
||||
WebServicesTestUtils.checkStringMatch("nodeId", appAttempt
|
||||
.getMasterContainer().getNodeId().toString(), nodeId);
|
||||
assertTrue("logsLink doesn't match",
|
||||
logsLink.startsWith("http://"));
|
||||
assertTrue("logsLink doesn't match", logsLink.startsWith("//"));
|
||||
assertTrue(
|
||||
"logsLink doesn't contain user info", logsLink.endsWith("/"
|
||||
+ user));
|
||||
|
|
|
@ -253,7 +253,7 @@ public class MiniYARNCluster extends CompositeService {
|
|||
|
||||
private void setHARMConfiguration(final int index, Configuration conf) {
|
||||
String hostname = MiniYARNCluster.getHostname();
|
||||
for (String confKey : YarnConfiguration.RM_SERVICES_ADDRESS_CONF_KEYS) {
|
||||
for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
|
||||
conf.set(HAUtil.addSuffix(confKey, rmIds[index]), hostname + ":0");
|
||||
}
|
||||
}
|
||||
|
|
|
@ -18,23 +18,17 @@
|
|||
|
||||
package org.apache.hadoop.yarn.server;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.ha.HAServiceProtocol;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnException;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.AdminService;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertNotSame;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.fail;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.ha.HAServiceProtocol;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnException;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
public class TestMiniYARNClusterForHA {
|
||||
MiniYARNCluster cluster;
|
||||
|
@ -43,6 +37,8 @@ public class TestMiniYARNClusterForHA {
|
|||
public void setup() throws IOException, InterruptedException {
|
||||
Configuration conf = new YarnConfiguration();
|
||||
conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
|
||||
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, "localhost:0");
|
||||
|
||||
cluster = new MiniYARNCluster(TestMiniYARNClusterForHA.class.getName(),
|
||||
2, 1, 1, 1);
|
||||
cluster.init(conf);
|
||||
|
|
|
@ -135,27 +135,6 @@ public class ProxyUriUtils {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a URI from a no-scheme URL, such as is returned by the AM.
|
||||
* @param url the URL format returned by an AM. This may or may not contain
|
||||
* scheme.
|
||||
* @return a URI with an http scheme
|
||||
* @throws URISyntaxException if the url is not formatted correctly.
|
||||
*/
|
||||
public static URI getUriFromAMUrl(String url)
|
||||
throws URISyntaxException {
|
||||
if (getSchemeFromUrl(url).isEmpty()) {
|
||||
/*
|
||||
* check is made to make sure if AM reports with scheme then it will be
|
||||
* used by default otherwise it will default to the one configured using
|
||||
* "yarn.http.policy".
|
||||
*/
|
||||
return new URI(HttpConfig.getSchemePrefix() + url);
|
||||
} else {
|
||||
return new URI(url);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a URI from a no-scheme URL, such as is returned by the AM.
|
||||
* @param noSchemeUrl the URL format returned by an AM
|
||||
|
@ -170,7 +149,7 @@ public class ProxyUriUtils {
|
|||
* used by default otherwise it will default to the one configured using
|
||||
* "yarn.http.policy".
|
||||
*/
|
||||
return new URI(scheme + "://" + noSchemeUrl);
|
||||
return new URI(scheme + noSchemeUrl);
|
||||
} else {
|
||||
return new URI(noSchemeUrl);
|
||||
}
|
||||
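Editor's note: a hedged sketch of how the scheme-aware overload above is expected to be driven (package names as in the YARN web-proxy module; the AM address is invented). The caller resolves the scheme from yarn.http.policy, and the helper simply prepends it when the AM reported a scheme-less URL.

import java.net.URI;
import java.net.URISyntaxException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.webproxy.ProxyUriUtils;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;

public class AmTrackingUriExample {
  public static void main(String[] args) throws URISyntaxException {
    Configuration conf = new YarnConfiguration();
    // getHttpSchemePrefix() already includes "://", matching the new
    // "scheme + noSchemeUrl" concatenation in getUriFromAMUrl().
    URI trackingUri = ProxyUriUtils.getUriFromAMUrl(
        WebAppUtils.getHttpSchemePrefix(conf), "am-host.example.com:41234");
    System.out.println(trackingUri);
  }
}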
|
|
|
@ -90,14 +90,22 @@ public class WebAppProxy extends AbstractService {
|
|||
@Override
|
||||
protected void serviceStart() throws Exception {
|
||||
try {
|
||||
proxyServer = new HttpServer2.Builder().setName("proxy")
|
||||
.addEndpoint(URI.create("http://" + bindAddress + ":" + port))
|
||||
.setFindPort(port == 0)
|
||||
.setConf(getConfig()).setACL(acl).build();
|
||||
Configuration conf = getConfig();
|
||||
HttpServer2.Builder b = new HttpServer2.Builder()
|
||||
.setName("proxy")
|
||||
.addEndpoint(
|
||||
URI.create(WebAppUtils.getHttpSchemePrefix(conf) + bindAddress
|
||||
+ ":" + port)).setFindPort(port == 0).setConf(getConfig())
|
||||
.setACL(acl);
|
||||
if (YarnConfiguration.useHttps(conf)) {
|
||||
WebAppUtils.loadSslConfiguration(b);
|
||||
}
|
||||
proxyServer = b.build();
|
||||
proxyServer.addServlet(ProxyUriUtils.PROXY_SERVLET_NAME,
|
||||
ProxyUriUtils.PROXY_PATH_SPEC, WebAppProxyServlet.class);
|
||||
proxyServer.setAttribute(FETCHER_ATTRIBUTE, fetcher);
|
||||
proxyServer.setAttribute(IS_SECURITY_ENABLED_ATTRIBUTE, isSecurityEnabled);
|
||||
proxyServer
|
||||
.setAttribute(IS_SECURITY_ENABLED_ATTRIBUTE, isSecurityEnabled);
|
||||
proxyServer.setAttribute(PROXY_HOST_ATTRIBUTE, proxyHost);
|
||||
proxyServer.start();
|
||||
} catch (IOException e) {
|
||||
|
|
|
@ -69,6 +69,7 @@ public class WebAppProxyServlet extends HttpServlet {
|
|||
|
||||
private final List<TrackingUriPlugin> trackingUriPlugins;
|
||||
private final String rmAppPageUrlBase;
|
||||
private final transient YarnConfiguration conf;
|
||||
|
||||
private static class _ implements Hamlet._ {
|
||||
//Empty
|
||||
|
@ -90,7 +91,7 @@ public class WebAppProxyServlet extends HttpServlet {
|
|||
public WebAppProxyServlet()
|
||||
{
|
||||
super();
|
||||
YarnConfiguration conf = new YarnConfiguration();
|
||||
conf = new YarnConfiguration();
|
||||
this.trackingUriPlugins =
|
||||
conf.getInstances(YarnConfiguration.YARN_TRACKING_URL_GENERATOR,
|
||||
TrackingUriPlugin.class);
|
||||
|
@ -300,7 +301,8 @@ public class WebAppProxyServlet extends HttpServlet {
|
|||
return;
|
||||
} else {
|
||||
if (ProxyUriUtils.getSchemeFromUrl(original).isEmpty()) {
|
||||
trackingUri = ProxyUriUtils.getUriFromAMUrl("http", original);
|
||||
trackingUri = ProxyUriUtils.getUriFromAMUrl(
|
||||
WebAppUtils.getHttpSchemePrefix(conf), original);
|
||||
} else {
|
||||
trackingUri = new URI(original);
|
||||
}
|
||||
|
|
|
@ -24,7 +24,6 @@ import java.util.Map;
|
|||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.http.FilterContainer;
|
||||
import org.apache.hadoop.http.FilterInitializer;
|
||||
import org.apache.hadoop.http.HttpConfig;
|
||||
import org.apache.hadoop.yarn.api.ApplicationConstants;
|
||||
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
|
||||
|
||||
|
@ -39,7 +38,7 @@ public class AmFilterInitializer extends FilterInitializer {
|
|||
String[] parts = proxy.split(":");
|
||||
params.put(AmIpFilter.PROXY_HOST, parts[0]);
|
||||
params.put(AmIpFilter.PROXY_URI_BASE,
|
||||
HttpConfig.getSchemePrefix() + proxy +
|
||||
WebAppUtils.getHttpSchemePrefix(conf) + proxy +
|
||||
System.getenv(ApplicationConstants.APPLICATION_WEB_PROXY_BASE_ENV));
|
||||
container.addFilter(FILTER_NAME, FILTER_CLASS, params);
|
||||
}
|
||||
|
|
|
@ -288,8 +288,9 @@ public class TestWebAppProxyServlet {
|
|||
YarnConfiguration.DEFAULT_YARN_ADMIN_ACL));
|
||||
proxyServer = new HttpServer2.Builder()
|
||||
.setName("proxy")
|
||||
.addEndpoint(URI.create("http://" + bindAddress + ":0"))
|
||||
.setFindPort(true)
|
||||
.addEndpoint(
|
||||
URI.create(WebAppUtils.getHttpSchemePrefix(conf) + bindAddress
|
||||
+ ":0")).setFindPort(true)
|
||||
.setConf(conf)
|
||||
.setACL(acl)
|
||||
.build();
|
||||
|
|