Merge trunk into branch

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-3077@1372630 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon 2012-08-13 21:31:44 +00:00
commit 4b67401565
37 changed files with 390 additions and 89 deletions


@@ -213,6 +213,8 @@ Branch-2 ( Unreleased changes )
 HADOOP-8644. AuthenticatedURL should be able to use SSLFactory. (tucu)
+HADOOP-8681. add support for HTTPS to the web UIs. (tucu)
 IMPROVEMENTS
 HADOOP-8340. SNAPSHOT build versions should compare as less than their eventual


@@ -18,17 +18,18 @@
 cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
-find_package(JNI REQUIRED)
 # If JVM_ARCH_DATA_MODEL is 32, compile all binaries as 32-bit.
 # This variable is set by maven.
 if (JVM_ARCH_DATA_MODEL EQUAL 32)
 # Force 32-bit code generation on amd64/x86_64, ppc64, sparc64
 if (CMAKE_COMPILER_IS_GNUCC AND CMAKE_SYSTEM_PROCESSOR MATCHES ".*64")
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -m32")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -m32")
 set(CMAKE_LD_FLAGS "${CMAKE_LD_FLAGS} -m32")
 endif ()
 if (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64")
+# Set CMAKE_SYSTEM_PROCESSOR to ensure that find_package(JNI) will use
+# the 32-bit version of libjvm.so.
 set(CMAKE_SYSTEM_PROCESSOR "i686")
 endif ()
 endif (JVM_ARCH_DATA_MODEL EQUAL 32)
@@ -63,3 +64,5 @@ if (CMAKE_SYSTEM_PROCESSOR MATCHES "^arm" AND CMAKE_SYSTEM_NAME STREQUAL "Linux"
 endif ()
 endif (READELF MATCHES "NOTFOUND")
 endif (CMAKE_SYSTEM_PROCESSOR MATCHES "^arm" AND CMAKE_SYSTEM_NAME STREQUAL "Linux")
+find_package(JNI REQUIRED)


@@ -239,5 +239,8 @@ public class CommonConfigurationKeysPublic {
 public static final String HADOOP_SECURITY_AUTH_TO_LOCAL =
 "hadoop.security.auth_to_local";
+public static final String HADOOP_SSL_ENABLED_KEY = "hadoop.ssl.enabled";
+public static final boolean HADOOP_SSL_ENABLED_DEFAULT = false;
 }


@@ -0,0 +1,48 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
/**
* Singleton to get access to Http related configuration.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class HttpConfig {
private static boolean sslEnabled;
static {
Configuration conf = new Configuration();
sslEnabled = conf.getBoolean(
CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_KEY,
CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_DEFAULT);
}
public static boolean isSecure() {
return sslEnabled;
}
public static String getSchemePrefix() {
return (isSecure()) ? "https://" : "http://";
}
}
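
A usage note, not part of the patch: the rest of this change replaces hard-coded "http://" prefixes with this helper, so web UI links end up being built roughly as in the sketch below (the host and port values are illustrative placeholders).

// Resolves to "https://..." when hadoop.ssl.enabled is true, "http://..." otherwise.
String host = "namenode.example.com";  // placeholder
int port = 50070;                      // placeholder
String link = HttpConfig.getSchemePrefix() + host + ":" + port + "/dfshealth.jsp";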


@@ -24,12 +24,14 @@ import java.io.InterruptedIOException;
 import java.net.BindException;
 import java.net.InetSocketAddress;
 import java.net.URL;
+import java.security.GeneralSecurityException;
 import java.util.ArrayList;
 import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import javax.net.ssl.SSLServerSocketFactory;
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
 import javax.servlet.FilterConfig;
@@ -56,6 +58,7 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.mortbay.io.Buffer;
 import org.mortbay.jetty.Connector;
@@ -105,6 +108,7 @@ public class HttpServer implements FilterContainer {
 private AccessControlList adminsAcl;
+private SSLFactory sslFactory;
 protected final Server webServer;
 protected final Connector listener;
 protected final WebAppContext webAppContext;
@@ -208,7 +212,23 @@ public class HttpServer implements FilterContainer {
 if(connector == null) {
 listenerStartedExternally = false;
+if (HttpConfig.isSecure()) {
+sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
+try {
+sslFactory.init();
+} catch (GeneralSecurityException ex) {
+throw new IOException(ex);
+}
+SslSocketConnector sslListener = new SslSocketConnector() {
+@Override
+protected SSLServerSocketFactory createFactory() throws Exception {
+return sslFactory.createSSLServerSocketFactory();
+}
+};
+listener = sslListener;
+} else {
 listener = createBaseListener(conf);
+}
 listener.setHost(bindAddress);
 listener.setPort(port);
 } else {
@@ -720,6 +740,16 @@ public class HttpServer implements FilterContainer {
 exception = addMultiException(exception, e);
 }
+try {
+if (sslFactory != null) {
+sslFactory.destroy();
+}
+} catch (Exception e) {
+LOG.error("Error while destroying the SSLFactory"
++ webAppContext.getDisplayName(), e);
+exception = addMultiException(exception, e);
+}
 try {
 // clear & stop webAppContext attributes to avoid memory leaks.
 webAppContext.clearAttributes();
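
For readability, the server-side wiring the hunks above add to HttpServer can be restated as a self-contained sketch; the class name ListenerSketch is hypothetical, and it assumes the Jetty 6 (org.mortbay) connectors this HttpServer already depends on.

import java.io.IOException;
import java.security.GeneralSecurityException;
import javax.net.ssl.SSLServerSocketFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.nio.SelectChannelConnector;
import org.mortbay.jetty.security.SslSocketConnector;

class ListenerSketch {
  // Choose an HTTP or HTTPS Jetty connector, mirroring the pattern in the hunk above.
  static Connector createListener(Configuration conf) throws IOException {
    if (!HttpConfig.isSecure()) {
      // Plain HTTP; HttpServer#createBaseListener does roughly this.
      return new SelectChannelConnector();
    }
    // SERVER mode loads the daemon's keystore/truststore configuration.
    final SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
    try {
      sslFactory.init();
    } catch (GeneralSecurityException ex) {
      throw new IOException(ex);
    }
    // Jetty asks the connector for its server socket factory; delegate to SSLFactory.
    return new SslSocketConnector() {
      @Override
      protected SSLServerSocketFactory createFactory() throws Exception {
        return sslFactory.createSSLServerSocketFactory();
      }
    };
  }
}

As in the patch, the matching sslFactory.destroy() belongs in the server's stop path so keystore resources are released.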


@@ -40,10 +40,12 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenInfo;
@@ -66,11 +68,22 @@ public class SecurityUtil {
 @VisibleForTesting
 static HostResolver hostResolver;
+private static SSLFactory sslFactory;
 static {
-boolean useIp = new Configuration().getBoolean(
+Configuration conf = new Configuration();
+boolean useIp = conf.getBoolean(
 CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP,
 CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP_DEFAULT);
 setTokenServiceUseIp(useIp);
+if (HttpConfig.isSecure()) {
+sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
+try {
+sslFactory.init();
+} catch (Exception ex) {
+throw new RuntimeException(ex);
+}
+}
 }
 /**
@@ -456,7 +469,7 @@ public class SecurityUtil {
 AuthenticatedURL.Token token = new AuthenticatedURL.Token();
 try {
-return new AuthenticatedURL().openConnection(url, token);
+return new AuthenticatedURL(null, sslFactory).openConnection(url, token);
 } catch (AuthenticationException e) {
 throw new IOException("Exception trying to open authenticated connection to "
 + url, e);


@@ -1073,4 +1073,14 @@
 </description>
 </property>
+<property>
+<name>hadoop.ssl.enabled</name>
+<value>false</value>
+<description>
+Whether to use SSL for the HTTP endpoints. If set to true, the
+NameNode, DataNode, ResourceManager, NodeManager, HistoryServer and
+MapReduceAppMaster web UIs will be served over HTTPS instead of HTTP.
+</description>
+</property>
 </configuration>
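
A hedged note, not part of the patch: HttpConfig reads this key from the default resources in a static initializer, so the flag must be present in a core-site.xml on the daemon's classpath (TestSSLHttpServer below persists it the same way). A minimal sketch, with an illustrative output path:

import java.io.FileWriter;
import java.io.Writer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

public class EnableSslSketch {
  public static void main(String[] args) throws Exception {
    // Persist hadoop.ssl.enabled=true into a core-site.xml; "conf/core-site.xml" is
    // illustrative and only takes effect once that directory is on the classpath.
    Configuration conf = new Configuration(false);
    conf.setBoolean(CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_KEY, true);
    Writer writer = new FileWriter("conf/core-site.xml");
    conf.writeXml(writer);
    writer.close();
  }
}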


@@ -0,0 +1,114 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import javax.net.ssl.HttpsURLConnection;
import java.io.File;
import java.io.FileWriter;
import java.io.InputStream;
import java.io.Writer;
import java.net.URL;
/**
* This test case issues SSL certificates, configures the HttpServer to serve
* HTTPS using the created certificates, and calls an echo servlet using the
* corresponding HTTPS URL.
*/
public class TestSSLHttpServer extends HttpServerFunctionalTest {
private static final String BASEDIR =
System.getProperty("test.build.dir", "target/test-dir") + "/" +
TestSSLHttpServer.class.getSimpleName();
static final Log LOG = LogFactory.getLog(TestSSLHttpServer.class);
private static HttpServer server;
private static URL baseUrl;
@Before
public void setup() throws Exception {
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
base.mkdirs();
String classpathDir =
KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
Configuration conf = new Configuration();
String keystoresDir = new File(BASEDIR).getAbsolutePath();
String sslConfsDir =
KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf, false);
conf.setBoolean(CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_KEY, true);
//we do this trick because the MR AppMaster is started in another VM and
//the HttpServer configuration is not loaded from the job.xml but from the
//site.xml files in the classpath
Writer writer = new FileWriter(classpathDir + "/core-site.xml");
conf.writeXml(writer);
writer.close();
conf.setInt(HttpServer.HTTP_MAX_THREADS, 10);
server = createServer("test", conf);
server.addServlet("echo", "/echo", TestHttpServer.EchoServlet.class);
server.start();
baseUrl = new URL("https://localhost:" + server.getPort() + "/");
LOG.info("HTTP server started: "+ baseUrl);
}
@After
public void cleanup() throws Exception {
server.stop();
String classpathDir =
KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
new File(classpathDir + "/core-site.xml").delete();
}
@Test
public void testEcho() throws Exception {
assertEquals("a:b\nc:d\n",
readOut(new URL(baseUrl, "/echo?a=b&c=d")));
assertEquals("a:b\nc&lt;:d\ne:&gt;\n",
readOut(new URL(baseUrl, "/echo?a=b&c<=d&e=>")));
}
private static String readOut(URL url) throws Exception {
StringBuilder out = new StringBuilder();
HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
SSLFactory sslf = new SSLFactory(SSLFactory.Mode.CLIENT, new Configuration());
sslf.init();
conn.setSSLSocketFactory(sslf.createSSLSocketFactory());
InputStream in = conn.getInputStream();
byte[] buffer = new byte[64 * 1024];
int len = in.read(buffer);
while (len > 0) {
out.append(new String(buffer, 0, len));
len = in.read(buffer);
}
return out.toString();
}
}


@@ -104,9 +104,6 @@ Trunk (unreleased changes)
 HDFS-3573. Supply NamespaceInfo when instantiating JournalManagers (todd)
-HDFS-3190. Simple refactors in existing NN code to assist
-QuorumJournalManager extension. (todd)
 HDFS-3630 Modify TestPersistBlocks to use both flush and hflush (sanjay)
 HDFS-3768. Exception in TestJettyHelper is incorrect.
@@ -114,6 +111,9 @@ Trunk (unreleased changes)
 HDFS-3695. Genericize format() to non-file JournalManagers. (todd)
+HDFS-3789. JournalManager#format() should be able to throw IOException
+(Ivan Kelly via todd)
 OPTIMIZATIONS
 BUG FIXES
@@ -127,10 +127,6 @@ Trunk (unreleased changes)
 HDFS-2314. MRV1 test compilation broken after HDFS-2197 (todd)
-HDFS-2330. In NNStorage and FSImagePreTransactionalStorageInspector,
-IOExceptions of stream closures can mask root exceptions. (Uma Maheswara
-Rao G via szetszwo)
 HDFS-46. Change default namespace quota of root directory from
 Integer.MAX_VALUE to Long.MAX_VALUE. (Uma Maheswara Rao G via szetszwo)
@@ -380,6 +376,11 @@ Branch-2 ( Unreleased changes )
 HDFS-3634. Add self-contained, mavenized fuse_dfs test. (Colin Patrick
 McCabe via atm)
+HDFS-3190. Simple refactors in existing NN code to assist
+QuorumJournalManager extension. (todd)
+HDFS-3276. initializeSharedEdits should have a -nonInteractive flag (todd)
 OPTIMIZATIONS
 HDFS-2982. Startup performance suffers when there are many edit log
@@ -570,9 +571,6 @@ Branch-2 ( Unreleased changes )
 HDFS-3756. DelegationTokenFetcher creates 2 HTTP connections, the second
 one not properly configured. (tucu)
-HDFS-3719. Re-enable append-related tests in TestFileConcurrentReader.
-(Andrew Wang via atm)
 HDFS-3579. libhdfs: fix exception handling. (Colin Patrick McCabe via atm)
 HDFS-3754. BlockSender doesn't shutdown ReadaheadPool threads. (eli)
@@ -583,6 +581,12 @@ Branch-2 ( Unreleased changes )
 HDFS-3721. hsync support broke wire compatibility. (todd and atm)
+HDFS-3758. TestFuseDFS test failing. (Colin Patrick McCabe via eli)
+HDFS-2330. In NNStorage and FSImagePreTransactionalStorageInspector,
+IOExceptions of stream closures can mask root exceptions. (Uma Maheswara
+Rao G via szetszwo)
 BREAKDOWN OF HDFS-3042 SUBTASKS
 HDFS-2185. HDFS portion of ZK-based FailoverController (todd)


@@ -272,7 +272,7 @@ public class BookKeeperJournalManager implements JournalManager {
 }
 @Override
-public void format(NamespaceInfo ns) {
+public void format(NamespaceInfo ns) throws IOException {
 // Currently, BKJM automatically formats itself when first accessed.
 // TODO: change over to explicit formatting so that the admin can
 // clear out the BK storage when reformatting a cluster.


@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.datanode;
 import java.io.File;
 import java.io.IOException;
-import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.URL;
 import java.net.URLEncoder;
@@ -37,7 +36,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -45,6 +43,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -140,7 +139,7 @@ public class DatanodeJspHelper {
 DatanodeInfo chosenNode = JspHelper.bestNode(firstBlock, conf);
 String fqdn = canonicalize(chosenNode.getIpAddr());
 int datanodePort = chosenNode.getXferPort();
-String redirectLocation = "http://" + fqdn + ":"
+String redirectLocation = HttpConfig.getSchemePrefix() + fqdn + ":"
 + chosenNode.getInfoPort() + "/browseBlock.jsp?blockId="
 + firstBlock.getBlock().getBlockId() + "&blockSize="
 + firstBlock.getBlock().getNumBytes() + "&genstamp="
@@ -220,7 +219,7 @@ public class DatanodeJspHelper {
 JspHelper.addTableFooter(out);
 }
 }
-out.print("<br><a href=\"http://"
+out.print("<br><a href=\"" + HttpConfig.getSchemePrefix()
 + canonicalize(nnAddr) + ":"
 + namenodeInfoPort + "/dfshealth.jsp\">Go back to DFS home</a>");
 dfs.close();
@@ -296,7 +295,7 @@ public class DatanodeJspHelper {
 Long.MAX_VALUE).getLocatedBlocks();
 // Add the various links for looking at the file contents
 // URL for downloading the full file
-String downloadUrl = "http://" + req.getServerName() + ":"
+String downloadUrl = HttpConfig.getSchemePrefix() + req.getServerName() + ":"
 + req.getServerPort() + "/streamFile" + ServletUtil.encodePath(filename)
 + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr, true)
 + JspHelper.getDelegationTokenUrlParam(tokenString);
@@ -314,7 +313,7 @@ public class DatanodeJspHelper {
 return;
 }
 String fqdn = canonicalize(chosenNode.getIpAddr());
-String tailUrl = "http://" + fqdn + ":" + chosenNode.getInfoPort()
+String tailUrl = HttpConfig.getSchemePrefix() + fqdn + ":" + chosenNode.getInfoPort()
 + "/tail.jsp?filename=" + URLEncoder.encode(filename, "UTF-8")
 + "&namenodeInfoPort=" + namenodeInfoPort
 + "&chunkSizeToView=" + chunkSizeToView
@@ -363,7 +362,7 @@ public class DatanodeJspHelper {
 String datanodeAddr = locs[j].getXferAddr();
 datanodePort = locs[j].getXferPort();
 fqdn = canonicalize(locs[j].getIpAddr());
-String blockUrl = "http://" + fqdn + ":" + locs[j].getInfoPort()
+String blockUrl = HttpConfig.getSchemePrefix() + fqdn + ":" + locs[j].getInfoPort()
 + "/browseBlock.jsp?blockId=" + blockidstring
 + "&blockSize=" + blockSize
 + "&filename=" + URLEncoder.encode(filename, "UTF-8")
@@ -374,7 +373,7 @@ public class DatanodeJspHelper {
 + JspHelper.getDelegationTokenUrlParam(tokenString)
 + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr);
-String blockInfoUrl = "http://" + nnCanonicalName + ":"
+String blockInfoUrl = HttpConfig.getSchemePrefix() + nnCanonicalName + ":"
 + namenodeInfoPort
 + "/block_info_xml.jsp?blockId=" + blockidstring;
 out.print("<td>&nbsp</td><td><a href=\"" + blockUrl + "\">"
@@ -385,7 +384,7 @@ public class DatanodeJspHelper {
 }
 out.println("</table>");
 out.print("<hr>");
-out.print("<br><a href=\"http://"
+out.print("<br><a href=\"" + HttpConfig.getSchemePrefix()
 + nnCanonicalName + ":"
 + namenodeInfoPort + "/dfshealth.jsp\">Go back to DFS home</a>");
 dfs.close();
@@ -485,7 +484,7 @@ public class DatanodeJspHelper {
 String parent = new File(filename).getParent();
 JspHelper.printGotoForm(out, namenodeInfoPort, tokenString, parent, nnAddr);
 out.print("<hr>");
-out.print("<a href=\"http://"
+out.print("<a href=\"" + HttpConfig.getSchemePrefix()
 + req.getServerName() + ":" + req.getServerPort()
 + "/browseDirectory.jsp?dir=" + URLEncoder.encode(parent, "UTF-8")
 + "&namenodeInfoPort=" + namenodeInfoPort
@@ -533,7 +532,7 @@ public class DatanodeJspHelper {
 }
 String nextUrl = null;
 if (nextBlockIdStr != null) {
-nextUrl = "http://" + canonicalize(nextHost) + ":" + nextPort
+nextUrl = HttpConfig.getSchemePrefix() + canonicalize(nextHost) + ":" + nextPort
 + "/browseBlock.jsp?blockId=" + nextBlockIdStr
 + "&blockSize=" + nextBlockSize
 + "&startOffset=" + nextStartOffset
@@ -588,7 +587,7 @@ public class DatanodeJspHelper {
 String prevUrl = null;
 if (prevBlockIdStr != null) {
-prevUrl = "http://" + canonicalize(prevHost) + ":" + prevPort
+prevUrl = HttpConfig.getSchemePrefix() + canonicalize(prevHost) + ":" + prevPort
 + "/browseBlock.jsp?blockId=" + prevBlockIdStr
 + "&blockSize=" + prevBlockSize
 + "&startOffset=" + prevStartOffset


@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.util.StringUtils;
 import org.codehaus.jackson.JsonNode;
 import org.codehaus.jackson.map.ObjectMapper;
@@ -823,7 +824,7 @@ class ClusterJspHelper {
 doc.startTag("item");
 doc.attribute("label", label);
 doc.attribute("value", value);
-doc.attribute("link", "http://" + url);
+doc.attribute("link", HttpConfig.getSchemePrefix() + url);
 doc.endTag(); // item
 }
@@ -883,7 +884,7 @@ class ClusterJspHelper {
 private static String queryMbean(String httpAddress, Configuration conf)
 throws IOException {
-URL url = new URL("http://"+httpAddress+JMX_QRY);
+URL url = new URL(HttpConfig.getSchemePrefix() + httpAddress+JMX_QRY);
 return readOutput(url);
 }
 /**


@@ -82,7 +82,7 @@ public class FileJournalManager implements JournalManager {
 public void close() throws IOException {}
 @Override
-public void format(NamespaceInfo ns) {
+public void format(NamespaceInfo ns) throws IOException {
 // Formatting file journals is done by the StorageDirectory
 // format code, since they may share their directory with
 // checkpoints, etc.


@@ -41,7 +41,7 @@ public interface JournalManager extends Closeable, FormatConfirmable {
 * Format the underlying storage, removing any previously
 * stored data.
 */
-void format(NamespaceInfo ns);
+void format(NamespaceInfo ns) throws IOException;
 /**
 * Begin writing to a new segment of the log stream, which starts at


@@ -174,7 +174,7 @@ public class JournalSet implements JournalManager {
 }
 @Override
-public void format(NamespaceInfo nsInfo) {
+public void format(NamespaceInfo nsInfo) throws IOException {
 // The iteration is done by FSEditLog itself
 throw new UnsupportedOperationException();
 }


@@ -894,7 +894,10 @@ public class NameNode {
 StartupOption.ROLLBACK.getName() + "] | [" +
 StartupOption.FINALIZE.getName() + "] | [" +
 StartupOption.IMPORT.getName() + "] | [" +
-StartupOption.INITIALIZESHAREDEDITS.getName() + "] | [" +
+StartupOption.INITIALIZESHAREDEDITS.getName() +
+" [" + StartupOption.FORCE.getName() + "] [" +
+StartupOption.NONINTERACTIVE.getName() + "]" +
+"] | [" +
 StartupOption.BOOTSTRAPSTANDBY.getName() + "] | [" +
 StartupOption.RECOVER.getName() + " [ " +
 StartupOption.FORCE.getName() + " ] ]");
@@ -964,6 +967,16 @@ public class NameNode {
 return startOpt;
 } else if (StartupOption.INITIALIZESHAREDEDITS.getName().equalsIgnoreCase(cmd)) {
 startOpt = StartupOption.INITIALIZESHAREDEDITS;
+for (i = i + 1 ; i < argsLen; i++) {
+if (StartupOption.NONINTERACTIVE.getName().equals(args[i])) {
+startOpt.setInteractiveFormat(false);
+} else if (StartupOption.FORCE.getName().equals(args[i])) {
+startOpt.setForceFormat(true);
+} else {
+LOG.fatal("Invalid argument: " + args[i]);
+return null;
+}
+}
 return startOpt;
 } else if (StartupOption.RECOVER.getName().equalsIgnoreCase(cmd)) {
 if (startOpt != StartupOption.REGULAR) {
@@ -1073,7 +1086,9 @@ public class NameNode {
 return null; // avoid warning
 }
 case INITIALIZESHAREDEDITS: {
-boolean aborted = initializeSharedEdits(conf, false, true);
+boolean aborted = initializeSharedEdits(conf,
+startOpt.getForceFormat(),
+startOpt.getInteractiveFormat());
 terminate(aborted ? 1 : 0);
 return null; // avoid warning
 }


@@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
@@ -413,7 +414,7 @@ class NamenodeJspHelper {
 }
 String addr = NetUtils.getHostPortString(nn.getNameNodeAddress());
 String fqdn = InetAddress.getByName(nodeToRedirect).getCanonicalHostName();
-redirectLocation = "http://" + fqdn + ":" + redirectPort
+redirectLocation = HttpConfig.getSchemePrefix() + fqdn + ":" + redirectPort
 + "/browseDirectory.jsp?namenodeInfoPort="
 + nn.getHttpAddress().getPort() + "&dir=/"
 + (tokenString == null ? "" :
@@ -462,7 +463,8 @@ class NamenodeJspHelper {
 String suffix, boolean alive, int nnHttpPort, String nnaddr)
 throws IOException {
 // from nn_browsedfscontent.jsp:
-String url = "http://" + d.getHostName() + ":" + d.getInfoPort()
+String url = HttpConfig.getSchemePrefix() + d.getHostName() + ":"
++ d.getInfoPort()
 + "/browseDirectory.jsp?namenodeInfoPort=" + nnHttpPort + "&dir="
 + URLEncoder.encode("/", "UTF-8")
 + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnaddr);


@@ -32,12 +32,12 @@ import javax.servlet.http.HttpServletResponse;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
 import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
@@ -206,7 +206,8 @@ public class TransferFsImage {
 String queryString, List<File> localPaths,
 Storage dstStorage, boolean getChecksum) throws IOException {
-String str = "http://" + nnHostPort + "/getimage?" + queryString;
+String str = HttpConfig.getSchemePrefix() + nnHostPort + "/getimage?" +
+queryString;
 LOG.info("Opening connection to " + str);
 //
 // open connection to remote server


@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
@@ -226,7 +227,7 @@ public class DFSck extends Configured implements Tool {
 }
 private int doWork(final String[] args) throws IOException {
-final StringBuilder url = new StringBuilder("http://");
+final StringBuilder url = new StringBuilder(HttpConfig.getSchemePrefix());
 String namenodeAddress = getCurrentNamenodeAddress();
 if (namenodeAddress == null) {


@@ -44,6 +44,7 @@ public class TestFuseDFS {
 private static MiniDFSCluster cluster;
 private static FileSystem fs;
+private static Process fuseProcess;
 private static Runtime r;
 private static String mountPoint;
@@ -137,8 +138,28 @@ public class TestFuseDFS {
 assertEquals("File content differs", expectedContents, s);
 }
+private static class RedirectToStdoutThread extends Thread {
+private InputStream is;
+RedirectToStdoutThread(InputStream is) {
+this.is = is;
+}
+public void run() {
+try {
+InputStreamReader isr = new InputStreamReader(is);
+BufferedReader br = new BufferedReader(isr);
+String line=null;
+while ( (line = br.readLine()) != null) {
+LOG.error("FUSE_LINE:" + line);
+}
+} catch (IOException e) {
+e.printStackTrace();
+}
+}
+}
 /** Run a fuse-dfs process to mount the given DFS */
-private static void establishMount(URI uri) throws IOException {
+private static Process establishMount(URI uri) throws IOException {
 Runtime r = Runtime.getRuntime();
 String cp = System.getProperty("java.class.path");
@@ -163,6 +184,8 @@ public class TestFuseDFS {
 "-obig_writes", // Allow >4kb writes
 "-oentry_timeout=0.1", // Don't cache dents long
 "-oattribute_timeout=0.1", // Don't cache attributes long
+"-ononempty", // Don't complain about junk in mount point
+"-f", // Don't background the process
 "-ordbuffer=32768", // Read buffer size in kb
 "rw"
 };
@@ -178,17 +201,35 @@ public class TestFuseDFS {
 execAssertSucceeds("mkdir -p " + mountPoint);
 // Mount the mini cluster
-try {
-Process fuseProcess = r.exec(mountCmd, env);
-assertEquals(0, fuseProcess.waitFor());
-} catch (InterruptedException ie) {
-fail("Failed to mount");
-}
+String cmdStr = "";
+for (String c : mountCmd) {
+cmdStr += (" " + c);
+}
+LOG.info("now mounting with:" + cmdStr);
+Process fuseProcess = r.exec(mountCmd, env);
+RedirectToStdoutThread stdoutThread =
+new RedirectToStdoutThread(fuseProcess.getInputStream());
+RedirectToStdoutThread stderrThread =
+new RedirectToStdoutThread(fuseProcess.getErrorStream());
+stdoutThread.start();
+stderrThread.start();
+// Wait for fusermount to start up, so that we know we're operating on the
+// FUSE FS when we run the tests.
+try {
+Thread.sleep(50000);
+} catch (InterruptedException e) {
+}
+return fuseProcess;
 }
 /** Tear down the fuse-dfs process and mount */
 private static void teardownMount() throws IOException {
 execWaitRet("fusermount -u " + mountPoint);
+try {
+assertEquals(0, fuseProcess.waitFor()); // fuse_dfs should exit cleanly
+} catch (InterruptedException e) {
+fail("interrupted while waiting for fuse_dfs process to exit.");
+}
 }
 @BeforeClass
@@ -200,7 +241,7 @@ public class TestFuseDFS {
 cluster = new MiniDFSCluster.Builder(conf).build();
 cluster.waitClusterUp();
 fs = cluster.getFileSystem();
-establishMount(fs.getUri());
+fuseProcess = establishMount(fs.getUri());
 }
 @AfterClass


@@ -288,8 +288,10 @@ public class TestFileConcurrentReader {
 runTestUnfinishedBlockCRCError(true, SyncType.SYNC, SMALL_WRITE_SIZE);
 }
+// fails due to issue w/append, disable
+@Ignore
 @Test
-public void testUnfinishedBlockCRCErrorTransferToAppend()
+public void _testUnfinishedBlockCRCErrorTransferToAppend()
 throws IOException {
 runTestUnfinishedBlockCRCError(true, SyncType.APPEND, DEFAULT_WRITE_SIZE);
 }
@@ -305,8 +307,10 @@ public class TestFileConcurrentReader {
 runTestUnfinishedBlockCRCError(false, SyncType.SYNC, SMALL_WRITE_SIZE);
 }
+// fails due to issue w/append, disable
+@Ignore
 @Test
-public void testUnfinishedBlockCRCErrorNormalTransferAppend()
+public void _testUnfinishedBlockCRCErrorNormalTransferAppend()
 throws IOException {
 runTestUnfinishedBlockCRCError(false, SyncType.APPEND, DEFAULT_WRITE_SIZE);
 }


@@ -154,7 +154,7 @@ public class TestGenericJournalConf {
 }
 @Override
-public void format(NamespaceInfo nsInfo) {
+public void format(NamespaceInfo nsInfo) throws IOException {
 formatCalled = true;
 }


@@ -20,7 +20,6 @@ package org.apache.hadoop.mapreduce.v2.app.webapp;
 import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.JOB_ID;
 import static org.apache.hadoop.yarn.util.StringHelper.join;
-import static org.apache.hadoop.yarn.util.StringHelper.ujoin;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI._EVEN;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI._ODD;
@@ -31,6 +30,7 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH;
 import java.util.Date;
 import java.util.List;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
@@ -40,8 +40,6 @@ import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.yarn.api.records.NodeId;
-import org.apache.hadoop.yarn.util.BuilderUtils;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
@@ -106,7 +104,8 @@ public class JobBlock extends HtmlBlock {
 table.tr().
 td(String.valueOf(attempt.getAttemptId())).
 td(new Date(attempt.getStartTime()).toString()).
-td().a(".nodelink", url("http://", attempt.getNodeHttpAddress()),
+td().a(".nodelink", url(HttpConfig.getSchemePrefix(),
+attempt.getNodeHttpAddress()),
 attempt.getNodeHttpAddress())._().
 td().a(".logslink", url(attempt.getLogsLink()),
 "logs")._().


@@ -24,6 +24,7 @@ import com.google.inject.Inject;
 import static org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp.*;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
@@ -62,7 +63,8 @@ public class NavBlock extends HtmlBlock {
 li().a(url("conf", jobid), "Configuration")._().
 li().a(url("tasks", jobid, "m"), "Map tasks")._().
 li().a(url("tasks", jobid, "r"), "Reduce tasks")._().
-li().a(".logslink", url("http://", nodeHttpAddress, "node",
+li().a(".logslink", url(HttpConfig.getSchemePrefix(),
+nodeHttpAddress, "node",
 "containerlogs", thisAmInfo.getContainerId().toString(),
 app.getJob().getUserName()),
 "AM Logs")._()._();


@@ -27,6 +27,7 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
 import java.util.Collection;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo;
 import org.apache.hadoop.util.StringUtils;
@@ -93,13 +94,15 @@ public class TaskPage extends AppView {
 nodeTd._("N/A");
 } else {
 nodeTd.
-a(".nodelink", url("http://", nodeHttpAddr), nodeHttpAddr);
+a(".nodelink", url(HttpConfig.getSchemePrefix(),
+nodeHttpAddr), nodeHttpAddr);
 }
 nodeTd._();
 if (containerId != null) {
 String containerIdStr = ta.getAssignedContainerIdStr();
 row.td().
-a(".logslink", url("http://", nodeHttpAddr, "node", "containerlogs",
+a(".logslink", url(HttpConfig.getSchemePrefix(),
+nodeHttpAddr, "node", "containerlogs",
 containerIdStr, app.getJob().getUserName()), "logs")._();
 } else {
 row.td()._("N/A")._();


@@ -24,6 +24,7 @@ import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlRootElement;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -63,7 +64,7 @@ public class AMAttemptInfo {
 ContainerId containerId = amInfo.getContainerId();
 if (containerId != null) {
 this.containerId = containerId.toString();
-this.logsLink = join("http://" + nodeHttpAddress,
+this.logsLink = join(HttpConfig.getSchemePrefix() + nodeHttpAddress,
 ujoin("node", "containerlogs", this.containerId, user));
 }
 }


@@ -20,6 +20,7 @@ package org.apache.hadoop.mapreduce.util;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.http.HttpConfig;
 @Private
 @Unstable
@@ -34,8 +35,8 @@ public class HostUtil {
 */
 public static String getTaskLogUrl(String taskTrackerHostName,
 String httpPort, String taskAttemptID) {
-return ("http://" + taskTrackerHostName + ":" + httpPort
-+ "/tasklog?attemptid=" + taskAttemptID);
+return (HttpConfig.getSchemePrefix() + taskTrackerHostName + ":" +
+httpPort + "/tasklog?attemptid=" + taskAttemptID);
 }
 public static String convertTrackerNameToHostName(String trackerName) {


@@ -21,28 +21,18 @@ package org.apache.hadoop.mapreduce.v2.hs.webapp;
 import com.google.inject.Inject;
 import java.util.Date;
 import java.util.List;
-import java.util.Map;
-import org.apache.hadoop.mapreduce.JobACL;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
-import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
-import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
-import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
-import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
-import org.apache.hadoop.mapreduce.v2.app.job.Task;
-import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfEntryInfo;
 import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.AMAttemptInfo;
 import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI;
-import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.yarn.api.records.NodeId;
-import org.apache.hadoop.yarn.util.BuilderUtils;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.ResponseInfo;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
@@ -142,7 +132,8 @@ public class HsJobBlock extends HtmlBlock {
 table.tr((odd = !odd) ? _ODD : _EVEN).
 td(String.valueOf(attempt.getAttemptId())).
 td(new Date(attempt.getStartTime()).toString()).
-td().a(".nodelink", url("http://", attempt.getNodeHttpAddress()),
+td().a(".nodelink", url(HttpConfig.getSchemePrefix(),
+attempt.getNodeHttpAddress()),
 attempt.getNodeHttpAddress())._().
 td().a(".logslink", url(attempt.getShortLogsLink()),
 "logs")._().


@@ -29,6 +29,7 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
 import java.util.Collection;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
@@ -143,7 +144,7 @@ public class HsTaskPage extends HsView {
 td.br().$title(String.valueOf(sortId))._(). // sorting
 _(taid)._().td(ta.getState().toString()).td().a(".nodelink",
-"http://"+ nodeHttpAddr,
+HttpConfig.getSchemePrefix()+ nodeHttpAddr,
 nodeRackName + "/" + nodeHttpAddr);
 td._();
 row.td().


@@ -32,6 +32,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.JobStatus;
 import org.apache.hadoop.mapreduce.MRJobConfig;
@@ -393,7 +394,7 @@ public class ClientServiceDelegate {
 String url = StringUtils.isNotEmpty(historyTrackingUrl)
 ? historyTrackingUrl : trackingUrl;
 if (!UNAVAILABLE.equals(url)) {
-url = "http://" + url;
+url = HttpConfig.getSchemePrefix() + url;
 }
 jobStatus = TypeConverter.fromYarn(report, url);
 }


@@ -33,6 +33,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
@@ -399,7 +400,7 @@ public class RMAppAttemptImpl implements RMAppAttempt {
 URI result = ProxyUriUtils.getProxyUri(trackingUri, proxyUri,
 applicationAttemptId.getApplicationId());
 //We need to strip off the scheme to have it match what was there before
-return result.toASCIIString().substring(7);
+return result.toASCIIString().substring(HttpConfig.getSchemePrefix().length());
 } catch (URISyntaxException e) {
 LOG.warn("Could not proxify "+trackingUriWithoutScheme,e);
 return trackingUriWithoutScheme;


@@ -30,6 +30,7 @@ import java.util.Collection;
 import com.google.inject.Inject;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
@@ -137,7 +138,8 @@ public class AppBlock extends HtmlBlock {
 table.tr((odd = !odd) ? _ODD : _EVEN).
 td(String.valueOf(attemptInfo.getAttemptId())).
 td(Times.format(attemptInfo.getStartTime())).
-td().a(".nodelink", url("http://", attemptInfo.getNodeHttpAddress()),
+td().a(".nodelink", url(HttpConfig.getSchemePrefix(),
+attemptInfo.getNodeHttpAddress()),
 attemptInfo.getNodeHttpAddress())._().
 td().a(".logslink", url(attemptInfo.getLogsLink()), "logs")._().
 _();
View File
@@ -26,6 +26,7 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
 import java.util.Collection;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
@@ -118,7 +119,8 @@ class NodesPage extends RmView {
         row.td()._("N/A")._();
       } else {
         String httpAddress = info.getNodeHTTPAddress();
-        row.td().a("http://" + httpAddress, httpAddress)._();
+        row.td().a(HttpConfig.getSchemePrefix() + httpAddress,
+            httpAddress)._();
       }
       row.td(info.getHealthStatus()).
         td().br().$title(String.valueOf(info.getLastHealthUpdate()))._().
View File
@@ -23,6 +23,7 @@ import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlRootElement;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.util.ConverterUtils;
@@ -55,7 +56,8 @@ public class AppAttemptInfo {
       this.containerId = masterContainer.getId().toString();
       this.nodeHttpAddress = masterContainer.getNodeHttpAddress();
       this.nodeId = masterContainer.getNodeId().toString();
-      this.logsLink = join("http://", masterContainer.getNodeHttpAddress(),
+      this.logsLink = join(HttpConfig.getSchemePrefix(),
+          masterContainer.getNodeHttpAddress(),
           "/node", "/containerlogs/",
           ConverterUtils.toString(masterContainer.getId()),
           "/", attempt.getSubmissionContext().getUser());
View File
@@ -24,6 +24,7 @@ import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlRootElement;
 import javax.xml.bind.annotation.XmlTransient;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
@@ -87,10 +88,10 @@ public class AppInfo {
       this.trackingUI = this.trackingUrlIsNotReady ? "UNASSIGNED" : (app
           .getFinishTime() == 0 ? "ApplicationMaster" : "History");
       if (!trackingUrlIsNotReady) {
-        this.trackingUrl = join("http://", trackingUrl);
+        this.trackingUrl = join(HttpConfig.getSchemePrefix(), trackingUrl);
       }
       this.trackingUrlPretty = trackingUrlIsNotReady ? "UNASSIGNED" : join(
-          "http://", trackingUrl);
+          HttpConfig.getSchemePrefix(), trackingUrl);
       this.applicationId = app.getApplicationId();
       this.appIdNum = String.valueOf(app.getApplicationId().getId());
       this.id = app.getApplicationId().toString();
@@ -104,7 +105,6 @@ public class AppInfo {
       }
       this.finalStatus = app.getFinalApplicationStatus();
       this.clusterId = ResourceManager.clusterTimeStamp;
-
       if (hasAccess) {
         this.startedTime = app.getStartTime();
         this.finishedTime = app.getFinishTime();
@@ -116,7 +116,8 @@
       Container masterContainer = attempt.getMasterContainer();
       if (masterContainer != null) {
         this.amContainerLogsExist = true;
-        String url = join("http://", masterContainer.getNodeHttpAddress(),
+        String url = join(HttpConfig.getSchemePrefix(),
+            masterContainer.getNodeHttpAddress(),
             "/node", "/containerlogs/",
             ConverterUtils.toString(masterContainer.getId()),
             "/", app.getUser());
View File
@@ -27,6 +27,7 @@ import java.net.URLEncoder;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 public class ProxyUriUtils {
@@ -140,6 +141,6 @@ public class ProxyUriUtils {
    */
   public static URI getUriFromAMUrl(String noSchemeUrl)
     throws URISyntaxException {
-    return new URI("http://"+noSchemeUrl);
+    return new URI(HttpConfig.getSchemePrefix() + noSchemeUrl);
   }
 }
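getUriFromAMUrl() takes the scheme-less tracking address registered by the ApplicationMaster and now prepends whichever scheme the cluster is configured for instead of always assuming plain HTTP. A hypothetical call, with an invented host and port:

// Invented scheme-less AM address; the method declares URISyntaxException.
java.net.URI amUri = ProxyUriUtils.getUriFromAMUrl("am-host.example.com:4040/jobs");
// With hadoop.ssl.enabled=true this would presumably yield
// https://am-host.example.com:4040/jobs, otherwise the http:// form.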
View File
@@ -24,6 +24,7 @@ import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
 import org.apache.hadoop.http.FilterInitializer;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -37,7 +38,8 @@ public class AmFilterInitializer extends FilterInitializer {
     String proxy = YarnConfiguration.getProxyHostAndPort(conf);
     String[] parts = proxy.split(":");
     params.put(AmIpFilter.PROXY_HOST, parts[0]);
-    params.put(AmIpFilter.PROXY_URI_BASE, "http://"+proxy+
+    params.put(AmIpFilter.PROXY_URI_BASE,
+        HttpConfig.getSchemePrefix() + proxy +
         System.getenv(ApplicationConstants.APPLICATION_WEB_PROXY_BASE_ENV));
     container.addFilter(FILTER_NAME, FILTER_CLASS, params);
   }
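The PROXY_URI_BASE handed to AmIpFilter is composed of the configured scheme, the web proxy host:port from YarnConfiguration, and the per-application proxy base path taken from the container environment. A sketch of the resulting value under assumed settings; the proxy host and application id below are invented:

// Assumed: proxy = "rm.example.com:8089" and the environment carries
// APPLICATION_WEB_PROXY_BASE=/proxy/application_1344897264357_0001
String uriBase = HttpConfig.getSchemePrefix() + "rm.example.com:8089"
    + "/proxy/application_1344897264357_0001";
// => https://rm.example.com:8089/proxy/application_1344897264357_0001 when SSL is enabled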