HDFS-3987. Merge change r1543962 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1543967 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao 2013-11-20 22:00:32 +00:00
parent 0bf221368f
commit 7e10509fe4
20 changed files with 211 additions and 55 deletions

View File

@@ -196,7 +196,13 @@ public class KerberosAuthenticator implements Authenticator {
       doSpnegoSequence(token);
     } else {
       LOG.debug("Using fallback authenticator sequence.");
-      getFallBackAuthenticator().authenticate(url, token);
+      Authenticator auth = getFallBackAuthenticator();
+      // Make sure that the fall back authenticator have the same
+      // ConnectionConfigurator, since the method might be overridden.
+      // Otherwise the fall back authenticator might not have the information
+      // to make the connection (e.g., SSL certificates)
+      auth.setConnectionConfigurator(connConfigurator);
+      auth.authenticate(url, token);
     }
   }
 }
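
The comment above is the heart of this hunk: when the SPNEGO sequence is skipped, the fallback (pseudo) authenticator now inherits the caller's ConnectionConfigurator, so any SSL settings survive the fallback. A rough client-side sketch of the pattern, assuming a caller that wires its own SSL socket factory through a ConnectionConfigurator (the factory and URL below are illustrative, not part of this patch):

    import java.io.IOException;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import javax.net.ssl.HttpsURLConnection;
    import javax.net.ssl.SSLSocketFactory;

    import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
    import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
    import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;

    public class FallbackSslSketch {
      public static void main(String[] args) throws Exception {
        // Illustrative only: a real client would build this factory from its
        // truststore configuration rather than the JVM default.
        final SSLSocketFactory sslFactory =
            (SSLSocketFactory) SSLSocketFactory.getDefault();

        ConnectionConfigurator sslConfigurator = new ConnectionConfigurator() {
          @Override
          public HttpURLConnection configure(HttpURLConnection conn)
              throws IOException {
            if (conn instanceof HttpsURLConnection) {
              ((HttpsURLConnection) conn).setSSLSocketFactory(sslFactory);
            }
            return conn;
          }
        };

        KerberosAuthenticator authenticator = new KerberosAuthenticator();
        authenticator.setConnectionConfigurator(sslConfigurator);
        // With the change above, a fallback to the pseudo authenticator reuses
        // sslConfigurator instead of opening an unconfigured HTTPS connection.
        authenticator.authenticate(
            new URL("https://nn.example.com:50470/webhdfs/v1/?op=GETHOMEDIRECTORY"),
            new AuthenticatedURL.Token());
      }
    }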

View File

@@ -471,7 +471,9 @@ public class HttpServer implements FilterContainer {
     if (conf.getBoolean(
         CommonConfigurationKeys.HADOOP_JETTY_LOGS_SERVE_ALIASES,
         CommonConfigurationKeys.DEFAULT_HADOOP_JETTY_LOGS_SERVE_ALIASES)) {
-      logContext.getInitParams().put(
+      @SuppressWarnings("unchecked")
+      Map<String, String> params = logContext.getInitParams();
+      params.put(
           "org.mortbay.jetty.servlet.Default.aliases", "true");
     }
     logContext.setDisplayName("logs");

View File

@@ -23,6 +23,8 @@ Release 2.3.0 - UNRELEASED
     HDFS-5382. Implement the UI of browsing filesystems in HTML 5 page. (Haohui
     Mai via jing9)
 
+    HDFS-3987. Support webhdfs over HTTPS. (Haohui Mai via jing9)
+
   IMPROVEMENTS
 
     HDFS-5267. Remove volatile from LightWeightHashSet. (Junping Du via llu)

View File

@@ -76,9 +76,8 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.io.retry.RetryProxy;
+import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
@@ -605,12 +604,19 @@ public class DFSUtil {
    * Returns list of InetSocketAddress corresponding to HA NN HTTP addresses from
    * the configuration.
    *
+   * @param conf configuration
    * @return list of InetSocketAddresses
    */
-  public static Map<String, Map<String, InetSocketAddress>> getHaNnHttpAddresses(
-      Configuration conf) {
-    return getAddresses(conf, null, DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+  public static Map<String, Map<String, InetSocketAddress>> getHaNnWebHdfsAddresses(
+      Configuration conf, String scheme) {
+    if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
+      return getAddresses(conf, null,
+          DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+    } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
+      return getAddresses(conf, null,
+          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
+    } else {
+      throw new IllegalArgumentException("Unsupported scheme: " + scheme);
+    }
   }
 
   /**
@@ -619,18 +625,28 @@ public class DFSUtil {
    * cluster, the resolver further resolves the logical name (i.e., the authority
    * in the URL) into real namenode addresses.
    */
-  public static InetSocketAddress[] resolve(URI uri, int schemeDefaultPort,
-      Configuration conf) throws IOException {
+  public static InetSocketAddress[] resolveWebHdfsUri(URI uri, Configuration conf)
+      throws IOException {
+    int defaultPort;
+    String scheme = uri.getScheme();
+    if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
+      defaultPort = DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
+    } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
+      defaultPort = DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
+    } else {
+      throw new IllegalArgumentException("Unsupported scheme: " + scheme);
+    }
     ArrayList<InetSocketAddress> ret = new ArrayList<InetSocketAddress>();
     if (!HAUtil.isLogicalUri(conf, uri)) {
       InetSocketAddress addr = NetUtils.createSocketAddr(uri.getAuthority(),
-          schemeDefaultPort);
+          defaultPort);
       ret.add(addr);
     } else {
       Map<String, Map<String, InetSocketAddress>> addresses = DFSUtil
-          .getHaNnHttpAddresses(conf);
+          .getHaNnWebHdfsAddresses(conf, scheme);
       for (Map<String, InetSocketAddress> addrs : addresses.values()) {
         for (InetSocketAddress addr : addrs.values()) {
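
A hedged usage sketch of the new scheme-aware resolution (the nameservice, hosts, and ports below are made up): a swebhdfs URI is resolved against the dfs.namenode.https-address keys and the HTTPS default port, while webhdfs keeps using the HTTP keys as before.

    import java.net.InetSocketAddress;
    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSUtil;

    public class ResolveSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical HA setup: logical nameservice "ns1" with two NameNodes.
        conf.set("dfs.nameservices", "ns1");
        conf.set("dfs.ha.namenodes.ns1", "nn1,nn2");
        conf.set("dfs.namenode.https-address.ns1.nn1", "nn1.example.com:50470");
        conf.set("dfs.namenode.https-address.ns1.nn2", "nn2.example.com:50470");

        // Resolves through getHaNnWebHdfsAddresses(conf, "swebhdfs"), so both
        // HTTPS addresses of the logical nameservice are returned.
        for (InetSocketAddress addr :
            DFSUtil.resolveWebHdfsUri(new URI("swebhdfs://ns1"), conf)) {
          System.out.println(addr);
        }
      }
    }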

View File

@@ -801,6 +801,10 @@ public class NameNode implements NameNodeStatusMXBean {
     return httpServer.getHttpAddress();
   }
 
+  /**
+   * @return NameNode HTTPS address, used by the Web UI, image transfer,
+   *    and HTTP-based file system clients like Hftp and WebHDFS
+   */
   public InetSocketAddress getHttpsAddress() {
     return httpServer.getHttpsAddress();
   }

View File

@@ -66,6 +66,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.hdfs.web.ParamFilter;
+import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
@@ -96,6 +97,7 @@ import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
 import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
 import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.Credentials;
@@ -210,7 +212,8 @@ public class NamenodeWebHdfsMethods {
     final Credentials c = DelegationTokenSecretManager.createCredentials(
         namenode, ugi, renewer != null? renewer: ugi.getShortUserName());
     final Token<? extends TokenIdentifier> t = c.getAllTokens().iterator().next();
-    t.setKind(WebHdfsFileSystem.TOKEN_KIND);
+    Text kind = request.getScheme().equals("http") ? WebHdfsFileSystem.TOKEN_KIND : SWebHdfsFileSystem.TOKEN_KIND;
+    t.setKind(kind);
     return t;
   }
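
One practical consequence of stamping the token kind from the request scheme: a client that fetches a delegation token over swebhdfs gets a "SWEBHDFS delegation" token, so renewal and cancellation also go through HTTPS. A hedged sketch, assuming a Kerberos-secured cluster that actually issues delegation tokens (host and port are made up):

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
    import org.apache.hadoop.security.token.Token;

    public class TokenKindSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(
            URI.create("swebhdfs://nn.example.com:50470"), conf);
        Token<?> token = fs.getDelegationToken("renewer");
        // Expected to print "SWEBHDFS delegation" and "true" with this change.
        System.out.println(token.getKind());
        System.out.println(token.getKind().equals(SWebHdfsFileSystem.TOKEN_KIND));
        fs.close();
      }
    }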

View File

@@ -0,0 +1,66 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import java.io.IOException;
import java.security.GeneralSecurityException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.io.Text;
public class SWebHdfsFileSystem extends WebHdfsFileSystem {
public static final Text TOKEN_KIND = new Text("SWEBHDFS delegation");
public static final String SCHEME = "swebhdfs";
@Override
public String getScheme() {
return SCHEME;
}
@Override
protected String getTransportScheme() {
return "https";
}
@Override
protected synchronized void initializeTokenAspect() {
tokenAspect = new TokenAspect<WebHdfsFileSystem>(this, TOKEN_KIND);
}
@Override
protected void initializeConnectionFactory(Configuration conf)
throws IOException {
connectionFactory = new URLConnectionFactory(
URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT);
try {
connectionFactory.setConnConfigurator(URLConnectionFactory
.newSslConnConfigurator(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
conf));
} catch (GeneralSecurityException e) {
throw new IOException(e);
}
}
@Override
protected int getDefaultPort() {
return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY,
DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT);
}
}
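
From a client's point of view the new class is used indirectly: point a URI at the swebhdfs scheme and provide the usual hadoop.ssl.* client settings (assumption: the standard ssl-client.xml truststore configuration consumed by the SSL connection configurator). A minimal hedged read sketch, with host, port, and path made up:

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SWebHdfsReadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Resolves to SWebHdfsFileSystem via the FileSystem service
        // registration added further down in this change.
        FileSystem fs = FileSystem.get(
            URI.create("swebhdfs://nn.example.com:50470"), conf);
        FSDataInputStream in = fs.open(new Path("/user/alice/data.txt"));
        try {
          System.out.println(in.read());
        } finally {
          in.close();
          fs.close();
        }
      }
    }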

View File

@@ -58,7 +58,8 @@ final class TokenAspect<T extends FileSystem & Renewable> {
     public boolean handleKind(Text kind) {
       return kind.equals(HftpFileSystem.TOKEN_KIND)
           || kind.equals(HsftpFileSystem.TOKEN_KIND)
-          || kind.equals(WebHdfsFileSystem.TOKEN_KIND);
+          || kind.equals(WebHdfsFileSystem.TOKEN_KIND)
+          || kind.equals(SWebHdfsFileSystem.TOKEN_KIND);
     }
 
     @Override
@@ -83,6 +84,8 @@ final class TokenAspect<T extends FileSystem & Renewable> {
         uri = DFSUtil.createUri(HsftpFileSystem.SCHEME, address);
       } else if (kind.equals(WebHdfsFileSystem.TOKEN_KIND)) {
         uri = DFSUtil.createUri(WebHdfsFileSystem.SCHEME, address);
+      } else if (kind.equals(SWebHdfsFileSystem.TOKEN_KIND)) {
+        uri = DFSUtil.createUri(SWebHdfsFileSystem.SCHEME, address);
       } else {
         throw new IllegalArgumentException("Unsupported scheme");
       }

View File

@@ -56,7 +56,6 @@ import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
-import org.apache.hadoop.hdfs.web.TokenAspect.DTSelecorByKind;
 import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
 import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
@@ -99,7 +98,6 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Progressable;
 import org.mortbay.util.ajax.JSON;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Charsets;
 import com.google.common.collect.Lists;
@@ -119,8 +117,7 @@ public class WebHdfsFileSystem extends FileSystem
   /** Delegation token kind */
   public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
 
-  protected TokenAspect<WebHdfsFileSystem> tokenAspect = new TokenAspect<WebHdfsFileSystem>(
-      this, TOKEN_KIND);
+  protected TokenAspect<WebHdfsFileSystem> tokenAspect;
 
   private UserGroupInformation ugi;
   private URI uri;
@@ -141,17 +138,44 @@ public class WebHdfsFileSystem extends FileSystem
     return SCHEME;
   }
 
+  /**
+   * return the underlying transport protocol (http / https).
+   */
+  protected String getTransportScheme() {
+    return "http";
+  }
+
+  /**
+   * Initialize tokenAspect. This function is intended to
+   * be overridden by SWebHdfsFileSystem.
+   */
+  protected synchronized void initializeTokenAspect() {
+    tokenAspect = new TokenAspect<WebHdfsFileSystem>(this, TOKEN_KIND);
+  }
+
+  /**
+   * Initialize connectionFactory. This function is intended to
+   * be overridden by SWebHdfsFileSystem.
+   */
+  protected void initializeConnectionFactory(Configuration conf)
+      throws IOException {
+    connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
+  }
+
   @Override
   public synchronized void initialize(URI uri, Configuration conf
       ) throws IOException {
     super.initialize(uri, conf);
     setConf(conf);
+    initializeTokenAspect();
+    initializeConnectionFactory(conf);
     ugi = UserGroupInformation.getCurrentUser();
     try {
       this.uri = new URI(uri.getScheme(), uri.getAuthority(), null,
           null, null);
-      this.nnAddrs = DFSUtil.resolve(this.uri, getDefaultPort(), conf);
+      this.nnAddrs = DFSUtil.resolveWebHdfsUri(this.uri, conf);
     } catch (URISyntaxException e) {
       throw new IllegalArgumentException(e);
     }
@@ -343,7 +367,7 @@ public class WebHdfsFileSystem extends FileSystem
    */
   private URL getNamenodeURL(String path, String query) throws IOException {
     InetSocketAddress nnAddr = getCurrentNNAddr();
-    final URL url = new URL("http", nnAddr.getHostName(),
+    final URL url = new URL(getTransportScheme(), nnAddr.getHostName(),
         nnAddr.getPort(), path + '?' + query);
     if (LOG.isTraceEnabled()) {
       LOG.trace("url=" + url);
@@ -841,8 +865,10 @@ public class WebHdfsFileSystem extends FileSystem
   @Override
   public void close() throws IOException {
     super.close();
-    tokenAspect.removeRenewAction();
+    synchronized (this) {
+      tokenAspect.removeRenewAction();
+    }
   }
 
   class OffsetUrlOpener extends ByteRangeInputStream.URLOpener {
     OffsetUrlOpener(final URL url) {

View File

@@ -17,3 +17,4 @@ org.apache.hadoop.hdfs.DistributedFileSystem
 org.apache.hadoop.hdfs.web.HftpFileSystem
 org.apache.hadoop.hdfs.web.HsftpFileSystem
 org.apache.hadoop.hdfs.web.WebHdfsFileSystem
+org.apache.hadoop.hdfs.web.SWebHdfsFileSystem
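
This service entry is what makes the swebhdfs scheme discoverable through FileSystem's ServiceLoader-based lookup, with no fs.swebhdfs.impl setting required. A small hedged check of that wiring:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class SchemeLookupSketch {
      public static void main(String[] args) throws Exception {
        // Expected to print: class org.apache.hadoop.hdfs.web.SWebHdfsFileSystem
        System.out.println(
            FileSystem.getFileSystemClass("swebhdfs", new Configuration()));
      }
    }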

View File

@@ -89,7 +89,7 @@ abstract public class TestSymlinkHdfs extends SymlinkBaseTest {
     conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     conf.set(FsPermission.UMASK_LABEL, "000");
     cluster = new MiniDFSCluster.Builder(conf).build();
-    webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
+    webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
     dfs = cluster.getFileSystem();
   }

View File

@@ -73,6 +73,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
@@ -834,8 +835,8 @@ public class TestDFSClientRetries {
     try {
       cluster.waitActive();
       final DistributedFileSystem dfs = cluster.getFileSystem();
-      final FileSystem fs = isWebHDFS?
-          WebHdfsTestUtil.getWebHdfsFileSystem(conf): dfs;
+      final FileSystem fs = isWebHDFS ? WebHdfsTestUtil.getWebHdfsFileSystem(
+          conf, WebHdfsFileSystem.SCHEME) : dfs;
       final URI uri = dfs.getUri();
       assertTrue(HdfsUtils.isHealthy(uri));
@@ -1039,7 +1040,7 @@ public class TestDFSClientRetries {
     final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
         username, new String[]{"supergroup"});
-    return isWebHDFS? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf)
+    return isWebHDFS? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME)
         : DFSTestUtil.getFileSystemAs(ugi, conf);
   }

View File

@@ -556,7 +556,7 @@ public class TestDFSUtil {
     Configuration conf = createWebHDFSHAConfiguration(LOGICAL_HOST_NAME, NS1_NN1_ADDR, NS1_NN2_ADDR);
     Map<String, Map<String, InetSocketAddress>> map =
-        DFSUtil.getHaNnHttpAddresses(conf);
+        DFSUtil.getHaNnWebHdfsAddresses(conf, "webhdfs");
 
     assertEquals(NS1_NN1_ADDR, map.get("ns1").get("nn1").toString());
     assertEquals(NS1_NN2_ADDR, map.get("ns1").get("nn2").toString());
@@ -574,7 +574,7 @@ public class TestDFSUtil {
     Configuration conf = createWebHDFSHAConfiguration(LOGICAL_HOST_NAME, NS1_NN1_ADDR, NS1_NN2_ADDR);
     URI uri = new URI("webhdfs://ns1");
     assertTrue(HAUtil.isLogicalUri(conf, uri));
-    InetSocketAddress[] addrs = DFSUtil.resolve(uri, DEFAULT_PORT, conf);
+    InetSocketAddress[] addrs = DFSUtil.resolveWebHdfsUri(uri, conf);
     assertArrayEquals(new InetSocketAddress[] {
       new InetSocketAddress(NS1_NN1_HOST, DEFAULT_PORT),
       new InetSocketAddress(NS1_NN2_HOST, DEFAULT_PORT),

View File

@@ -147,7 +147,7 @@ public class TestDelegationTokenForProxyUser {
   public void testWebHdfsDoAs() throws Exception {
     WebHdfsTestUtil.LOG.info("START: testWebHdfsDoAs()");
     WebHdfsTestUtil.LOG.info("ugi.getShortUserName()=" + ugi.getShortUserName());
-    final WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, config);
+    final WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, config, WebHdfsFileSystem.SCHEME);
     final Path root = new Path("/");
     cluster.getFileSystem().setPermission(root, new FsPermission((short)0777));

View File

@@ -163,7 +163,7 @@ public class TestAuditLogs {
     setupAuditLogs();
-    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf);
+    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
     InputStream istream = webfs.open(file);
     int val = istream.read();
     istream.close();
@@ -182,7 +182,7 @@ public class TestAuditLogs {
     setupAuditLogs();
-    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf);
+    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
     FileStatus st = webfs.getFileStatus(file);
 
     verifyAuditLogs(true);
@@ -222,7 +222,7 @@ public class TestAuditLogs {
     setupAuditLogs();
     try {
-      WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf);
+      WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
       InputStream istream = webfs.open(file);
       int val = istream.read();
       fail("open+read must not succeed, got " + val);

View File

@@ -65,6 +65,7 @@ public class TestHttpsFileSystem {
     cluster.getFileSystem().create(new Path("/test")).close();
     InetSocketAddress addr = cluster.getNameNode().getHttpsAddress();
     nnAddr = addr.getHostName() + ":" + addr.getPort();
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, nnAddr);
   }
 
   @AfterClass
@@ -80,4 +81,15 @@ public class TestHttpsFileSystem {
     Assert.assertTrue(fs.exists(new Path("/test")));
     fs.close();
   }
+
+  @Test
+  public void testSWebHdfsFileSystem() throws Exception {
+    FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, "swebhdfs");
+    final Path f = new Path("/testswebhdfs");
+    FSDataOutputStream os = fs.create(f);
+    os.write(23);
+    os.close();
+    Assert.assertTrue(fs.exists(f));
+    fs.close();
+  }
 }

View File

@@ -101,7 +101,7 @@ public class TestWebHDFS {
     try {
       cluster.waitActive();
-      final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
+      final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
       final Path dir = new Path("/test/largeFile");
       Assert.assertTrue(fs.mkdirs(dir));
@@ -229,8 +229,8 @@ public class TestWebHDFS {
         new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     try {
       cluster.waitActive();
-      WebHdfsTestUtil.getWebHdfsFileSystem(conf).setPermission(
-          new Path("/"),
+      WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME)
+          .setPermission(new Path("/"),
           new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
 
       // trick the NN into not believing it's not the superuser so we can
@@ -243,7 +243,8 @@ public class TestWebHDFS {
           .doAs(new PrivilegedExceptionAction<Void>() {
             @Override
             public Void run() throws IOException, URISyntaxException {
-              FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
+              FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+                  WebHdfsFileSystem.SCHEME);
               Path d = new Path("/my-dir");
               Assert.assertTrue(fs.mkdirs(d));
               for (int i=0; i < listLimit*3; i++) {

View File

@@ -82,7 +82,7 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
     final UserGroupInformation current = UserGroupInformation.getCurrentUser();
     ugi = UserGroupInformation.createUserForTesting(
         current.getShortUserName() + "x", new String[]{"user"});
-    fs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf);
+    fs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME);
     defaultWorkingDirectory = fs.getWorkingDirectory().toUri().getPath();
   }

View File

@@ -18,35 +18,32 @@
 package org.apache.hadoop.hdfs.web;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
 import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.OutputStream;
-import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.Socket;
-import java.net.SocketAddress;
 import java.net.SocketTimeoutException;
 import java.nio.channels.SocketChannel;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
 
 /**
  * This test suite checks that WebHdfsFileSystem sets connection timeouts and
@@ -77,7 +74,7 @@ public class TestWebHdfsTimeouts {
     serverSocket = new ServerSocket(0, CONNECTION_BACKLOG);
     nnHttpAddress = new InetSocketAddress("localhost", serverSocket.getLocalPort());
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "localhost:" + serverSocket.getLocalPort());
-    fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
+    fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
     fs.connectionFactory = connectionFactory;
     clients = new ArrayList<SocketChannel>();
     serverThread = null;

View File

@@ -46,20 +46,36 @@ public class WebHdfsTestUtil {
     return conf;
   }
 
-  public static WebHdfsFileSystem getWebHdfsFileSystem(final Configuration conf
-      ) throws IOException, URISyntaxException {
-    final String uri = WebHdfsFileSystem.SCHEME + "://"
+  public static WebHdfsFileSystem getWebHdfsFileSystem(
+      final Configuration conf, String scheme) throws IOException,
+      URISyntaxException {
+    final String uri;
+    if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
+      uri = WebHdfsFileSystem.SCHEME + "://"
         + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+    } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
+      uri = SWebHdfsFileSystem.SCHEME + "://"
+          + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
+    } else {
+      throw new IllegalArgumentException("unknown scheme:" + scheme);
+    }
     return (WebHdfsFileSystem)FileSystem.get(new URI(uri), conf);
   }
 
   public static WebHdfsFileSystem getWebHdfsFileSystemAs(
       final UserGroupInformation ugi, final Configuration conf
+      ) throws IOException, InterruptedException {
+    return getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME);
+  }
+
+  public static WebHdfsFileSystem getWebHdfsFileSystemAs(
+      final UserGroupInformation ugi, final Configuration conf, String scheme
       ) throws IOException, InterruptedException {
     return ugi.doAs(new PrivilegedExceptionAction<WebHdfsFileSystem>() {
       @Override
       public WebHdfsFileSystem run() throws Exception {
-        return getWebHdfsFileSystem(conf);
+        return getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
       }
     });
   }
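
With the extra scheme parameter, a test can exercise either transport through the same helper. A hedged sketch of that pattern (it assumes a MiniDFSCluster plus the HTTP/HTTPS address keys are already configured, as in TestHttpsFileSystem above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;

    public class SchemeRoundTripSketch {
      static void roundTrip(Configuration conf, String scheme) throws Exception {
        // scheme is "webhdfs" or "swebhdfs"; the helper picks the matching
        // dfs.namenode.http-address / dfs.namenode.https-address key.
        FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, scheme);
        Path p = new Path("/roundtrip-" + scheme);
        fs.create(p).close();
        if (!fs.exists(p)) {
          throw new AssertionError("file not visible over " + scheme);
        }
        fs.close();
      }
    }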