HDFS-3987. Merge change r1543962 from trunk.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1543967 13f79535-47bb-0310-9956-ffa450edef68
Parent: 0bf221368f
Commit: 7e10509fe4
@@ -196,7 +196,13 @@ public class KerberosAuthenticator implements Authenticator {
         doSpnegoSequence(token);
       } else {
         LOG.debug("Using fallback authenticator sequence.");
-        getFallBackAuthenticator().authenticate(url, token);
+        Authenticator auth = getFallBackAuthenticator();
+        // Make sure that the fall back authenticator have the same
+        // ConnectionConfigurator, since the method might be overridden.
+        // Otherwise the fall back authenticator might not have the information
+        // to make the connection (e.g., SSL certificates)
+        auth.setConnectionConfigurator(connConfigurator);
+        auth.authenticate(url, token);
       }
     }
   }
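Note (illustrative, not part of the patch): the change above matters mainly for HTTPS endpoints, where the ConnectionConfigurator is what installs the SSL socket factory and hostname verifier; without propagating it, the fallback (pseudo) authenticator cannot complete the TLS handshake, as the in-line comment says. A minimal sketch of the caller-side wiring, with a hypothetical no-op configurator standing in for a real SSL one and a placeholder NameNode host:

import java.net.HttpURLConnection;
import java.net.URL;

import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;

public class FallbackAuthSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical configurator; a real one would set the SSLSocketFactory
    // and hostname verifier on HttpsURLConnection instances it receives.
    ConnectionConfigurator sslConfigurator = new ConnectionConfigurator() {
      @Override
      public HttpURLConnection configure(HttpURLConnection conn) {
        return conn;
      }
    };

    KerberosAuthenticator authenticator = new KerberosAuthenticator();
    // With this change, the same configurator is handed to the fallback
    // authenticator when the server does not answer the SPNEGO sequence.
    authenticator.setConnectionConfigurator(sslConfigurator);

    AuthenticatedURL.Token token = new AuthenticatedURL.Token();
    new AuthenticatedURL(authenticator).openConnection(
        new URL("https://nn.example.com:50470/webhdfs/v1/?op=GETFILESTATUS"), token);
  }
}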
@@ -471,7 +471,9 @@ public class HttpServer implements FilterContainer {
     if (conf.getBoolean(
         CommonConfigurationKeys.HADOOP_JETTY_LOGS_SERVE_ALIASES,
         CommonConfigurationKeys.DEFAULT_HADOOP_JETTY_LOGS_SERVE_ALIASES)) {
-      logContext.getInitParams().put(
+      @SuppressWarnings("unchecked")
+      Map<String, String> params = logContext.getInitParams();
+      params.put(
           "org.mortbay.jetty.servlet.Default.aliases", "true");
     }
     logContext.setDisplayName("logs");
@@ -23,6 +23,8 @@ Release 2.3.0 - UNRELEASED
     HDFS-5382. Implement the UI of browsing filesystems in HTML 5 page. (Haohui
     Mai via jing9)
 
+    HDFS-3987. Support webhdfs over HTTPS. (Haohui Mai via jing9)
+
   IMPROVEMENTS
 
     HDFS-5267. Remove volatile from LightWeightHashSet. (Junping Du via llu)
@@ -76,9 +76,8 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.io.retry.RetryProxy;
+import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
@@ -605,12 +604,19 @@ public class DFSUtil {
    * Returns list of InetSocketAddress corresponding to HA NN HTTP addresses from
    * the configuration.
    *
    * @param conf configuration
    * @return list of InetSocketAddresses
    */
-  public static Map<String, Map<String, InetSocketAddress>> getHaNnHttpAddresses(
-      Configuration conf) {
-    return getAddresses(conf, null, DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+  public static Map<String, Map<String, InetSocketAddress>> getHaNnWebHdfsAddresses(
+      Configuration conf, String scheme) {
+    if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
+      return getAddresses(conf, null,
+          DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+    } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
+      return getAddresses(conf, null,
+          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
+    } else {
+      throw new IllegalArgumentException("Unsupported scheme: " + scheme);
+    }
   }
 
   /**
@@ -619,18 +625,28 @@ public class DFSUtil {
    * cluster, the resolver further resolves the logical name (i.e., the authority
    * in the URL) into real namenode addresses.
    */
-  public static InetSocketAddress[] resolve(URI uri, int schemeDefaultPort,
-      Configuration conf) throws IOException {
+  public static InetSocketAddress[] resolveWebHdfsUri(URI uri, Configuration conf)
+      throws IOException {
+    int defaultPort;
+    String scheme = uri.getScheme();
+    if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
+      defaultPort = DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
+    } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
+      defaultPort = DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
+    } else {
+      throw new IllegalArgumentException("Unsupported scheme: " + scheme);
+    }
+
     ArrayList<InetSocketAddress> ret = new ArrayList<InetSocketAddress>();
 
     if (!HAUtil.isLogicalUri(conf, uri)) {
       InetSocketAddress addr = NetUtils.createSocketAddr(uri.getAuthority(),
-          schemeDefaultPort);
+          defaultPort);
       ret.add(addr);
 
     } else {
       Map<String, Map<String, InetSocketAddress>> addresses = DFSUtil
-          .getHaNnHttpAddresses(conf);
+          .getHaNnWebHdfsAddresses(conf, scheme);
 
       for (Map<String, InetSocketAddress> addrs : addresses.values()) {
         for (InetSocketAddress addr : addrs.values()) {
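Note (illustrative, not part of the patch): the two DFSUtil methods above are the only place where the URI scheme decides which NameNode address keys and default port are consulted. A small sketch against a hypothetical HA nameservice "ns1"; the property names are the standard HDFS ones and the hostnames are placeholders:

import java.net.InetSocketAddress;
import java.net.URI;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;

public class WebHdfsResolutionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("dfs.nameservices", "ns1");
    conf.set("dfs.ha.namenodes.ns1", "nn1,nn2");
    conf.set("dfs.namenode.https-address.ns1.nn1", "nn1.example.com:50470");
    conf.set("dfs.namenode.https-address.ns1.nn2", "nn2.example.com:50470");

    // Per-scheme lookup: "webhdfs" reads dfs.namenode.http-address.*,
    // "swebhdfs" reads dfs.namenode.https-address.*.
    Map<String, Map<String, InetSocketAddress>> byNameservice =
        DFSUtil.getHaNnWebHdfsAddresses(conf, "swebhdfs");
    System.out.println(byNameservice.get("ns1"));

    // A logical URI expands to every configured NN address for that scheme;
    // a plain host URI would instead just pick up the scheme's default port.
    InetSocketAddress[] addrs =
        DFSUtil.resolveWebHdfsUri(new URI("swebhdfs://ns1"), conf);
    for (InetSocketAddress addr : addrs) {
      System.out.println(addr);
    }
  }
}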
@@ -1391,4 +1407,4 @@ public class DFSUtil {
     return (value == null || value.isEmpty()) ?
         defaultKey : DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY;
   }
-}
+}
@@ -801,6 +801,10 @@ public class NameNode implements NameNodeStatusMXBean {
     return httpServer.getHttpAddress();
   }
 
+  /**
+   * @return NameNode HTTPS address, used by the Web UI, image transfer,
+   *    and HTTP-based file system clients like Hftp and WebHDFS
+   */
   public InetSocketAddress getHttpsAddress() {
     return httpServer.getHttpsAddress();
   }
@@ -66,6 +66,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.hdfs.web.ParamFilter;
+import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
@@ -96,6 +97,7 @@ import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
 import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
 import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.Credentials;
@@ -210,7 +212,8 @@ public class NamenodeWebHdfsMethods {
     final Credentials c = DelegationTokenSecretManager.createCredentials(
         namenode, ugi, renewer != null? renewer: ugi.getShortUserName());
     final Token<? extends TokenIdentifier> t = c.getAllTokens().iterator().next();
-    t.setKind(WebHdfsFileSystem.TOKEN_KIND);
+    Text kind = request.getScheme().equals("http") ? WebHdfsFileSystem.TOKEN_KIND : SWebHdfsFileSystem.TOKEN_KIND;
+    t.setKind(kind);
     return t;
   }
 
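Note (illustrative, not part of the patch): stamping the token with a scheme-specific kind is what lets the renewer later pick the matching transport. A sketch of the client-visible effect, assuming a secure (Kerberos-enabled) cluster with HTTPS endpoints and a placeholder NameNode host; on an insecure cluster no token is issued:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.token.Token;

public class TokenKindSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("swebhdfs://nn.example.com:50470"), conf);
    Token<?> token = fs.getDelegationToken("renewer");
    if (token != null) {
      // Tokens fetched over swebhdfs are tagged "SWEBHDFS delegation",
      // so renewal and cancellation also travel over HTTPS.
      System.out.println(token.getKind());
    }
    fs.close();
  }
}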
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web;
+
+import java.io.IOException;
+import java.security.GeneralSecurityException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.io.Text;
+
+public class SWebHdfsFileSystem extends WebHdfsFileSystem {
+
+  public static final Text TOKEN_KIND = new Text("SWEBHDFS delegation");
+  public static final String SCHEME = "swebhdfs";
+
+  @Override
+  public String getScheme() {
+    return SCHEME;
+  }
+
+  @Override
+  protected String getTransportScheme() {
+    return "https";
+  }
+
+  @Override
+  protected synchronized void initializeTokenAspect() {
+    tokenAspect = new TokenAspect<WebHdfsFileSystem>(this, TOKEN_KIND);
+  }
+
+  @Override
+  protected void initializeConnectionFactory(Configuration conf)
+      throws IOException {
+    connectionFactory = new URLConnectionFactory(
+        URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT);
+    try {
+      connectionFactory.setConnConfigurator(URLConnectionFactory
+          .newSslConnConfigurator(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
+              conf));
+    } catch (GeneralSecurityException e) {
+      throw new IOException(e);
+    }
+  }
+
+  @Override
+  protected int getDefaultPort() {
+    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY,
+        DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT);
+  }
+}
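Note (illustrative, not part of the patch): the new subclass only swaps the transport scheme, token kind, default port, and an SSL-aware connection factory; everything else is inherited from WebHdfsFileSystem. A minimal client sketch, assuming the usual client-side SSL configuration (an ssl-client.xml with the ssl.client.truststore.* keys) is on the classpath so newSslConnConfigurator can build the HTTPS configurator; the host is a placeholder:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SWebHdfsClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The swebhdfs scheme resolves to SWebHdfsFileSystem via the
    // META-INF/services registration added later in this change.
    FileSystem fs = FileSystem.get(URI.create("swebhdfs://nn.example.com:50470/"), conf);
    for (FileStatus st : fs.listStatus(new Path("/"))) {
      System.out.println(st.getPath());
    }
    fs.close();
  }
}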
@@ -58,7 +58,8 @@ final class TokenAspect<T extends FileSystem & Renewable> {
     public boolean handleKind(Text kind) {
       return kind.equals(HftpFileSystem.TOKEN_KIND)
           || kind.equals(HsftpFileSystem.TOKEN_KIND)
-          || kind.equals(WebHdfsFileSystem.TOKEN_KIND);
+          || kind.equals(WebHdfsFileSystem.TOKEN_KIND)
+          || kind.equals(SWebHdfsFileSystem.TOKEN_KIND);
     }
 
     @Override
@@ -83,6 +84,8 @@ final class TokenAspect<T extends FileSystem & Renewable> {
         uri = DFSUtil.createUri(HsftpFileSystem.SCHEME, address);
       } else if (kind.equals(WebHdfsFileSystem.TOKEN_KIND)) {
         uri = DFSUtil.createUri(WebHdfsFileSystem.SCHEME, address);
+      } else if (kind.equals(SWebHdfsFileSystem.TOKEN_KIND)) {
+        uri = DFSUtil.createUri(SWebHdfsFileSystem.SCHEME, address);
       } else {
         throw new IllegalArgumentException("Unsupported scheme");
       }
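Note (illustrative, not part of the patch): when the token renewer runs it only has the token's kind and service address; the mapping above turns those back into a filesystem URI so the renewal goes over the same transport the token was issued on. A tiny sketch with a placeholder address:

import java.net.InetSocketAddress;
import java.net.URI;

import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;

public class TokenServiceUriSketch {
  public static void main(String[] args) {
    InetSocketAddress addr = new InetSocketAddress("nn.example.com", 50470);
    // An "SWEBHDFS delegation" token maps back to an swebhdfs:// URI,
    // so renew/cancel traffic stays on HTTPS.
    URI uri = DFSUtil.createUri(SWebHdfsFileSystem.SCHEME, addr);
    System.out.println(uri);
  }
}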
@@ -56,7 +56,6 @@ import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
-import org.apache.hadoop.hdfs.web.TokenAspect.DTSelecorByKind;
 import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
 import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
@@ -99,7 +98,6 @@ import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.Progressable;
import org.mortbay.util.ajax.JSON;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;

@@ -119,8 +117,7 @@ public class WebHdfsFileSystem extends FileSystem
 
   /** Delegation token kind */
   public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
-  protected TokenAspect<WebHdfsFileSystem> tokenAspect = new TokenAspect<WebHdfsFileSystem>(
-      this, TOKEN_KIND);
+  protected TokenAspect<WebHdfsFileSystem> tokenAspect;
 
   private UserGroupInformation ugi;
   private URI uri;
@@ -141,17 +138,44 @@ public class WebHdfsFileSystem extends FileSystem
     return SCHEME;
   }
 
+  /**
+   * return the underlying transport protocol (http / https).
+   */
+  protected String getTransportScheme() {
+    return "http";
+  }
+
+  /**
+   * Initialize tokenAspect. This function is intended to
+   * be overridden by SWebHdfsFileSystem.
+   */
+  protected synchronized void initializeTokenAspect() {
+    tokenAspect = new TokenAspect<WebHdfsFileSystem>(this, TOKEN_KIND);
+  }
+
+  /**
+   * Initialize connectionFactory. This function is intended to
+   * be overridden by SWebHdfsFileSystem.
+   */
+  protected void initializeConnectionFactory(Configuration conf)
+      throws IOException {
+    connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
+  }
+
   @Override
   public synchronized void initialize(URI uri, Configuration conf
       ) throws IOException {
     super.initialize(uri, conf);
     setConf(conf);
+    initializeTokenAspect();
+    initializeConnectionFactory(conf);
+
     ugi = UserGroupInformation.getCurrentUser();
 
     try {
       this.uri = new URI(uri.getScheme(), uri.getAuthority(), null,
           null, null);
-      this.nnAddrs = DFSUtil.resolve(this.uri, getDefaultPort(), conf);
+      this.nnAddrs = DFSUtil.resolveWebHdfsUri(this.uri, conf);
     } catch (URISyntaxException e) {
       throw new IllegalArgumentException(e);
     }
@@ -343,7 +367,7 @@ public class WebHdfsFileSystem extends FileSystem
    */
   private URL getNamenodeURL(String path, String query) throws IOException {
     InetSocketAddress nnAddr = getCurrentNNAddr();
-    final URL url = new URL("http", nnAddr.getHostName(),
+    final URL url = new URL(getTransportScheme(), nnAddr.getHostName(),
         nnAddr.getPort(), path + '?' + query);
     if (LOG.isTraceEnabled()) {
       LOG.trace("url=" + url);
@@ -841,7 +865,9 @@ public class WebHdfsFileSystem extends FileSystem
   @Override
   public void close() throws IOException {
     super.close();
-    tokenAspect.removeRenewAction();
+    synchronized (this) {
+      tokenAspect.removeRenewAction();
+    }
   }
 
   class OffsetUrlOpener extends ByteRangeInputStream.URLOpener {
@@ -17,3 +17,4 @@ org.apache.hadoop.hdfs.DistributedFileSystem
 org.apache.hadoop.hdfs.web.HftpFileSystem
 org.apache.hadoop.hdfs.web.HsftpFileSystem
 org.apache.hadoop.hdfs.web.WebHdfsFileSystem
+org.apache.hadoop.hdfs.web.SWebHdfsFileSystem
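Note (illustrative, not part of the patch): this services file is read through java.util.ServiceLoader, which is how FileSystem.get() can map the swebhdfs scheme to SWebHdfsFileSystem without an explicit fs.swebhdfs.impl setting. A quick way to see which implementations are registered on the classpath:

import java.util.ServiceLoader;

import org.apache.hadoop.fs.FileSystem;

public class ListRegisteredFileSystems {
  public static void main(String[] args) {
    // Prints every FileSystem implementation advertised via
    // META-INF/services/org.apache.hadoop.fs.FileSystem.
    for (FileSystem fs : ServiceLoader.load(FileSystem.class)) {
      System.out.println(fs.getClass().getName());
    }
  }
}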
@@ -89,7 +89,7 @@ abstract public class TestSymlinkHdfs extends SymlinkBaseTest {
     conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     conf.set(FsPermission.UMASK_LABEL, "000");
     cluster = new MiniDFSCluster.Builder(conf).build();
-    webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
+    webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
     dfs = cluster.getFileSystem();
   }
 
@@ -73,6 +73,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
@@ -834,8 +835,8 @@ public class TestDFSClientRetries {
     try {
       cluster.waitActive();
       final DistributedFileSystem dfs = cluster.getFileSystem();
-      final FileSystem fs = isWebHDFS?
-          WebHdfsTestUtil.getWebHdfsFileSystem(conf): dfs;
+      final FileSystem fs = isWebHDFS ? WebHdfsTestUtil.getWebHdfsFileSystem(
+          conf, WebHdfsFileSystem.SCHEME) : dfs;
       final URI uri = dfs.getUri();
       assertTrue(HdfsUtils.isHealthy(uri));
 
@@ -1039,7 +1040,7 @@ public class TestDFSClientRetries {
     final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
         username, new String[]{"supergroup"});
 
-    return isWebHDFS? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf)
+    return isWebHDFS? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME)
         : DFSTestUtil.getFileSystemAs(ugi, conf);
   }
 
|
@ -556,7 +556,7 @@ public class TestDFSUtil {
|
|||
Configuration conf = createWebHDFSHAConfiguration(LOGICAL_HOST_NAME, NS1_NN1_ADDR, NS1_NN2_ADDR);
|
||||
|
||||
Map<String, Map<String, InetSocketAddress>> map =
|
||||
DFSUtil.getHaNnHttpAddresses(conf);
|
||||
DFSUtil.getHaNnWebHdfsAddresses(conf, "webhdfs");
|
||||
|
||||
assertEquals(NS1_NN1_ADDR, map.get("ns1").get("nn1").toString());
|
||||
assertEquals(NS1_NN2_ADDR, map.get("ns1").get("nn2").toString());
|
||||
|
@@ -574,7 +574,7 @@ public class TestDFSUtil {
     Configuration conf = createWebHDFSHAConfiguration(LOGICAL_HOST_NAME, NS1_NN1_ADDR, NS1_NN2_ADDR);
     URI uri = new URI("webhdfs://ns1");
     assertTrue(HAUtil.isLogicalUri(conf, uri));
-    InetSocketAddress[] addrs = DFSUtil.resolve(uri, DEFAULT_PORT, conf);
+    InetSocketAddress[] addrs = DFSUtil.resolveWebHdfsUri(uri, conf);
     assertArrayEquals(new InetSocketAddress[] {
       new InetSocketAddress(NS1_NN1_HOST, DEFAULT_PORT),
       new InetSocketAddress(NS1_NN2_HOST, DEFAULT_PORT),
@@ -147,7 +147,7 @@ public class TestDelegationTokenForProxyUser {
   public void testWebHdfsDoAs() throws Exception {
     WebHdfsTestUtil.LOG.info("START: testWebHdfsDoAs()");
     WebHdfsTestUtil.LOG.info("ugi.getShortUserName()=" + ugi.getShortUserName());
-    final WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, config);
+    final WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, config, WebHdfsFileSystem.SCHEME);
 
     final Path root = new Path("/");
     cluster.getFileSystem().setPermission(root, new FsPermission((short)0777));
@@ -163,7 +163,7 @@ public class TestAuditLogs {
 
     setupAuditLogs();
 
-    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf);
+    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
     InputStream istream = webfs.open(file);
     int val = istream.read();
     istream.close();
@@ -182,7 +182,7 @@ public class TestAuditLogs {
 
     setupAuditLogs();
 
-    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf);
+    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
     FileStatus st = webfs.getFileStatus(file);
 
     verifyAuditLogs(true);
@@ -222,7 +222,7 @@ public class TestAuditLogs {
 
     setupAuditLogs();
     try {
-      WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf);
+      WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
       InputStream istream = webfs.open(file);
       int val = istream.read();
       fail("open+read must not succeed, got " + val);
@@ -65,6 +65,7 @@ public class TestHttpsFileSystem {
     cluster.getFileSystem().create(new Path("/test")).close();
     InetSocketAddress addr = cluster.getNameNode().getHttpsAddress();
     nnAddr = addr.getHostName() + ":" + addr.getPort();
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, nnAddr);
   }
 
   @AfterClass
@@ -80,4 +81,15 @@ public class TestHttpsFileSystem {
     Assert.assertTrue(fs.exists(new Path("/test")));
     fs.close();
   }
+
+  @Test
+  public void testSWebHdfsFileSystem() throws Exception {
+    FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, "swebhdfs");
+    final Path f = new Path("/testswebhdfs");
+    FSDataOutputStream os = fs.create(f);
+    os.write(23);
+    os.close();
+    Assert.assertTrue(fs.exists(f));
+    fs.close();
+  }
 }
@@ -101,7 +101,7 @@ public class TestWebHDFS {
     try {
       cluster.waitActive();
 
-      final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
+      final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
       final Path dir = new Path("/test/largeFile");
       Assert.assertTrue(fs.mkdirs(dir));
 
@@ -229,9 +229,9 @@ public class TestWebHDFS {
         new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     try {
       cluster.waitActive();
-      WebHdfsTestUtil.getWebHdfsFileSystem(conf).setPermission(
-          new Path("/"),
-          new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
+      WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME)
+          .setPermission(new Path("/"),
+              new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
 
       // trick the NN into not believing it's not the superuser so we can
       // tell if the correct user is used by listStatus
@@ -243,8 +243,9 @@ public class TestWebHDFS {
         .doAs(new PrivilegedExceptionAction<Void>() {
           @Override
           public Void run() throws IOException, URISyntaxException {
-            FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
-            Path d = new Path("/my-dir");
+            FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+                WebHdfsFileSystem.SCHEME);
+            Path d = new Path("/my-dir");
             Assert.assertTrue(fs.mkdirs(d));
             for (int i=0; i < listLimit*3; i++) {
               Path p = new Path(d, "file-"+i);
@@ -82,7 +82,7 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
     final UserGroupInformation current = UserGroupInformation.getCurrentUser();
     ugi = UserGroupInformation.createUserForTesting(
         current.getShortUserName() + "x", new String[]{"user"});
-    fs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf);
+    fs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME);
     defaultWorkingDirectory = fs.getWorkingDirectory().toUri().getPath();
   }
 
@@ -18,35 +18,32 @@

package org.apache.hadoop.hdfs.web;

import static org.junit.Assert.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketAddress;
import java.net.SocketTimeoutException;
import java.nio.channels.SocketChannel;
import java.util.ArrayList;
import java.util.List;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

/**
 * This test suite checks that WebHdfsFileSystem sets connection timeouts and
@@ -77,7 +74,7 @@ public class TestWebHdfsTimeouts {
     serverSocket = new ServerSocket(0, CONNECTION_BACKLOG);
     nnHttpAddress = new InetSocketAddress("localhost", serverSocket.getLocalPort());
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "localhost:" + serverSocket.getLocalPort());
-    fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
+    fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
     fs.connectionFactory = connectionFactory;
     clients = new ArrayList<SocketChannel>();
     serverThread = null;
@@ -46,20 +46,36 @@ public class WebHdfsTestUtil {
    return conf;
  }

  public static WebHdfsFileSystem getWebHdfsFileSystem(final Configuration conf
      ) throws IOException, URISyntaxException {
    final String uri = WebHdfsFileSystem.SCHEME + "://"
        + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  public static WebHdfsFileSystem getWebHdfsFileSystem(
      final Configuration conf, String scheme) throws IOException,
      URISyntaxException {
    final String uri;

    if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
      uri = WebHdfsFileSystem.SCHEME + "://"
          + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
    } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
      uri = SWebHdfsFileSystem.SCHEME + "://"
          + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
    } else {
      throw new IllegalArgumentException("unknown scheme:" + scheme);
    }
    return (WebHdfsFileSystem)FileSystem.get(new URI(uri), conf);
  }

  public static WebHdfsFileSystem getWebHdfsFileSystemAs(
      final UserGroupInformation ugi, final Configuration conf
      final UserGroupInformation ugi, final Configuration conf
      ) throws IOException, InterruptedException {
    return getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME);
  }

  public static WebHdfsFileSystem getWebHdfsFileSystemAs(
      final UserGroupInformation ugi, final Configuration conf, String scheme
      ) throws IOException, InterruptedException {
    return ugi.doAs(new PrivilegedExceptionAction<WebHdfsFileSystem>() {
      @Override
      public WebHdfsFileSystem run() throws Exception {
        return getWebHdfsFileSystem(conf);
        return getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
      }
    });
  }
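Note (illustrative, not part of the patch): after this change every test that wants a WebHDFS client states the scheme explicitly. A minimal sketch of how a test would obtain the secure flavor, assuming dfs.namenode.https-address already points at a running HTTPS endpoint (as TestHttpsFileSystem sets up above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;

public class SWebHdfsSmokeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Builds an swebhdfs:// URI from dfs.namenode.https-address and
    // returns the FileSystem bound to it.
    FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, "swebhdfs");
    fs.mkdirs(new Path("/tmp/swebhdfs-smoke"));
    fs.close();
  }
}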