svn merge -c 1208140 from trunk for HDFS-2604.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1208142 13f79535-47bb-0310-9956-ffa450edef68
@@ -39,6 +39,9 @@ Release 0.23.1 - UNRELEASED
 
     HDFS-2587. Add apt doc for WebHDFS REST API. (szetszwo)
 
+    HDFS-2604. Add a log message to show if WebHDFS is enabled and a
+    configuration section in the forrest doc. (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-2130. Switch default checksum to CRC32C. (todd)
@@ -138,6 +138,28 @@
 http://<HOST>:<HTTP_PORT>/webhdfs/v1/<PATH>?op=...
 </source>
 </section>
+<!-- ***************************************************************************** -->
+<section>
+<title>HDFS Configuration Options</title>
+<p>
+  Below are the HDFS configuration options for WebHDFS.
+</p>
+<table>
+<tr><th>Property Name</th><th>Description</th></tr>
+<tr><td><code>dfs.webhdfs.enabled</code></td>
+<td>Enable/disable WebHDFS in Namenodes and Datanodes
+</td></tr>
+<tr><td><code>dfs.web.authentication.kerberos.principal</code></td>
+<td>The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+    The HTTP Kerberos principal MUST start with 'HTTP/' per the Kerberos
+    HTTP SPNEGO specification.
+</td></tr>
+<tr><td><code>dfs.web.authentication.kerberos.keytab</code></td>
+<td>The Kerberos keytab file with the credentials for the
+    HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+</td></tr>
+</table>
+</section>
 </section>
 <!-- ***************************************************************************** -->
 <section id="Authentication">
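For reference, a minimal sketch (not part of this change) of setting the three properties documented above programmatically instead of in hdfs-site.xml. The class name, principal, and keytab path are placeholders, not values from this commit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class WebHdfsConfSketch {
  public static void main(String[] args) {
    // Equivalent to the corresponding <property> entries in hdfs-site.xml.
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean("dfs.webhdfs.enabled", true);
    // Placeholder principal; it must start with "HTTP/" per SPNEGO.
    conf.set("dfs.web.authentication.kerberos.principal", "HTTP/_HOST@EXAMPLE.COM");
    // Placeholder keytab path.
    conf.set("dfs.web.authentication.kerberos.keytab",
        "/etc/security/keytabs/spnego.service.keytab");
    System.out.println("dfs.webhdfs.enabled = "
        + conf.getBoolean("dfs.webhdfs.enabled", false));
  }
}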
@@ -35,6 +35,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_K
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
@@ -48,8 +49,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_FEDERATION_NAMESERVICES;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT;
 
 import java.io.BufferedOutputStream;
 import java.io.ByteArrayInputStream;
@@ -93,6 +92,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -130,7 +130,6 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
-import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.Param;
 import org.apache.hadoop.http.HttpServer;
@@ -493,7 +492,7 @@ public class DataNode extends Configured
     this.infoServer.addServlet(null, "/blockScannerReport",
                                DataBlockScanner.Servlet.class);
 
-    if (conf.getBoolean(DFS_WEBHDFS_ENABLED_KEY, DFS_WEBHDFS_ENABLED_DEFAULT)) {
+    if (WebHdfsFileSystem.isEnabled(conf, LOG)) {
       infoServer.addJerseyResourcePackage(DatanodeWebHdfsMethods.class
           .getPackage().getName() + ";" + Param.class.getPackage().getName(),
           WebHdfsFileSystem.PATH_PREFIX + "/*");
@@ -104,8 +104,7 @@ public class NameNodeHttpServer {
         infoPort == 0, conf,
         new AccessControlList(conf.get(DFSConfigKeys.DFS_ADMIN, " "))) {
       {
-        if (conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
-            DFSConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT)) {
+        if (WebHdfsFileSystem.isEnabled(conf, LOG)) {
           //add SPNEGO authentication filter for webhdfs
           final String name = "SPNEGO";
           final String classname = AuthFilter.class.getName();
@@ -131,6 +131,14 @@ public class WebHdfsFileSystem extends FileSystem
       DT_RENEWER.addRenewAction(webhdfs);
     }
 
+  /** Is WebHDFS enabled in conf? */
+  public static boolean isEnabled(final Configuration conf, final Log log) {
+    final boolean b = conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
+        DFSConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT);
+    log.info(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY + " = " + b);
+    return b;
+  }
+
   private final UserGroupInformation ugi;
   private InetSocketAddress nnAddr;
   private Token<?> delegationToken;
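As a usage illustration (a sketch, not code from this commit), the new helper takes any Configuration and a commons-logging Log, reads dfs.webhdfs.enabled, and logs the value it found, which is the startup log message this change adds to both the Namenode and Datanode. The class name below is hypothetical.

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;

public class WebHdfsEnabledCheck {
  private static final Log LOG = LogFactory.getLog(WebHdfsEnabledCheck.class);

  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean("dfs.webhdfs.enabled", true);
    // isEnabled() logs "dfs.webhdfs.enabled = true" via the supplied Log,
    // per the log.info call added in the hunk above.
    boolean enabled = WebHdfsFileSystem.isEnabled(conf, LOG);
    System.out.println("WebHDFS enabled: " + enabled);
  }
}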
@@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.hdfs.web.resources.DoAsParam;
 import org.apache.hadoop.hdfs.web.resources.ExceptionHandler;
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;
+import org.apache.hadoop.hdfs.web.resources.PostOpParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
 import org.apache.hadoop.security.TestDoAsEffectiveUser;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -198,9 +199,9 @@ public class TestDelegationTokenForProxyUser {
       Assert.assertEquals("/user/" + PROXY_USER, responsePath);
     }
 
+    final Path f = new Path("/testWebHdfsDoAs/a.txt");
     {
       //test create file with doAs
-      final Path f = new Path("/testWebHdfsDoAs/a.txt");
       final PutOpParam.Op op = PutOpParam.Op.CREATE;
       final URL url = WebHdfsTestUtil.toUrl(webhdfs, op, f, new DoAsParam(PROXY_USER));
       HttpURLConnection conn = (HttpURLConnection) url.openConnection();
@@ -213,5 +214,21 @@
       WebHdfsTestUtil.LOG.info("status.getOwner()=" + status.getOwner());
       Assert.assertEquals(PROXY_USER, status.getOwner());
     }
+
+    {
+      //test append file with doAs
+      final PostOpParam.Op op = PostOpParam.Op.APPEND;
+      final URL url = WebHdfsTestUtil.toUrl(webhdfs, op, f, new DoAsParam(PROXY_USER));
+      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+      conn = WebHdfsTestUtil.twoStepWrite(conn, op);
+      final FSDataOutputStream out = WebHdfsTestUtil.write(webhdfs, op, conn, 4096);
+      out.write("\nHello again!".getBytes());
+      out.close();
+
+      final FileStatus status = webhdfs.getFileStatus(f);
+      WebHdfsTestUtil.LOG.info("status.getOwner()=" + status.getOwner());
+      WebHdfsTestUtil.LOG.info("status.getLen() =" + status.getLen());
+      Assert.assertEquals(PROXY_USER, status.getOwner());
+    }
   }
 }
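The raw HttpURLConnection calls in the test above follow the WebHDFS two-step write: the first POST goes to the namenode, which redirects to a datanode that accepts the appended bytes. A minimal sketch of the same append through the public FileSystem API is shown below; the host, port, and class name are placeholders, and it assumes the webhdfs:// scheme is mapped to WebHdfsFileSystem in the cluster configuration.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsAppendSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder namenode HTTP address; 50070 was the usual default port in this era.
    FileSystem fs = FileSystem.get(URI.create("webhdfs://namenode.example.com:50070/"), conf);
    // Appending issues the two-step POST ... op=APPEND under the hood.
    FSDataOutputStream out = fs.append(new Path("/testWebHdfsDoAs/a.txt"));
    out.write("\nHello again!".getBytes());
    out.close();
  }
}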
@@ -33,6 +33,7 @@ public class TestJsonUtil {
     return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
         f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
         f.getPermission(), f.getOwner(), f.getGroup(),
+        f.isSymlink() ? new Path(f.getSymlink()) : null,
         new Path(f.getFullName(parent)));
   }
 