HDFS-8052. Move WebHdfsFileSystem into hadoop-hdfs-client. Contributed by Haohui Mai.
parent 8f6053ae51
commit b64bb2b9b4
@@ -60,7 +60,7 @@ public class RetryUtils {
       boolean defaultRetryPolicyEnabled,
       String retryPolicySpecKey,
       String defaultRetryPolicySpec,
-      final Class<? extends Exception> remoteExceptionToRetry
+      final String remoteExceptionToRetry
       ) {
 
     final RetryPolicy multipleLinearRandomRetry =
@@ -94,7 +94,7 @@ public class RetryUtils {
         final RetryPolicy p;
         if (e instanceof RemoteException) {
           final RemoteException re = (RemoteException)e;
-          p = remoteExceptionToRetry.getName().equals(re.getClassName())?
+          p = remoteExceptionToRetry.equals(re.getClassName())?
               multipleLinearRandomRetry: RetryPolicies.TRY_ONCE_THEN_FAIL;
         } else if (e instanceof IOException || e instanceof ServiceException) {
           p = multipleLinearRandomRetry;
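The two RetryUtils hunks above change the retry helper to accept the remote exception's class name as a String rather than a Class<? extends Exception> reference, so hadoop-hdfs-client can ask for retries on the server-side SafeModeException without a compile-time dependency on that class. A minimal illustrative sketch of the name-based matching (the example class and values are hypothetical, not part of the patch):

    // Sketch only: match a RemoteException by class name, as the updated
    // RetryUtils does, so the client never has to load the server-side class.
    public class RetryNameMatchExample {
      public static void main(String[] args) {
        String remoteExceptionToRetry =
            "org.apache.hadoop.hdfs.server.namenode.SafeModeException";
        // Stand-in for RemoteException#getClassName() from an RPC response.
        String reportedClassName =
            "org.apache.hadoop.hdfs.server.namenode.SafeModeException";
        boolean retryWithBackoff =
            remoteExceptionToRetry.equals(reportedClassName);
        System.out.println("retry with backoff policy? " + retryWithBackoff);
      }
    }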
@@ -19,10 +19,17 @@ package org.apache.hadoop.hdfs;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.Maps;
+import org.apache.commons.io.Charsets;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -31,6 +38,7 @@ import java.io.UnsupportedEncodingException;
 import java.net.InetSocketAddress;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
@@ -39,6 +47,13 @@ import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICE
 public class DFSUtilClient {
   private static final Logger LOG = LoggerFactory.getLogger(
       DFSUtilClient.class);
+  /**
+   * Converts a string to a byte array using UTF8 encoding.
+   */
+  public static byte[] string2Bytes(String str) {
+    return str.getBytes(Charsets.UTF_8);
+  }
+
   /**
    * Converts a byte array to a string using UTF8 encoding.
    */
@@ -113,6 +128,62 @@ public class DFSUtilClient {
     }
   }
 
+  /**
+   * Convert a LocatedBlocks to BlockLocations[]
+   * @param blocks a LocatedBlocks
+   * @return an array of BlockLocations
+   */
+  public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) {
+    if (blocks == null) {
+      return new BlockLocation[0];
+    }
+    return locatedBlocks2Locations(blocks.getLocatedBlocks());
+  }
+
+  /**
+   * Convert a List<LocatedBlock> to BlockLocation[]
+   * @param blocks A List<LocatedBlock> to be converted
+   * @return converted array of BlockLocation
+   */
+  public static BlockLocation[] locatedBlocks2Locations(
+      List<LocatedBlock> blocks) {
+    if (blocks == null) {
+      return new BlockLocation[0];
+    }
+    int nrBlocks = blocks.size();
+    BlockLocation[] blkLocations = new BlockLocation[nrBlocks];
+    if (nrBlocks == 0) {
+      return blkLocations;
+    }
+    int idx = 0;
+    for (LocatedBlock blk : blocks) {
+      assert idx < nrBlocks : "Incorrect index";
+      DatanodeInfo[] locations = blk.getLocations();
+      String[] hosts = new String[locations.length];
+      String[] xferAddrs = new String[locations.length];
+      String[] racks = new String[locations.length];
+      for (int hCnt = 0; hCnt < locations.length; hCnt++) {
+        hosts[hCnt] = locations[hCnt].getHostName();
+        xferAddrs[hCnt] = locations[hCnt].getXferAddr();
+        NodeBase node = new NodeBase(xferAddrs[hCnt],
+            locations[hCnt].getNetworkLocation());
+        racks[hCnt] = node.toString();
+      }
+      DatanodeInfo[] cachedLocations = blk.getCachedLocations();
+      String[] cachedHosts = new String[cachedLocations.length];
+      for (int i=0; i<cachedLocations.length; i++) {
+        cachedHosts[i] = cachedLocations[i].getHostName();
+      }
+      blkLocations[idx] = new BlockLocation(xferAddrs, hosts, cachedHosts,
+          racks,
+          blk.getStartOffset(),
+          blk.getBlockSize(),
+          blk.isCorrupt());
+      idx++;
+    }
+    return blkLocations;
+  }
+
   /**
    * Decode a specific range of bytes of the given byte array to a string
    * using UTF8.
@@ -234,4 +305,42 @@ public class DFSUtilClient {
     }
     return value;
   }
+
+  /**
+   * Whether the pathname is valid. Currently prohibits relative paths,
+   * names which contain a ":" or "//", or other non-canonical paths.
+   */
+  public static boolean isValidName(String src) {
+    // Path must be absolute.
+    if (!src.startsWith(Path.SEPARATOR)) {
+      return false;
+    }
+
+    // Check for ".." "." ":" "/"
+    String[] components = StringUtils.split(src, '/');
+    for (int i = 0; i < components.length; i++) {
+      String element = components[i];
+      if (element.equals(".") ||
+          (element.contains(":")) ||
+          (element.contains("/"))) {
+        return false;
+      }
+      // ".." is allowed in path starting with /.reserved/.inodes
+      if (element.equals("..")) {
+        if (components.length > 4
+            && components[1].equals(".reserved")
+            && components[2].equals(".inodes")) {
+          continue;
+        }
+        return false;
+      }
+      // The string may start or end with a /, but not have
+      // "//" in the middle.
+      if (element.isEmpty() && i != components.length - 1 &&
+          i != 0) {
+        return false;
+      }
+    }
+    return true;
+  }
 }
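The helpers added to DFSUtilClient above (string2Bytes, locatedBlocks2Locations, isValidName) give client-side classes such as WebHdfsFileSystem utilities that previously lived only in DFSUtil; later hunks make DFSUtil delegate to them. A hedged usage sketch (the wrapper class and paths are hypothetical; the DFSUtilClient methods are the ones shown in the hunks above):

    // Illustrative only: client code calls DFSUtilClient directly.
    import org.apache.hadoop.hdfs.DFSUtilClient;

    public class DFSUtilClientUsageExample {
      public static void main(String[] args) {
        // UTF-8 conversion via the helper that now lives in the client module.
        byte[] raw = DFSUtilClient.string2Bytes("/user/example/data");
        System.out.println("encoded length: " + raw.length);

        // Path validation used by WebHdfsFileSystem#setWorkingDirectory.
        System.out.println(DFSUtilClient.isValidName("/user/example/data")); // true
        System.out.println(DFSUtilClient.isValidName("relative/path"));      // false
      }
    }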
@@ -26,6 +26,7 @@ public interface HdfsClientConfigKeys {
   long DFS_BLOCK_SIZE_DEFAULT = 128*1024*1024;
   String DFS_REPLICATION_KEY = "dfs.replication";
   short DFS_REPLICATION_DEFAULT = 3;
+  String DFS_WEBHDFS_USER_PATTERN_KEY = "dfs.webhdfs.user.provider.user.pattern";
   String DFS_WEBHDFS_USER_PATTERN_DEFAULT = "^[A-Za-z_][A-Za-z0-9._-]*[$]?$";
   String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT =
       "^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$";
@@ -37,6 +38,10 @@ public interface HdfsClientConfigKeys {
   int DFS_NAMENODE_HTTPS_PORT_DEFAULT = 50470;
   String DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address";
   String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes";
+  String DFS_WEBHDFS_ENABLED_KEY = "dfs.webhdfs.enabled";
+  boolean DFS_WEBHDFS_ENABLED_DEFAULT = true;
+  String DFS_NAMENODE_HTTP_PORT_KEY = "dfs.http.port";
+  String DFS_NAMENODE_HTTPS_PORT_KEY = "dfs.https.port";
 
   /** dfs.client.retry configuration properties */
   interface Retry {
@@ -38,4 +38,8 @@ public interface HdfsConstantsClient {
    * URI.
    */
   String HA_DT_SERVICE_PREFIX = "ha-";
+  // The name of the SafeModeException. FileSystem should retry if it sees
+  // the below exception in RPC
+  String SAFEMODE_EXCEPTION_CLASS_NAME = "org.apache.hadoop.hdfs.server" +
+      ".namenode.SafeModeException";
 }
@@ -29,7 +29,7 @@ import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
@@ -110,7 +110,7 @@ class JsonUtilClient {
     final String localName = (String) m.get("pathSuffix");
     final WebHdfsConstants.PathType type = WebHdfsConstants.PathType.valueOf((String) m.get("type"));
     final byte[] symlink = type != WebHdfsConstants.PathType.SYMLINK? null
-        : DFSUtil.string2Bytes((String) m.get("symlink"));
+        : DFSUtilClient.string2Bytes((String) m.get("symlink"));
 
     final long len = ((Number) m.get("length")).longValue();
     final String owner = (String) m.get("owner");
@@ -130,7 +130,8 @@ class JsonUtilClient {
         HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
     return new HdfsFileStatus(len, type == WebHdfsConstants.PathType.DIRECTORY, replication,
         blockSize, mTime, aTime, permission, owner, group,
-        symlink, DFSUtil.string2Bytes(localName), fileId, childrenNum, null,
+        symlink, DFSUtilClient.string2Bytes(localName),
+        fileId, childrenNum, null,
         storagePolicy);
   }
 
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.web;
 
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.io.Text;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -39,10 +39,10 @@ public class SWebHdfsFileSystem extends WebHdfsFileSystem {
     return WebHdfsConstants.SWEBHDFS_TOKEN_KIND;
   }
 
-  @Override
   @VisibleForTesting
+  @Override
   public int getDefaultPort() {
-    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY,
-        DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT);
+    return getConf().getInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY,
+        HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT);
   }
 }
@@ -55,8 +55,8 @@ final class TokenAspect<T extends FileSystem & Renewable> {
 
     @Override
     public boolean handleKind(Text kind) {
-      return kind.equals(HftpFileSystem.TOKEN_KIND)
-          || kind.equals(HsftpFileSystem.TOKEN_KIND)
+      return kind.equals(WebHdfsConstants.HFTP_TOKEN_KIND)
+          || kind.equals(WebHdfsConstants.HSFTP_TOKEN_KIND)
           || kind.equals(WebHdfsConstants.WEBHDFS_TOKEN_KIND)
           || kind.equals(WebHdfsConstants.SWEBHDFS_TOKEN_KIND);
     }
@@ -87,10 +87,10 @@ final class TokenAspect<T extends FileSystem & Renewable> {
     }
 
     private static String getSchemeByKind(Text kind) {
-      if (kind.equals(HftpFileSystem.TOKEN_KIND)) {
-        return HftpFileSystem.SCHEME;
-      } else if (kind.equals(HsftpFileSystem.TOKEN_KIND)) {
-        return HsftpFileSystem.SCHEME;
+      if (kind.equals(WebHdfsConstants.HFTP_TOKEN_KIND)) {
+        return WebHdfsConstants.HFTP_SCHEME;
+      } else if (kind.equals(WebHdfsConstants.HSFTP_TOKEN_KIND)) {
+        return WebHdfsConstants.HSFTP_SCHEME;
       } else if (kind.equals(WebHdfsConstants.WEBHDFS_TOKEN_KIND)) {
         return WebHdfsConstants.WEBHDFS_SCHEME;
       } else if (kind.equals(WebHdfsConstants.SWEBHDFS_TOKEN_KIND)) {
@@ -174,7 +174,7 @@ public class URLConnectionFactory {
 
   /**
    * Sets timeout parameters on the given URLConnection.
    *
    * @param connection
    *          URLConnection to set
    * @param socketTimeout
@@ -23,6 +23,10 @@ import org.apache.hadoop.io.Text;
 
 @InterfaceAudience.Private
 public class WebHdfsConstants {
+  public static final String HFTP_SCHEME = "hftp";
+  public static final Text HFTP_TOKEN_KIND = new Text("HFTP delegation");
+  public static final Text HSFTP_TOKEN_KIND = new Text("HSFTP delegation");
+  public static final String HSFTP_SCHEME = "hsftp";
   public static final String WEBHDFS_SCHEME = "webhdfs";
   public static final String SWEBHDFS_SCHEME = "swebhdfs";
   public static final Text WEBHDFS_TOKEN_KIND = new Text("WEBHDFS delegation");
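With the hftp/hsftp scheme and token-kind constants collected in WebHdfsConstants, TokenAspect (earlier hunks) and DelegationTokenFetcher (later hunks) no longer need to import HftpFileSystem or HsftpFileSystem. A small, assumption-laden sketch of the kind-to-scheme lookup this enables (standalone example class, not code from the patch):

    // Illustrative only: resolve a delegation token kind to its URI scheme
    // using the constants that now live in WebHdfsConstants.
    import org.apache.hadoop.hdfs.web.WebHdfsConstants;
    import org.apache.hadoop.io.Text;

    public class TokenKindSchemeExample {
      static String schemeFor(Text kind) {
        if (kind.equals(WebHdfsConstants.HFTP_TOKEN_KIND)) {
          return WebHdfsConstants.HFTP_SCHEME;
        } else if (kind.equals(WebHdfsConstants.HSFTP_TOKEN_KIND)) {
          return WebHdfsConstants.HSFTP_SCHEME;
        } else if (kind.equals(WebHdfsConstants.WEBHDFS_TOKEN_KIND)) {
          return WebHdfsConstants.WEBHDFS_SCHEME;
        }
        return WebHdfsConstants.SWEBHDFS_SCHEME;
      }

      public static void main(String[] args) {
        System.out.println(schemeFor(new Text("HFTP delegation"))); // hftp
      }
    }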
@@ -56,14 +56,12 @@ import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtilClient;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.web.resources.*;
 import org.apache.hadoop.hdfs.web.resources.HttpOpParam.Op;
 import org.apache.hadoop.io.Text;
@@ -145,8 +143,8 @@ public class WebHdfsFileSystem extends FileSystem
     setConf(conf);
     /** set user pattern based on configuration file */
     UserParam.setUserPattern(conf.get(
-        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
-        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
+        HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
+        HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
 
     connectionFactory = URLConnectionFactory
         .newDefaultURLConnectionFactory(conf);
@@ -172,7 +170,7 @@ public class WebHdfsFileSystem extends FileSystem
           HdfsClientConfigKeys.HttpClient.RETRY_POLICY_ENABLED_DEFAULT,
           HdfsClientConfigKeys.HttpClient.RETRY_POLICY_SPEC_KEY,
           HdfsClientConfigKeys.HttpClient.RETRY_POLICY_SPEC_DEFAULT,
-          SafeModeException.class);
+          HdfsConstantsClient.SAFEMODE_EXCEPTION_CLASS_NAME);
     } else {
 
       int maxFailoverAttempts = conf.getInt(
@@ -209,8 +207,9 @@ public class WebHdfsFileSystem extends FileSystem
 
   /** Is WebHDFS enabled in conf? */
   public static boolean isEnabled(final Configuration conf, final Log log) {
-    final boolean b = conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
-        DFSConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT);
+    final boolean b = conf.getBoolean(
+        HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
+        HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT);
     return b;
   }
 
@@ -230,7 +229,7 @@ public class WebHdfsFileSystem extends FileSystem
       if(LOG.isDebugEnabled()) {
         LOG.debug("Using UGI token: " + token);
       }
       canRefreshDelegationToken = false;
     } else {
       token = getDelegationToken(null);
       if (token != null) {
@@ -263,15 +262,15 @@ public class WebHdfsFileSystem extends FileSystem
   @Override
   @VisibleForTesting
   public int getDefaultPort() {
-    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
-        DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
+    return getConf().getInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
+        HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
   }
 
   @Override
   public URI getUri() {
     return this.uri;
   }
 
   @Override
   protected URI canonicalizeUri(URI uri) {
     return NetUtils.getCanonicalUri(uri, getDefaultPort());
@@ -295,8 +294,8 @@ public class WebHdfsFileSystem extends FileSystem
   @Override
   public synchronized void setWorkingDirectory(final Path dir) {
     String result = makeAbsolute(dir).toUri().getPath();
-    if (!DFSUtil.isValidName(result)) {
+    if (!DFSUtilClient.isValidName(result)) {
       throw new IllegalArgumentException("Invalid DFS directory name " +
           result);
     }
     workingDir = makeAbsolute(dir);
@@ -375,10 +374,10 @@ public class WebHdfsFileSystem extends FileSystem
 
   /**
    * Covert an exception to an IOException.
    *
    * For a non-IOException, wrap it with IOException.
    * For a RemoteException, unwrap it.
    * For an IOException which is not a RemoteException, return it.
    */
   private static IOException toIOException(Exception e) {
     if (!(e instanceof IOException)) {
@@ -421,9 +420,9 @@ public class WebHdfsFileSystem extends FileSystem
     }
     return url;
   }
 
   Param<?,?>[] getAuthParameters(final HttpOpParam.Op op) throws IOException {
     List<Param<?,?>> authParams = Lists.newArrayList();
     // Skip adding delegation token for token operations because these
     // operations require authentication.
     Token<?> token = null;
@@ -502,11 +501,11 @@ public class WebHdfsFileSystem extends FileSystem
 
   /**
    * Two-step requests redirected to a DN
    *
    * Create/Append:
    * Step 1) Submit a Http request with neither auto-redirect nor data.
    * Step 2) Submit another Http request with the URL from the Location header with data.
    *
    * The reason of having two-step create/append is for preventing clients to
    * send out the data before the redirect. This issue is addressed by the
    * "Expect: 100-continue" header in HTTP/1.1; see RFC 2616, Section 8.2.3.
@@ -514,7 +513,7 @@ public class WebHdfsFileSystem extends FileSystem
    * and Java 6 http client), which do not correctly implement "Expect:
    * 100-continue". The two-step create/append is a temporary workaround for
    * the software library bugs.
    *
    * Open/Checksum
    * Also implements two-step connects for other operations redirected to
    * a DN such as open and checksum
@@ -523,7 +522,7 @@ public class WebHdfsFileSystem extends FileSystem
       //redirect hostname and port
       String redirectHost = null;
 
 
       // resolve redirects for a DN operation unless already resolved
       if (op.getRedirect() && !redirected) {
         final HttpOpParam.Op redirectOp =
@@ -553,7 +552,7 @@ public class WebHdfsFileSystem extends FileSystem
           }
         }
         throw ioe;
       }
     }
 
     private HttpURLConnection connect(final HttpOpParam.Op op, final URL url)
@@ -565,7 +564,7 @@ public class WebHdfsFileSystem extends FileSystem
       conn.setInstanceFollowRedirects(false);
       switch (op.getType()) {
       // if not sending a message body for a POST or PUT operation, need
       // to ensure the server/proxy knows this
       case POST:
       case PUT: {
         conn.setDoOutput(true);
|
||||||
abstract class AbstractFsPathRunner<T> extends AbstractRunner<T> {
|
abstract class AbstractFsPathRunner<T> extends AbstractRunner<T> {
|
||||||
private final Path fspath;
|
private final Path fspath;
|
||||||
private final Param<?,?>[] parameters;
|
private final Param<?,?>[] parameters;
|
||||||
|
|
||||||
AbstractFsPathRunner(final HttpOpParam.Op op, final Path fspath,
|
AbstractFsPathRunner(final HttpOpParam.Op op, final Path fspath,
|
||||||
Param<?,?>... parameters) {
|
Param<?,?>... parameters) {
|
||||||
super(op, false);
|
super(op, false);
|
||||||
this.fspath = fspath;
|
this.fspath = fspath;
|
||||||
this.parameters = parameters;
|
this.parameters = parameters;
|
||||||
}
|
}
|
||||||
|
|
||||||
AbstractFsPathRunner(final HttpOpParam.Op op, Param<?,?>[] parameters,
|
AbstractFsPathRunner(final HttpOpParam.Op op, Param<?,?>[] parameters,
|
||||||
final Path fspath) {
|
final Path fspath) {
|
||||||
super(op, false);
|
super(op, false);
|
||||||
this.fspath = fspath;
|
this.fspath = fspath;
|
||||||
this.parameters = parameters;
|
this.parameters = parameters;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
protected URL getUrl() throws IOException {
|
protected URL getUrl() throws IOException {
|
||||||
if (excludeDatanodes.getValue() != null) {
|
if (excludeDatanodes.getValue() != null) {
|
||||||
|
@ -708,7 +707,7 @@ public class WebHdfsFileSystem extends FileSystem
|
||||||
FsPathRunner(Op op, Path fspath, Param<?,?>... parameters) {
|
FsPathRunner(Op op, Path fspath, Param<?,?>... parameters) {
|
||||||
super(op, fspath, parameters);
|
super(op, fspath, parameters);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
Void getResponse(HttpURLConnection conn) throws IOException {
|
Void getResponse(HttpURLConnection conn) throws IOException {
|
||||||
return null;
|
return null;
|
||||||
|
@ -723,12 +722,12 @@ public class WebHdfsFileSystem extends FileSystem
|
||||||
Param<?,?>... parameters) {
|
Param<?,?>... parameters) {
|
||||||
super(op, fspath, parameters);
|
super(op, fspath, parameters);
|
||||||
}
|
}
|
||||||
|
|
||||||
FsPathResponseRunner(final HttpOpParam.Op op, Param<?,?>[] parameters,
|
FsPathResponseRunner(final HttpOpParam.Op op, Param<?,?>[] parameters,
|
||||||
final Path fspath) {
|
final Path fspath) {
|
||||||
super(op, parameters, fspath);
|
super(op, parameters, fspath);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
final T getResponse(HttpURLConnection conn) throws IOException {
|
final T getResponse(HttpURLConnection conn) throws IOException {
|
||||||
try {
|
try {
|
||||||
|
@ -751,7 +750,7 @@ public class WebHdfsFileSystem extends FileSystem
|
||||||
conn.disconnect();
|
conn.disconnect();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
abstract T decodeResponse(Map<?,?> json) throws IOException;
|
abstract T decodeResponse(Map<?,?> json) throws IOException;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -762,7 +761,7 @@ public class WebHdfsFileSystem extends FileSystem
|
||||||
FsPathBooleanRunner(Op op, Path fspath, Param<?,?>... parameters) {
|
FsPathBooleanRunner(Op op, Path fspath, Param<?,?>... parameters) {
|
||||||
super(op, fspath, parameters);
|
super(op, fspath, parameters);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
Boolean decodeResponse(Map<?,?> json) throws IOException {
|
Boolean decodeResponse(Map<?,?> json) throws IOException {
|
||||||
return (Boolean)json.get("boolean");
|
return (Boolean)json.get("boolean");
|
||||||
|
@ -774,13 +773,13 @@ public class WebHdfsFileSystem extends FileSystem
|
||||||
*/
|
*/
|
||||||
class FsPathOutputStreamRunner extends AbstractFsPathRunner<FSDataOutputStream> {
|
class FsPathOutputStreamRunner extends AbstractFsPathRunner<FSDataOutputStream> {
|
||||||
private final int bufferSize;
|
private final int bufferSize;
|
||||||
|
|
||||||
FsPathOutputStreamRunner(Op op, Path fspath, int bufferSize,
|
FsPathOutputStreamRunner(Op op, Path fspath, int bufferSize,
|
||||||
Param<?,?>... parameters) {
|
Param<?,?>... parameters) {
|
||||||
super(op, fspath, parameters);
|
super(op, fspath, parameters);
|
||||||
this.bufferSize = bufferSize;
|
this.bufferSize = bufferSize;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
FSDataOutputStream getResponse(final HttpURLConnection conn)
|
FSDataOutputStream getResponse(final HttpURLConnection conn)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
|
@ -812,7 +811,7 @@ public class WebHdfsFileSystem extends FileSystem
|
||||||
return conn;
|
return conn;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Used by open() which tracks the resolved url itself
|
* Used by open() which tracks the resolved url itself
|
||||||
*/
|
*/
|
||||||
|
@@ -926,26 +925,26 @@ public class WebHdfsFileSystem extends FileSystem
         new RenameOptionSetParam(options)
     ).run();
   }
 
   @Override
   public void setXAttr(Path p, String name, byte[] value,
       EnumSet<XAttrSetFlag> flag) throws IOException {
     statistics.incrementWriteOps(1);
     final HttpOpParam.Op op = PutOpParam.Op.SETXATTR;
     if (value != null) {
       new FsPathRunner(op, p, new XAttrNameParam(name), new XAttrValueParam(
           XAttrCodec.encodeValue(value, XAttrCodec.HEX)),
           new XAttrSetFlagParam(flag)).run();
     } else {
       new FsPathRunner(op, p, new XAttrNameParam(name),
           new XAttrSetFlagParam(flag)).run();
     }
   }
 
   @Override
   public byte[] getXAttr(Path p, final String name) throws IOException {
     final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS;
     return new FsPathResponseRunner<byte[]>(op, p, new XAttrNameParam(name),
         new XAttrEncodingParam(XAttrCodec.HEX)) {
       @Override
       byte[] decodeResponse(Map<?, ?> json) throws IOException {
@@ -953,11 +952,11 @@ public class WebHdfsFileSystem extends FileSystem
       }
     }.run();
   }
 
   @Override
   public Map<String, byte[]> getXAttrs(Path p) throws IOException {
     final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS;
     return new FsPathResponseRunner<Map<String, byte[]>>(op, p,
         new XAttrEncodingParam(XAttrCodec.HEX)) {
       @Override
       Map<String, byte[]> decodeResponse(Map<?, ?> json) throws IOException {
@@ -965,18 +964,18 @@ public class WebHdfsFileSystem extends FileSystem
       }
     }.run();
   }
 
   @Override
   public Map<String, byte[]> getXAttrs(Path p, final List<String> names)
       throws IOException {
     Preconditions.checkArgument(names != null && !names.isEmpty(),
         "XAttr names cannot be null or empty.");
     Param<?,?>[] parameters = new Param<?,?>[names.size() + 1];
     for (int i = 0; i < parameters.length - 1; i++) {
       parameters[i] = new XAttrNameParam(names.get(i));
     }
     parameters[parameters.length - 1] = new XAttrEncodingParam(XAttrCodec.HEX);
 
     final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS;
     return new FsPathResponseRunner<Map<String, byte[]>>(op, parameters, p) {
       @Override
@@ -985,7 +984,7 @@ public class WebHdfsFileSystem extends FileSystem
       }
     }.run();
   }
 
   @Override
   public List<String> listXAttrs(Path p) throws IOException {
     final HttpOpParam.Op op = GetOpParam.Op.LISTXATTRS;
@@ -1065,7 +1064,7 @@ public class WebHdfsFileSystem extends FileSystem
   }
 
   @Override
   public Path createSnapshot(final Path path, final String snapshotName)
       throws IOException {
     statistics.incrementWriteOps(1);
     final HttpOpParam.Op op = PutOpParam.Op.CREATESNAPSHOT;
@@ -1119,14 +1118,14 @@ public class WebHdfsFileSystem extends FileSystem
 
   @Override
   public long getDefaultBlockSize() {
-    return getConf().getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
-        DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
+    return getConf().getLongBytes(HdfsClientConfigKeys.DFS_BLOCK_SIZE_KEY,
+        HdfsClientConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
   }
 
   @Override
   public short getDefaultReplication() {
-    return (short)getConf().getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
-        DFSConfigKeys.DFS_REPLICATION_DEFAULT);
+    return (short)getConf().getInt(HdfsClientConfigKeys.DFS_REPLICATION_KEY,
+        HdfsClientConfigKeys.DFS_REPLICATION_DEFAULT);
   }
 
   @Override
@@ -1236,7 +1235,7 @@ public class WebHdfsFileSystem extends FileSystem
       final URL offsetUrl = offset == 0L? url
           : new URL(url + "&" + new OffsetParam(offset));
       return new URLRunner(GetOpParam.Op.OPEN, offsetUrl, resolved).run();
     }
   }
 
   private static final String OFFSET_PARAM_PREFIX = OffsetParam.NAME + "=";
@@ -1367,7 +1366,7 @@ public class WebHdfsFileSystem extends FileSystem
         new TokenArgumentParam(token.encodeToUrlString())
     ).run();
   }
 
   @Override
   public BlockLocation[] getFileBlockLocations(final FileStatus status,
       final long offset, final long length) throws IOException {
@@ -1378,7 +1377,7 @@ public class WebHdfsFileSystem extends FileSystem
   }
 
   @Override
   public BlockLocation[] getFileBlockLocations(final Path p,
       final long offset, final long length) throws IOException {
     statistics.incrementReadOps(1);
 
@@ -1387,7 +1386,7 @@ public class WebHdfsFileSystem extends FileSystem
         new OffsetParam(offset), new LengthParam(length)) {
       @Override
       BlockLocation[] decodeResponse(Map<?,?> json) throws IOException {
-        return DFSUtil.locatedBlocks2Locations(
+        return DFSUtilClient.locatedBlocks2Locations(
             JsonUtilClient.toLocatedBlocks(json));
       }
     }.run();
@@ -1416,7 +1415,7 @@ public class WebHdfsFileSystem extends FileSystem
   public MD5MD5CRC32FileChecksum getFileChecksum(final Path p
       ) throws IOException {
     statistics.incrementReadOps(1);
 
     final HttpOpParam.Op op = GetOpParam.Op.GETFILECHECKSUM;
     return new FsPathResponseRunner<MD5MD5CRC32FileChecksum>(op, p) {
       @Override
@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.lib.wsrs.EnumSetParam;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -369,7 +370,7 @@ public class HttpFSFileSystem extends FileSystem
    */
   @Override
   protected int getDefaultPort() {
-    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
+    return getConf().getInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
         DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
   }
 
@@ -149,6 +149,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8215. Refactor NamenodeFsck#check method. (Takanobu Asanuma
     via szetszwo)
 
+    HDFS-8052. Move WebHdfsFileSystem into hadoop-hdfs-client. (wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
@@ -48,7 +48,6 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.token.Token;
-import org.apache.htrace.Sampler;
 import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
@@ -296,7 +295,7 @@ class BlockStorageLocationUtil {
       List<LocatedBlock> blocks,
       Map<LocatedBlock, List<VolumeId>> blockVolumeIds) throws IOException {
     // Construct the final return value of VolumeBlockLocation[]
-    BlockLocation[] locations = DFSUtil.locatedBlocks2Locations(blocks);
+    BlockLocation[] locations = DFSUtilClient.locatedBlocks2Locations(blocks);
     List<BlockStorageLocation> volumeBlockLocs =
         new ArrayList<BlockStorageLocation>(locations.length);
     for (int i = 0; i < locations.length; i++) {
@@ -919,7 +919,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     TraceScope scope = getPathTraceScope("getBlockLocations", src);
     try {
       LocatedBlocks blocks = getLocatedBlocks(src, start, length);
-      BlockLocation[] locations = DFSUtil.locatedBlocks2Locations(blocks);
+      BlockLocation[] locations = DFSUtilClient.locatedBlocks2Locations(blocks);
       HdfsBlockLocation[] hdfsLocations = new HdfsBlockLocation[locations.length];
       for (int i = 0; i < locations.length; i++) {
         hdfsLocations[i] = new HdfsBlockLocation(locations[i], blocks.get(i));
@@ -102,7 +102,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       "dfs.namenode.path.based.cache.block.map.allocation.percent";
   public static final float DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT_DEFAULT = 0.25f;
 
-  public static final String DFS_NAMENODE_HTTP_PORT_KEY = "dfs.http.port";
   public static final int DFS_NAMENODE_HTTP_PORT_DEFAULT =
       HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
   public static final String DFS_NAMENODE_HTTP_ADDRESS_KEY =
@@ -163,9 +162,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
    */
   public static final String DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT =
       "org.apache.hadoop.hdfs.web.AuthFilter".toString();
-  public static final String DFS_WEBHDFS_ENABLED_KEY = "dfs.webhdfs.enabled";
-  public static final boolean DFS_WEBHDFS_ENABLED_DEFAULT = true;
-  public static final String DFS_WEBHDFS_USER_PATTERN_KEY = "dfs.webhdfs.user.provider.user.pattern";
+  @Deprecated
+  public static final String DFS_WEBHDFS_USER_PATTERN_KEY =
+      HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY;
+  @Deprecated
   public static final String DFS_WEBHDFS_USER_PATTERN_DEFAULT =
       HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT;
   public static final String DFS_PERMISSIONS_ENABLED_KEY = "dfs.permissions.enabled";
@@ -305,7 +305,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
 
   //Following keys have no defaults
   public static final String DFS_DATANODE_DATA_DIR_KEY = "dfs.datanode.data.dir";
-  public static final String DFS_NAMENODE_HTTPS_PORT_KEY = "dfs.https.port";
   public static final int DFS_NAMENODE_HTTPS_PORT_DEFAULT =
       HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
   public static final String DFS_NAMENODE_HTTPS_ADDRESS_KEY =
@@ -229,37 +229,7 @@ public class DFSUtil {
    * names which contain a ":" or "//", or other non-canonical paths.
    */
   public static boolean isValidName(String src) {
-    // Path must be absolute.
-    if (!src.startsWith(Path.SEPARATOR)) {
-      return false;
-    }
-
-    // Check for ".." "." ":" "/"
-    String[] components = StringUtils.split(src, '/');
-    for (int i = 0; i < components.length; i++) {
-      String element = components[i];
-      if (element.equals(".") ||
-          (element.indexOf(":") >= 0) ||
-          (element.indexOf("/") >= 0)) {
-        return false;
-      }
-      // ".." is allowed in path starting with /.reserved/.inodes
-      if (element.equals("..")) {
-        if (components.length > 4
-            && components[1].equals(FSDirectory.DOT_RESERVED_STRING)
-            && components[2].equals(FSDirectory.DOT_INODES_STRING)) {
-          continue;
-        }
-        return false;
-      }
-      // The string may start or end with a /, but not have
-      // "//" in the middle.
-      if (element.isEmpty() && i != components.length - 1 &&
-          i != 0) {
-        return false;
-      }
-    }
-    return true;
+    return DFSUtilClient.isValidName(src);
   }
 
   /**
@@ -330,7 +300,7 @@ public class DFSUtil {
    * Converts a string to a byte array using UTF8 encoding.
    */
   public static byte[] string2Bytes(String str) {
-    return str.getBytes(Charsets.UTF_8);
+    return DFSUtilClient.string2Bytes(str);
   }
 
   /**
@@ -476,61 +446,6 @@ public class DFSUtil {
     }
     return result;
   }
-
-  /**
-   * Convert a LocatedBlocks to BlockLocations[]
-   * @param blocks a LocatedBlocks
-   * @return an array of BlockLocations
-   */
-  public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) {
-    if (blocks == null) {
-      return new BlockLocation[0];
-    }
-    return locatedBlocks2Locations(blocks.getLocatedBlocks());
-  }
-
-  /**
-   * Convert a List<LocatedBlock> to BlockLocation[]
-   * @param blocks A List<LocatedBlock> to be converted
-   * @return converted array of BlockLocation
-   */
-  public static BlockLocation[] locatedBlocks2Locations(List<LocatedBlock> blocks) {
-    if (blocks == null) {
-      return new BlockLocation[0];
-    }
-    int nrBlocks = blocks.size();
-    BlockLocation[] blkLocations = new BlockLocation[nrBlocks];
-    if (nrBlocks == 0) {
-      return blkLocations;
-    }
-    int idx = 0;
-    for (LocatedBlock blk : blocks) {
-      assert idx < nrBlocks : "Incorrect index";
-      DatanodeInfo[] locations = blk.getLocations();
-      String[] hosts = new String[locations.length];
-      String[] xferAddrs = new String[locations.length];
-      String[] racks = new String[locations.length];
-      for (int hCnt = 0; hCnt < locations.length; hCnt++) {
-        hosts[hCnt] = locations[hCnt].getHostName();
-        xferAddrs[hCnt] = locations[hCnt].getXferAddr();
-        NodeBase node = new NodeBase(xferAddrs[hCnt],
-            locations[hCnt].getNetworkLocation());
-        racks[hCnt] = node.toString();
-      }
-      DatanodeInfo[] cachedLocations = blk.getCachedLocations();
-      String[] cachedHosts = new String[cachedLocations.length];
-      for (int i=0; i<cachedLocations.length; i++) {
-        cachedHosts[i] = cachedLocations[i].getHostName();
-      }
-      blkLocations[idx] = new BlockLocation(xferAddrs, hosts, cachedHosts,
-          racks,
-          blk.getStartOffset(),
-          blk.getBlockSize(),
-          blk.isCorrupt());
-      idx++;
-    }
-    return blkLocations;
-  }
 
   /**
    * Return configuration key of format key.suffix1.suffix2...suffixN
@@ -404,7 +404,7 @@ public class NameNodeProxies {
         HdfsClientConfigKeys.Retry.POLICY_ENABLED_DEFAULT,
         HdfsClientConfigKeys.Retry.POLICY_SPEC_KEY,
         HdfsClientConfigKeys.Retry.POLICY_SPEC_DEFAULT,
-        SafeModeException.class);
+        SafeModeException.class.getName());
 
     final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
     ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(
@@ -25,7 +25,7 @@ import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 
 /**
  * Interface that represents the over the wire information
@@ -78,6 +78,6 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
         isSymlink() ? new Path(getSymlink()) : null,
         (getFullPath(path)).makeQualified(
             defaultUri, null), // fully-qualify path
-        DFSUtil.locatedBlocks2Locations(getBlockLocations()));
+        DFSUtilClient.locatedBlocks2Locations(getBlockLocations()));
   }
 }
@@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@ -46,8 +46,6 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretMan
|
||||||
import org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet;
|
import org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet;
|
||||||
import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;
|
import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;
|
||||||
import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
|
import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
|
||||||
import org.apache.hadoop.hdfs.web.HftpFileSystem;
|
|
||||||
import org.apache.hadoop.hdfs.web.HsftpFileSystem;
|
|
||||||
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
|
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
|
||||||
import org.apache.hadoop.io.IOUtils;
|
import org.apache.hadoop.io.IOUtils;
|
||||||
import org.apache.hadoop.net.NetUtils;
|
import org.apache.hadoop.net.NetUtils;
|
||||||
|
@@ -243,7 +241,7 @@ public class DelegationTokenFetcher {
       dis = new DataInputStream(in);
       ts.readFields(dis);
       for (Token<?> token : ts.getAllTokens()) {
-        token.setKind(isHttps ? HsftpFileSystem.TOKEN_KIND : HftpFileSystem.TOKEN_KIND);
+        token.setKind(isHttps ? WebHdfsConstants.HSFTP_TOKEN_KIND : WebHdfsConstants.HFTP_TOKEN_KIND);
         SecurityUtil.setTokenService(token, serviceAddr);
       }
       return ts;
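With the HFTP/HSFTP token kinds now defined on WebHdfsConstants, callers pick a kind without referencing the filesystem classes. A small sketch of the selection logic used above (class and method names are illustrative):

    import org.apache.hadoop.hdfs.web.WebHdfsConstants;
    import org.apache.hadoop.io.Text;

    class TokenKindExample {
      // Same choice DelegationTokenFetcher makes: HSFTP kind over HTTPS,
      // HFTP kind otherwise.
      static Text kindFor(boolean isHttps) {
        return isHttps ? WebHdfsConstants.HSFTP_TOKEN_KIND
                       : WebHdfsConstants.HFTP_TOKEN_KIND;
      }
    }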
@@ -48,6 +48,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
@@ -79,7 +80,6 @@ import org.xml.sax.helpers.XMLReaderFactory;
 @InterfaceStability.Evolving
 public class HftpFileSystem extends FileSystem
     implements DelegationTokenRenewer.Renewable, TokenAspect.TokenManagementDelegator {
-  public static final String SCHEME = "hftp";

   static {
     HttpURLConnection.setFollowRedirects(true);
@@ -87,8 +87,6 @@ public class HftpFileSystem extends FileSystem

   URLConnectionFactory connectionFactory;

-  public static final Text TOKEN_KIND = new Text("HFTP delegation");
-
   protected UserGroupInformation ugi;
   private URI hftpURI;

@@ -123,7 +121,7 @@ public class HftpFileSystem extends FileSystem

   @Override
   protected int getDefaultPort() {
-    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
+    return getConf().getInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
         DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
   }

@@ -168,7 +166,7 @@ public class HftpFileSystem extends FileSystem
    */
   @Override
   public String getScheme() {
-    return SCHEME;
+    return WebHdfsConstants.HFTP_SCHEME;
   }

   /**
@@ -176,7 +174,7 @@ public class HftpFileSystem extends FileSystem
    * be overridden by HsFtpFileSystem.
    */
   protected void initTokenAspect() {
-    tokenAspect = new TokenAspect<HftpFileSystem>(this, tokenServiceName, TOKEN_KIND);
+    tokenAspect = new TokenAspect<HftpFileSystem>(this, tokenServiceName, WebHdfsConstants.HFTP_TOKEN_KIND);
   }

   @Override
@@ -21,7 +21,7 @@ package org.apache.hadoop.hdfs.web;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

 /**
  * An implementation of a protocol for accessing filesystems over HTTPS. The
@@ -34,8 +34,6 @@ import org.apache.hadoop.io.Text;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class HsftpFileSystem extends HftpFileSystem {
-  public static final Text TOKEN_KIND = new Text("HSFTP delegation");
-  public static final String SCHEME = "hsftp";

   /**
    * Return the protocol scheme for the FileSystem.
@@ -45,7 +43,7 @@ public class HsftpFileSystem extends HftpFileSystem {
    */
   @Override
   public String getScheme() {
-    return SCHEME;
+    return WebHdfsConstants.HSFTP_SCHEME;
   }

   /**
@@ -59,12 +57,12 @@ public class HsftpFileSystem extends HftpFileSystem {
   @Override
   protected void initTokenAspect() {
     tokenAspect = new TokenAspect<HsftpFileSystem>(this, tokenServiceName,
-        TOKEN_KIND);
+        WebHdfsConstants.HSFTP_TOKEN_KIND);
   }

   @Override
   protected int getDefaultPort() {
-    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY,
+    return getConf().getInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY,
         DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT);
   }
 }
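With the default-port keys read from HdfsClientConfigKeys, an hftp/hsftp URI that omits the port falls back to the configured values. A sketch of that behaviour (port values 123 and 456 simply mirror the tests further below, which construct the FileSystem the same way without a running NameNode):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    class DefaultPortExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
        conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);
        // "hftp://localhost" should resolve to port 123 and
        // "hsftp://localhost" to port 456, per getDefaultPort() above.
        FileSystem hftp = FileSystem.get(URI.create("hftp://localhost"), conf);
        FileSystem hsftp = FileSystem.get(URI.create("hsftp://localhost"), conf);
        System.out.println(hftp.getUri() + " " + hsftp.getUri());
      }
    }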
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -86,7 +87,7 @@ abstract public class TestSymlinkHdfs extends SymlinkBaseTest {
   @BeforeClass
   public static void beforeClassSetup() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
+    conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     conf.set(FsPermission.UMASK_LABEL, "000");
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, 0);
     cluster = new MiniDFSCluster.Builder(conf).build();
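The WebHDFS on/off switch is now named through HdfsClientConfigKeys in test setup. A condensed sketch of the pattern the tests in this patch share (helper class name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    class WebHdfsTestSetup {
      // Enable WebHDFS via the relocated client-side key, then start a
      // single-datanode test cluster and wait for it to come up.
      static MiniDFSCluster start() throws Exception {
        Configuration conf = new HdfsConfiguration();
        conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        return cluster;
      }
    }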
@@ -112,7 +112,7 @@ public class TestDFSUtil {
     List<LocatedBlock> ls = Arrays.asList(l1, l2);
     LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null);

-    BlockLocation[] bs = DFSUtil.locatedBlocks2Locations(lbs);
+    BlockLocation[] bs = DFSUtilClient.locatedBlocks2Locations(lbs);

     assertTrue("expected 2 blocks but got " + bs.length,
         bs.length == 2);
@@ -128,7 +128,7 @@ public class TestDFSUtil {
         corruptCount == 1);

     // test an empty location
-    bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
+    bs = DFSUtilClient.locatedBlocks2Locations(new LocatedBlocks());
     assertEquals(0, bs.length);
   }
@@ -64,6 +64,7 @@ import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.VolumeId;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
@@ -500,7 +501,7 @@ public class TestDistributedFileSystem {
     RAN.setSeed(seed);

     final Configuration conf = getTestConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
+    conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);

     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     final FileSystem hdfs = cluster.getFileSystem();
@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
@@ -792,7 +793,7 @@ public class TestQuota {
     Configuration conf = new HdfsConfiguration();
     final int BLOCK_SIZE = 6 * 1024;
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
+    conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     MiniDFSCluster cluster =
         new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     cluster.waitActive();
@@ -854,7 +855,7 @@ public class TestQuota {
     Configuration conf = new HdfsConfiguration();
     final int BLOCK_SIZE = 6 * 1024;
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
+    conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     // Make it relinquish locks. When run serially, the result should
     // be identical.
     conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY, 2);
@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -68,7 +69,7 @@ public class TestDelegationToken {
   @Before
   public void setUp() throws Exception {
     config = new HdfsConfiguration();
-    config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
+    config.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
     config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
     config.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
@@ -40,6 +40,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
@@ -98,7 +99,7 @@ public class TestDelegationTokenForProxyUser {
   @BeforeClass
   public static void setUp() throws Exception {
     config = new HdfsConfiguration();
-    config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
+    config.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     config.setLong(
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
     config.setLong(
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.web.HftpFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
@@ -116,7 +117,7 @@ public class TestAuditLogs {
     final long precision = 1L;
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
+    conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY, useAsyncLog);
     util = new DFSTestUtil.Builder().setName("TestAuditAllowed").
         setNumFiles(20).build();
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertThat;
 import static org.hamcrest.core.Is.is;
 import static org.hamcrest.core.IsNot.not;

+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.junit.Test;

 import org.apache.hadoop.fs.FileUtil;
@@ -194,7 +195,7 @@ public class TestNameNodeRespectsBindHostKeys {

   private static void setupSsl() throws Exception {
     Configuration conf = new Configuration();
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
+    conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
     conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
     conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.AppendTestUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.web.resources.ExceptionHandler;
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;
 import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
@@ -71,7 +72,7 @@ public class TestFSMainOperationsWebHdfs extends FSMainOperationsBaseTest {
   @BeforeClass
   public static void setupCluster() {
     final Configuration conf = new Configuration();
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
+    conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
@@ -61,7 +61,7 @@ public class TestHftpDelegationToken {
         DelegationTokenIdentifier.HDFS_DELEGATION_KIND, new Text(
             "127.0.0.1:8020"));
     Credentials cred = new Credentials();
-    cred.addToken(HftpFileSystem.TOKEN_KIND, token);
+    cred.addToken(WebHdfsConstants.HFTP_TOKEN_KIND, token);
     ByteArrayOutputStream os = new ByteArrayOutputStream();
     cred.write(new DataOutputStream(os));

@@ -82,12 +82,12 @@ public class TestHftpDelegationToken {
         new String[] { "bar" });

     TokenAspect<HftpFileSystem> tokenAspect = new TokenAspect<HftpFileSystem>(
-        fs, SecurityUtil.buildTokenService(uri), HftpFileSystem.TOKEN_KIND);
+        fs, SecurityUtil.buildTokenService(uri), WebHdfsConstants.HFTP_TOKEN_KIND);

     tokenAspect.initDelegationToken(ugi);
     tokenAspect.ensureTokenInitialized();

-    Assert.assertSame(HftpFileSystem.TOKEN_KIND, fs.getRenewToken().getKind());
+    Assert.assertSame(WebHdfsConstants.HFTP_TOKEN_KIND, fs.getRenewToken().getKind());

     Token<?> tok = (Token<?>) Whitebox.getInternalState(fs, "delegationToken");
     Assert.assertNotSame("Not making a copy of the remote token", token, tok);
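Token kinds in the delegation-token tests are likewise taken from WebHdfsConstants. A sketch of creating and stashing such a token (the identifier/password bytes and service string are placeholders, and the class name is illustrative):

    import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
    import org.apache.hadoop.hdfs.web.WebHdfsConstants;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.Credentials;
    import org.apache.hadoop.security.token.Token;

    class CredentialsExample {
      // Wrap raw token bytes under the HFTP kind and store them in a
      // Credentials object, as the test above does.
      static Credentials stash(byte[] ident, byte[] pw, String service) {
        Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
            ident, pw, WebHdfsConstants.HFTP_TOKEN_KIND, new Text(service));
        Credentials cred = new Credentials();
        cred.addToken(WebHdfsConstants.HFTP_TOKEN_KIND, token);
        return cred;
      }
    }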
@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -313,7 +314,7 @@ public class TestHftpFileSystem {
   @Test
   public void testHftpCustomDefaultPorts() throws IOException {
     Configuration conf = new Configuration();
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
+    conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);

     URI uri = URI.create("hftp://localhost");
     HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
@@ -343,7 +344,7 @@ public class TestHftpFileSystem {
   @Test
   public void testHftpCustomUriPortWithCustomDefaultPorts() throws IOException {
     Configuration conf = new Configuration();
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
+    conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);

     URI uri = URI.create("hftp://localhost:789");
     HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
@@ -386,8 +387,8 @@ public class TestHftpFileSystem {
   @Test
   public void testHsftpCustomDefaultPorts() throws IOException {
     Configuration conf = new Configuration();
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);
+    conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
+    conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);

     URI uri = URI.create("hsftp://localhost");
     HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
@@ -414,8 +415,8 @@ public class TestHftpFileSystem {
   @Test
   public void testHsftpCustomUriPortWithCustomDefaultPorts() throws IOException {
     Configuration conf = new Configuration();
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);
+    conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
+    conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);

     URI uri = URI.create("hsftp://localhost:789");
     HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
@@ -22,9 +22,7 @@ import static org.junit.Assert.assertEquals;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.junit.Before;
 import org.junit.Test;

@@ -36,8 +34,8 @@ public class TestHttpFSPorts {

   @Before
   public void setupConfig() {
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);
+    conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
+    conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);
   }

   @Test
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
@@ -52,7 +53,7 @@ public class TestHttpsFileSystem {
   @BeforeClass
   public static void setUp() throws Exception {
     conf = new Configuration();
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
+    conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
     conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
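The HTTPS-only test configuration combines the relocated WebHDFS switch with the unchanged server-side policy and bind-address keys. A self-contained sketch mirroring the setUp above (the class name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
    import org.apache.hadoop.http.HttpConfig;

    class HttpsOnlyConf {
      // WebHDFS is enabled via the client-side key; the HTTP policy and the
      // HTTPS bind addresses still come from DFSConfigKeys.
      static Configuration create() {
        Configuration conf = new Configuration();
        conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
        conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
        conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
        conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
        return conf;
      }
    }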
@@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.TestDFSClientRetries;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@@ -283,7 +284,8 @@ public class TestWebHDFS {
   @Test(timeout=300000)
   public void testNumericalUserName() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();
-    conf.set(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, "^[A-Za-z0-9_][A-Za-z0-9._-]*[$]?$");
+    conf.set(HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, "^[A-Za-z0-9_][A-Za-z0-9" +
+        "._-]*[$]?$");
     final MiniDFSCluster cluster =
         new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     try {
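The WebHDFS user-name pattern key also moves to HdfsClientConfigKeys. A sketch of loosening the pattern so names that start with a digit are accepted (regex copied from the test above; class name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    class UserPatternExample {
      // Permit a leading digit in WebHDFS user names.
      static Configuration allowNumericalUsers(Configuration conf) {
        conf.set(HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
            "^[A-Za-z0-9_][A-Za-z0-9._-]*[$]?$");
        return conf;
      }
    }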
@@ -341,7 +343,7 @@ public class TestWebHDFS {
   @Test
   public void testWebHdfsEnabledByDefault() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    Assert.assertTrue(conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
+    Assert.assertTrue(conf.getBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
         false));
   }

@@ -42,8 +42,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.AppendTestUtil;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.web.resources.*;
 import org.apache.hadoop.hdfs.web.resources.NamenodeAddressParam;
 import org.apache.hadoop.io.IOUtils;
@@ -60,7 +60,7 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
   private UserGroupInformation ugi;

   static {
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
+    conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
       cluster.waitActive();
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.web.resources.*;
 import org.apache.hadoop.http.HttpConfig;
@@ -204,7 +205,7 @@ public class TestWebHdfsTokens {
     String keystoresDir;
     String sslConfDir;

-    clusterConf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
+    clusterConf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     clusterConf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
     clusterConf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
     clusterConf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
@@ -27,11 +27,11 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
@@ -71,7 +71,7 @@ public class TestWebHdfsWithMultipleNameNodes {
       throws Exception {
     LOG.info("nNameNodes=" + nNameNodes + ", nDataNodes=" + nDataNodes);

-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
+    conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);

     cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
@@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
 import org.apache.hadoop.hdfs.web.resources.Param;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -41,7 +42,7 @@ public class WebHdfsTestUtil {

   public static Configuration createConf() {
     final Configuration conf = new Configuration();
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
+    conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     return conf;
   }
@@ -38,8 +38,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
-import org.apache.hadoop.hdfs.web.HftpFileSystem;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
@@ -219,7 +219,7 @@ public class TestDelegationTokenRemoteFetcher {
         "renewer"), new Text("realuser")).getBytes();
     Text service = new Text(serviceUri.toString());
     return new Token<DelegationTokenIdentifier>(ident, pw,
-        HftpFileSystem.TOKEN_KIND, service);
+        WebHdfsConstants.HFTP_TOKEN_KIND, service);
   }

   private interface Handler {