HDFS-8052. Move WebHdfsFileSystem into hadoop-hdfs-client. Contributed by Haohui Mai.

Haohui Mai 2015-04-23 17:33:05 -07:00
parent 8f6053ae51
commit b64bb2b9b4
44 changed files with 270 additions and 225 deletions
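For orientation, a minimal sketch of how a client typically obtains the WebHDFS filesystem that this change relocates into hadoop-hdfs-client (editor's illustration, not part of the diff; the host name is a placeholder and 50070 is the default namenode HTTP port):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The "webhdfs" scheme resolves to WebHdfsFileSystem over the NameNode HTTP port.
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://namenode.example.com:50070"), conf);
    fs.mkdirs(new Path("/tmp/webhdfs-demo"));
    fs.close();
  }
}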


@ -60,7 +60,7 @@ public class RetryUtils {
boolean defaultRetryPolicyEnabled,
String retryPolicySpecKey,
String defaultRetryPolicySpec,
final Class<? extends Exception> remoteExceptionToRetry
final String remoteExceptionToRetry
) {
final RetryPolicy multipleLinearRandomRetry =
@ -94,7 +94,7 @@ public class RetryUtils {
final RetryPolicy p;
if (e instanceof RemoteException) {
final RemoteException re = (RemoteException)e;
p = remoteExceptionToRetry.getName().equals(re.getClassName())?
p = remoteExceptionToRetry.equals(re.getClassName())?
multipleLinearRandomRetry: RetryPolicies.TRY_ONCE_THEN_FAIL;
} else if (e instanceof IOException || e instanceof ServiceException) {
p = multipleLinearRandomRetry;
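The RetryUtils change above swaps the Class<? extends Exception> parameter for a plain class-name string, so callers in hadoop-hdfs-client no longer need the server-side SafeModeException class on their classpath. A minimal sketch of the string-based match this enables (illustrative values only):

import org.apache.hadoop.ipc.RemoteException;

// The retry decision now compares the remote class name as a string.
String safeModeClassName =
    "org.apache.hadoop.hdfs.server.namenode.SafeModeException";
RemoteException re =
    new RemoteException(safeModeClassName, "Name node is in safe mode");
boolean retryWithBackoff = safeModeClassName.equals(re.getClassName());
// true -> multipleLinearRandomRetry applies; otherwise TRY_ONCE_THEN_FAIL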


@ -19,10 +19,17 @@ package org.apache.hadoop.hdfs;
import com.google.common.base.Joiner;
import com.google.common.collect.Maps;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -31,6 +38,7 @@ import java.io.UnsupportedEncodingException;
import java.net.InetSocketAddress;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
@ -39,6 +47,13 @@ import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICE
public class DFSUtilClient {
private static final Logger LOG = LoggerFactory.getLogger(
DFSUtilClient.class);
/**
* Converts a string to a byte array using UTF8 encoding.
*/
public static byte[] string2Bytes(String str) {
return str.getBytes(Charsets.UTF_8);
}
/**
* Converts a byte array to a string using UTF8 encoding.
*/
@ -113,6 +128,62 @@ public class DFSUtilClient {
}
}
/**
* Convert a LocatedBlocks to BlockLocations[]
* @param blocks a LocatedBlocks
* @return an array of BlockLocations
*/
public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) {
if (blocks == null) {
return new BlockLocation[0];
}
return locatedBlocks2Locations(blocks.getLocatedBlocks());
}
/**
* Convert a List<LocatedBlock> to BlockLocation[]
* @param blocks A List<LocatedBlock> to be converted
* @return converted array of BlockLocation
*/
public static BlockLocation[] locatedBlocks2Locations(
List<LocatedBlock> blocks) {
if (blocks == null) {
return new BlockLocation[0];
}
int nrBlocks = blocks.size();
BlockLocation[] blkLocations = new BlockLocation[nrBlocks];
if (nrBlocks == 0) {
return blkLocations;
}
int idx = 0;
for (LocatedBlock blk : blocks) {
assert idx < nrBlocks : "Incorrect index";
DatanodeInfo[] locations = blk.getLocations();
String[] hosts = new String[locations.length];
String[] xferAddrs = new String[locations.length];
String[] racks = new String[locations.length];
for (int hCnt = 0; hCnt < locations.length; hCnt++) {
hosts[hCnt] = locations[hCnt].getHostName();
xferAddrs[hCnt] = locations[hCnt].getXferAddr();
NodeBase node = new NodeBase(xferAddrs[hCnt],
locations[hCnt].getNetworkLocation());
racks[hCnt] = node.toString();
}
DatanodeInfo[] cachedLocations = blk.getCachedLocations();
String[] cachedHosts = new String[cachedLocations.length];
for (int i=0; i<cachedLocations.length; i++) {
cachedHosts[i] = cachedLocations[i].getHostName();
}
blkLocations[idx] = new BlockLocation(xferAddrs, hosts, cachedHosts,
racks,
blk.getStartOffset(),
blk.getBlockSize(),
blk.isCorrupt());
idx++;
}
return blkLocations;
}
/**
* Decode a specific range of bytes of the given byte array to a string
* using UTF8.
@ -234,4 +305,42 @@ public class DFSUtilClient {
}
return value;
}
/**
* Whether the pathname is valid. Currently prohibits relative paths,
* names which contain a ":" or "//", or other non-canonical paths.
*/
public static boolean isValidName(String src) {
// Path must be absolute.
if (!src.startsWith(Path.SEPARATOR)) {
return false;
}
// Check for ".." "." ":" "/"
String[] components = StringUtils.split(src, '/');
for (int i = 0; i < components.length; i++) {
String element = components[i];
if (element.equals(".") ||
(element.contains(":")) ||
(element.contains("/"))) {
return false;
}
// ".." is allowed in path starting with /.reserved/.inodes
if (element.equals("..")) {
if (components.length > 4
&& components[1].equals(".reserved")
&& components[2].equals(".inodes")) {
continue;
}
return false;
}
// The string may start or end with a /, but not have
// "//" in the middle.
if (element.isEmpty() && i != components.length - 1 &&
i != 0) {
return false;
}
}
return true;
}
}
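A few illustrative calls against the rules implemented above (editor's sketch; the inputs are made up):

DFSUtilClient.isValidName("/user/alice/data.txt");        // true:  absolute and canonical
DFSUtilClient.isValidName("user/alice/data.txt");         // false: not absolute
DFSUtilClient.isValidName("/a//b");                       // false: "//" in the middle
DFSUtilClient.isValidName("/a/../b");                     // false: ".." outside /.reserved/.inodes
DFSUtilClient.isValidName("/.reserved/.inodes/16387/.."); // true:  ".." allowed under reserved inode paths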


@ -26,6 +26,7 @@ public interface HdfsClientConfigKeys {
long DFS_BLOCK_SIZE_DEFAULT = 128*1024*1024;
String DFS_REPLICATION_KEY = "dfs.replication";
short DFS_REPLICATION_DEFAULT = 3;
String DFS_WEBHDFS_USER_PATTERN_KEY = "dfs.webhdfs.user.provider.user.pattern";
String DFS_WEBHDFS_USER_PATTERN_DEFAULT = "^[A-Za-z_][A-Za-z0-9._-]*[$]?$";
String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT =
"^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$";
@ -37,6 +38,10 @@ public interface HdfsClientConfigKeys {
int DFS_NAMENODE_HTTPS_PORT_DEFAULT = 50470;
String DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address";
String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes";
String DFS_WEBHDFS_ENABLED_KEY = "dfs.webhdfs.enabled";
boolean DFS_WEBHDFS_ENABLED_DEFAULT = true;
String DFS_NAMENODE_HTTP_PORT_KEY = "dfs.http.port";
String DFS_NAMENODE_HTTPS_PORT_KEY = "dfs.https.port";
/** dfs.client.retry configuration properties */
interface Retry {


@ -38,4 +38,8 @@ public interface HdfsConstantsClient {
* URI.
*/
String HA_DT_SERVICE_PREFIX = "ha-";
// The class name of SafeModeException. The FileSystem should retry if it
// sees this exception over an RPC call.
String SAFEMODE_EXCEPTION_CLASS_NAME = "org.apache.hadoop.hdfs.server" +
".namenode.SafeModeException";
}


@ -29,7 +29,7 @@ import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
@ -110,7 +110,7 @@ class JsonUtilClient {
final String localName = (String) m.get("pathSuffix");
final WebHdfsConstants.PathType type = WebHdfsConstants.PathType.valueOf((String) m.get("type"));
final byte[] symlink = type != WebHdfsConstants.PathType.SYMLINK? null
: DFSUtil.string2Bytes((String) m.get("symlink"));
: DFSUtilClient.string2Bytes((String) m.get("symlink"));
final long len = ((Number) m.get("length")).longValue();
final String owner = (String) m.get("owner");
@ -130,7 +130,8 @@ class JsonUtilClient {
HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
return new HdfsFileStatus(len, type == WebHdfsConstants.PathType.DIRECTORY, replication,
blockSize, mTime, aTime, permission, owner, group,
symlink, DFSUtil.string2Bytes(localName), fileId, childrenNum, null,
symlink, DFSUtilClient.string2Bytes(localName),
fileId, childrenNum, null,
storagePolicy);
}


@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs.web;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.io.Text;
import com.google.common.annotations.VisibleForTesting;
@ -39,10 +39,10 @@ public class SWebHdfsFileSystem extends WebHdfsFileSystem {
return WebHdfsConstants.SWEBHDFS_TOKEN_KIND;
}
@Override
@VisibleForTesting
@Override
public int getDefaultPort() {
return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY,
DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT);
return getConf().getInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY,
HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT);
}
}


@ -55,8 +55,8 @@ final class TokenAspect<T extends FileSystem & Renewable> {
@Override
public boolean handleKind(Text kind) {
return kind.equals(HftpFileSystem.TOKEN_KIND)
|| kind.equals(HsftpFileSystem.TOKEN_KIND)
return kind.equals(WebHdfsConstants.HFTP_TOKEN_KIND)
|| kind.equals(WebHdfsConstants.HSFTP_TOKEN_KIND)
|| kind.equals(WebHdfsConstants.WEBHDFS_TOKEN_KIND)
|| kind.equals(WebHdfsConstants.SWEBHDFS_TOKEN_KIND);
}
@ -87,10 +87,10 @@ final class TokenAspect<T extends FileSystem & Renewable> {
}
private static String getSchemeByKind(Text kind) {
if (kind.equals(HftpFileSystem.TOKEN_KIND)) {
return HftpFileSystem.SCHEME;
} else if (kind.equals(HsftpFileSystem.TOKEN_KIND)) {
return HsftpFileSystem.SCHEME;
if (kind.equals(WebHdfsConstants.HFTP_TOKEN_KIND)) {
return WebHdfsConstants.HFTP_SCHEME;
} else if (kind.equals(WebHdfsConstants.HSFTP_TOKEN_KIND)) {
return WebHdfsConstants.HSFTP_SCHEME;
} else if (kind.equals(WebHdfsConstants.WEBHDFS_TOKEN_KIND)) {
return WebHdfsConstants.WEBHDFS_SCHEME;
} else if (kind.equals(WebHdfsConstants.SWEBHDFS_TOKEN_KIND)) {


@ -174,7 +174,7 @@ public class URLConnectionFactory {
/**
* Sets timeout parameters on the given URLConnection.
*
*
* @param connection
* URLConnection to set
* @param socketTimeout


@ -23,6 +23,10 @@ import org.apache.hadoop.io.Text;
@InterfaceAudience.Private
public class WebHdfsConstants {
public static final String HFTP_SCHEME = "hftp";
public static final Text HFTP_TOKEN_KIND = new Text("HFTP delegation");
public static final Text HSFTP_TOKEN_KIND = new Text("HSFTP delegation");
public static final String HSFTP_SCHEME = "hsftp";
public static final String WEBHDFS_SCHEME = "webhdfs";
public static final String SWEBHDFS_SCHEME = "swebhdfs";
public static final Text WEBHDFS_TOKEN_KIND = new Text("WEBHDFS delegation");


@ -56,14 +56,12 @@ import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.HAUtilClient;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.hdfs.web.resources.*;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam.Op;
import org.apache.hadoop.io.Text;
@ -145,8 +143,8 @@ public class WebHdfsFileSystem extends FileSystem
setConf(conf);
/** set user pattern based on configuration file */
UserParam.setUserPattern(conf.get(
DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
connectionFactory = URLConnectionFactory
.newDefaultURLConnectionFactory(conf);
@ -172,7 +170,7 @@ public class WebHdfsFileSystem extends FileSystem
HdfsClientConfigKeys.HttpClient.RETRY_POLICY_ENABLED_DEFAULT,
HdfsClientConfigKeys.HttpClient.RETRY_POLICY_SPEC_KEY,
HdfsClientConfigKeys.HttpClient.RETRY_POLICY_SPEC_DEFAULT,
SafeModeException.class);
HdfsConstantsClient.SAFEMODE_EXCEPTION_CLASS_NAME);
} else {
int maxFailoverAttempts = conf.getInt(
@ -209,8 +207,9 @@ public class WebHdfsFileSystem extends FileSystem
/** Is WebHDFS enabled in conf? */
public static boolean isEnabled(final Configuration conf, final Log log) {
final boolean b = conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
DFSConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT);
final boolean b = conf.getBoolean(
HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT);
return b;
}
@ -230,7 +229,7 @@ public class WebHdfsFileSystem extends FileSystem
if(LOG.isDebugEnabled()) {
LOG.debug("Using UGI token: " + token);
}
canRefreshDelegationToken = false;
canRefreshDelegationToken = false;
} else {
token = getDelegationToken(null);
if (token != null) {
@ -263,15 +262,15 @@ public class WebHdfsFileSystem extends FileSystem
@Override
@VisibleForTesting
public int getDefaultPort() {
return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
return getConf().getInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
}
@Override
public URI getUri() {
return this.uri;
}
@Override
protected URI canonicalizeUri(URI uri) {
return NetUtils.getCanonicalUri(uri, getDefaultPort());
@ -295,8 +294,8 @@ public class WebHdfsFileSystem extends FileSystem
@Override
public synchronized void setWorkingDirectory(final Path dir) {
String result = makeAbsolute(dir).toUri().getPath();
if (!DFSUtil.isValidName(result)) {
throw new IllegalArgumentException("Invalid DFS directory name " +
if (!DFSUtilClient.isValidName(result)) {
throw new IllegalArgumentException("Invalid DFS directory name " +
result);
}
workingDir = makeAbsolute(dir);
@ -375,10 +374,10 @@ public class WebHdfsFileSystem extends FileSystem
/**
* Convert an exception to an IOException.
*
*
* For a non-IOException, wrap it with IOException.
* For a RemoteException, unwrap it.
* For an IOException which is not a RemoteException, return it.
* For an IOException which is not a RemoteException, return it.
*/
private static IOException toIOException(Exception e) {
if (!(e instanceof IOException)) {
@ -421,9 +420,9 @@ public class WebHdfsFileSystem extends FileSystem
}
return url;
}
Param<?,?>[] getAuthParameters(final HttpOpParam.Op op) throws IOException {
List<Param<?,?>> authParams = Lists.newArrayList();
List<Param<?,?>> authParams = Lists.newArrayList();
// Skip adding delegation token for token operations because these
// operations require authentication.
Token<?> token = null;
@ -502,11 +501,11 @@ public class WebHdfsFileSystem extends FileSystem
/**
* Two-step requests redirected to a DN
*
*
* Create/Append:
* Step 1) Submit a Http request with neither auto-redirect nor data.
* Step 1) Submit a Http request with neither auto-redirect nor data.
* Step 2) Submit another Http request with the URL from the Location header with data.
*
*
* The reason for the two-step create/append is to prevent clients from
* sending out the data before the redirect. This issue is addressed by the
* "Expect: 100-continue" header in HTTP/1.1; see RFC 2616, Section 8.2.3.
@ -514,7 +513,7 @@ public class WebHdfsFileSystem extends FileSystem
* and Java 6 http client), which do not correctly implement "Expect:
* 100-continue". The two-step create/append is a temporary workaround for
* the software library bugs.
*
*
* Open/Checksum
* Also implements two-step connects for other operations redirected to
* a DN such as open and checksum
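// Editor's sketch of the two-step create described above (not part of this diff;
// the host, path and user are placeholders). Step 1 goes to the NameNode with
// redirects disabled and no body; step 2 streams the data to the DataNode URL
// returned in the Location header.
URL nnUrl = new URL(
    "http://namenode.example.com:50070/webhdfs/v1/tmp/demo?op=CREATE&user.name=alice");
HttpURLConnection step1 = (HttpURLConnection) nnUrl.openConnection();
step1.setRequestMethod("PUT");
step1.setInstanceFollowRedirects(false);     // expect 307 with a Location header
step1.connect();
String dnLocation = step1.getHeaderField("Location");
step1.disconnect();

HttpURLConnection step2 = (HttpURLConnection) new URL(dnLocation).openConnection();
step2.setRequestMethod("PUT");
step2.setDoOutput(true);                     // only this request carries the file data
OutputStream out = step2.getOutputStream();
out.write("hello, webhdfs".getBytes("UTF-8"));
out.close();
int status = step2.getResponseCode();        // 201 Created on success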
@ -523,7 +522,7 @@ public class WebHdfsFileSystem extends FileSystem
//redirect hostname and port
String redirectHost = null;
// resolve redirects for a DN operation unless already resolved
if (op.getRedirect() && !redirected) {
final HttpOpParam.Op redirectOp =
@ -553,7 +552,7 @@ public class WebHdfsFileSystem extends FileSystem
}
}
throw ioe;
}
}
}
private HttpURLConnection connect(final HttpOpParam.Op op, final URL url)
@ -565,7 +564,7 @@ public class WebHdfsFileSystem extends FileSystem
conn.setInstanceFollowRedirects(false);
switch (op.getType()) {
// if not sending a message body for a POST or PUT operation, need
// to ensure the server/proxy knows this
// to ensure the server/proxy knows this
case POST:
case PUT: {
conn.setDoOutput(true);
@ -673,21 +672,21 @@ public class WebHdfsFileSystem extends FileSystem
abstract class AbstractFsPathRunner<T> extends AbstractRunner<T> {
private final Path fspath;
private final Param<?,?>[] parameters;
AbstractFsPathRunner(final HttpOpParam.Op op, final Path fspath,
Param<?,?>... parameters) {
super(op, false);
this.fspath = fspath;
this.parameters = parameters;
}
AbstractFsPathRunner(final HttpOpParam.Op op, Param<?,?>[] parameters,
final Path fspath) {
super(op, false);
this.fspath = fspath;
this.parameters = parameters;
}
@Override
protected URL getUrl() throws IOException {
if (excludeDatanodes.getValue() != null) {
@ -708,7 +707,7 @@ public class WebHdfsFileSystem extends FileSystem
FsPathRunner(Op op, Path fspath, Param<?,?>... parameters) {
super(op, fspath, parameters);
}
@Override
Void getResponse(HttpURLConnection conn) throws IOException {
return null;
@ -723,12 +722,12 @@ public class WebHdfsFileSystem extends FileSystem
Param<?,?>... parameters) {
super(op, fspath, parameters);
}
FsPathResponseRunner(final HttpOpParam.Op op, Param<?,?>[] parameters,
final Path fspath) {
super(op, parameters, fspath);
}
@Override
final T getResponse(HttpURLConnection conn) throws IOException {
try {
@ -751,7 +750,7 @@ public class WebHdfsFileSystem extends FileSystem
conn.disconnect();
}
}
abstract T decodeResponse(Map<?,?> json) throws IOException;
}
@ -762,7 +761,7 @@ public class WebHdfsFileSystem extends FileSystem
FsPathBooleanRunner(Op op, Path fspath, Param<?,?>... parameters) {
super(op, fspath, parameters);
}
@Override
Boolean decodeResponse(Map<?,?> json) throws IOException {
return (Boolean)json.get("boolean");
@ -774,13 +773,13 @@ public class WebHdfsFileSystem extends FileSystem
*/
class FsPathOutputStreamRunner extends AbstractFsPathRunner<FSDataOutputStream> {
private final int bufferSize;
FsPathOutputStreamRunner(Op op, Path fspath, int bufferSize,
Param<?,?>... parameters) {
super(op, fspath, parameters);
this.bufferSize = bufferSize;
}
@Override
FSDataOutputStream getResponse(final HttpURLConnection conn)
throws IOException {
@ -812,7 +811,7 @@ public class WebHdfsFileSystem extends FileSystem
return conn;
}
}
/**
* Used by open() which tracks the resolved url itself
*/
@ -926,26 +925,26 @@ public class WebHdfsFileSystem extends FileSystem
new RenameOptionSetParam(options)
).run();
}
@Override
public void setXAttr(Path p, String name, byte[] value,
public void setXAttr(Path p, String name, byte[] value,
EnumSet<XAttrSetFlag> flag) throws IOException {
statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PutOpParam.Op.SETXATTR;
if (value != null) {
new FsPathRunner(op, p, new XAttrNameParam(name), new XAttrValueParam(
XAttrCodec.encodeValue(value, XAttrCodec.HEX)),
XAttrCodec.encodeValue(value, XAttrCodec.HEX)),
new XAttrSetFlagParam(flag)).run();
} else {
new FsPathRunner(op, p, new XAttrNameParam(name),
new FsPathRunner(op, p, new XAttrNameParam(name),
new XAttrSetFlagParam(flag)).run();
}
}
@Override
public byte[] getXAttr(Path p, final String name) throws IOException {
final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS;
return new FsPathResponseRunner<byte[]>(op, p, new XAttrNameParam(name),
return new FsPathResponseRunner<byte[]>(op, p, new XAttrNameParam(name),
new XAttrEncodingParam(XAttrCodec.HEX)) {
@Override
byte[] decodeResponse(Map<?, ?> json) throws IOException {
@ -953,11 +952,11 @@ public class WebHdfsFileSystem extends FileSystem
}
}.run();
}
@Override
public Map<String, byte[]> getXAttrs(Path p) throws IOException {
final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS;
return new FsPathResponseRunner<Map<String, byte[]>>(op, p,
return new FsPathResponseRunner<Map<String, byte[]>>(op, p,
new XAttrEncodingParam(XAttrCodec.HEX)) {
@Override
Map<String, byte[]> decodeResponse(Map<?, ?> json) throws IOException {
@ -965,18 +964,18 @@ public class WebHdfsFileSystem extends FileSystem
}
}.run();
}
@Override
public Map<String, byte[]> getXAttrs(Path p, final List<String> names)
public Map<String, byte[]> getXAttrs(Path p, final List<String> names)
throws IOException {
Preconditions.checkArgument(names != null && !names.isEmpty(),
Preconditions.checkArgument(names != null && !names.isEmpty(),
"XAttr names cannot be null or empty.");
Param<?,?>[] parameters = new Param<?,?>[names.size() + 1];
for (int i = 0; i < parameters.length - 1; i++) {
parameters[i] = new XAttrNameParam(names.get(i));
}
parameters[parameters.length - 1] = new XAttrEncodingParam(XAttrCodec.HEX);
final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS;
return new FsPathResponseRunner<Map<String, byte[]>>(op, parameters, p) {
@Override
@ -985,7 +984,7 @@ public class WebHdfsFileSystem extends FileSystem
}
}.run();
}
@Override
public List<String> listXAttrs(Path p) throws IOException {
final HttpOpParam.Op op = GetOpParam.Op.LISTXATTRS;
@ -1065,7 +1064,7 @@ public class WebHdfsFileSystem extends FileSystem
}
@Override
public Path createSnapshot(final Path path, final String snapshotName)
public Path createSnapshot(final Path path, final String snapshotName)
throws IOException {
statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PutOpParam.Op.CREATESNAPSHOT;
@ -1119,14 +1118,14 @@ public class WebHdfsFileSystem extends FileSystem
@Override
public long getDefaultBlockSize() {
return getConf().getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
return getConf().getLongBytes(HdfsClientConfigKeys.DFS_BLOCK_SIZE_KEY,
HdfsClientConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
}
@Override
public short getDefaultReplication() {
return (short)getConf().getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
DFSConfigKeys.DFS_REPLICATION_DEFAULT);
return (short)getConf().getInt(HdfsClientConfigKeys.DFS_REPLICATION_KEY,
HdfsClientConfigKeys.DFS_REPLICATION_DEFAULT);
}
@Override
@ -1236,7 +1235,7 @@ public class WebHdfsFileSystem extends FileSystem
final URL offsetUrl = offset == 0L? url
: new URL(url + "&" + new OffsetParam(offset));
return new URLRunner(GetOpParam.Op.OPEN, offsetUrl, resolved).run();
}
}
}
private static final String OFFSET_PARAM_PREFIX = OffsetParam.NAME + "=";
@ -1367,7 +1366,7 @@ public class WebHdfsFileSystem extends FileSystem
new TokenArgumentParam(token.encodeToUrlString())
).run();
}
@Override
public BlockLocation[] getFileBlockLocations(final FileStatus status,
final long offset, final long length) throws IOException {
@ -1378,7 +1377,7 @@ public class WebHdfsFileSystem extends FileSystem
}
@Override
public BlockLocation[] getFileBlockLocations(final Path p,
public BlockLocation[] getFileBlockLocations(final Path p,
final long offset, final long length) throws IOException {
statistics.incrementReadOps(1);
@ -1387,7 +1386,7 @@ public class WebHdfsFileSystem extends FileSystem
new OffsetParam(offset), new LengthParam(length)) {
@Override
BlockLocation[] decodeResponse(Map<?,?> json) throws IOException {
return DFSUtil.locatedBlocks2Locations(
return DFSUtilClient.locatedBlocks2Locations(
JsonUtilClient.toLocatedBlocks(json));
}
}.run();
@ -1416,7 +1415,7 @@ public class WebHdfsFileSystem extends FileSystem
public MD5MD5CRC32FileChecksum getFileChecksum(final Path p
) throws IOException {
statistics.incrementReadOps(1);
final HttpOpParam.Op op = GetOpParam.Op.GETFILECHECKSUM;
return new FsPathResponseRunner<MD5MD5CRC32FileChecksum>(op, p) {
@Override


@ -38,6 +38,7 @@ import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.lib.wsrs.EnumSetParam;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
@ -369,7 +370,7 @@ public class HttpFSFileSystem extends FileSystem
*/
@Override
protected int getDefaultPort() {
return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
return getConf().getInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
}


@ -149,6 +149,8 @@ Release 2.8.0 - UNRELEASED
HDFS-8215. Refactor NamenodeFsck#check method. (Takanobu Asanuma
via szetszwo)
HDFS-8052. Move WebHdfsFileSystem into hadoop-hdfs-client. (wheat9)
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than


@ -48,7 +48,6 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.token.Token;
import org.apache.htrace.Sampler;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
@ -296,7 +295,7 @@ class BlockStorageLocationUtil {
List<LocatedBlock> blocks,
Map<LocatedBlock, List<VolumeId>> blockVolumeIds) throws IOException {
// Construct the final return value of VolumeBlockLocation[]
BlockLocation[] locations = DFSUtil.locatedBlocks2Locations(blocks);
BlockLocation[] locations = DFSUtilClient.locatedBlocks2Locations(blocks);
List<BlockStorageLocation> volumeBlockLocs =
new ArrayList<BlockStorageLocation>(locations.length);
for (int i = 0; i < locations.length; i++) {


@ -919,7 +919,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
TraceScope scope = getPathTraceScope("getBlockLocations", src);
try {
LocatedBlocks blocks = getLocatedBlocks(src, start, length);
BlockLocation[] locations = DFSUtil.locatedBlocks2Locations(blocks);
BlockLocation[] locations = DFSUtilClient.locatedBlocks2Locations(blocks);
HdfsBlockLocation[] hdfsLocations = new HdfsBlockLocation[locations.length];
for (int i = 0; i < locations.length; i++) {
hdfsLocations[i] = new HdfsBlockLocation(locations[i], blocks.get(i));


@ -102,7 +102,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
"dfs.namenode.path.based.cache.block.map.allocation.percent";
public static final float DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT_DEFAULT = 0.25f;
public static final String DFS_NAMENODE_HTTP_PORT_KEY = "dfs.http.port";
public static final int DFS_NAMENODE_HTTP_PORT_DEFAULT =
HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
public static final String DFS_NAMENODE_HTTP_ADDRESS_KEY =
@ -163,9 +162,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
*/
public static final String DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT =
"org.apache.hadoop.hdfs.web.AuthFilter".toString();
public static final String DFS_WEBHDFS_ENABLED_KEY = "dfs.webhdfs.enabled";
public static final boolean DFS_WEBHDFS_ENABLED_DEFAULT = true;
public static final String DFS_WEBHDFS_USER_PATTERN_KEY = "dfs.webhdfs.user.provider.user.pattern";
@Deprecated
public static final String DFS_WEBHDFS_USER_PATTERN_KEY =
HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY;
@Deprecated
public static final String DFS_WEBHDFS_USER_PATTERN_DEFAULT =
HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT;
public static final String DFS_PERMISSIONS_ENABLED_KEY = "dfs.permissions.enabled";
@ -305,7 +305,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
//Following keys have no defaults
public static final String DFS_DATANODE_DATA_DIR_KEY = "dfs.datanode.data.dir";
public static final String DFS_NAMENODE_HTTPS_PORT_KEY = "dfs.https.port";
public static final int DFS_NAMENODE_HTTPS_PORT_DEFAULT =
HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
public static final String DFS_NAMENODE_HTTPS_ADDRESS_KEY =


@ -229,37 +229,7 @@ public class DFSUtil {
* names which contain a ":" or "//", or other non-canonical paths.
*/
public static boolean isValidName(String src) {
// Path must be absolute.
if (!src.startsWith(Path.SEPARATOR)) {
return false;
}
// Check for ".." "." ":" "/"
String[] components = StringUtils.split(src, '/');
for (int i = 0; i < components.length; i++) {
String element = components[i];
if (element.equals(".") ||
(element.indexOf(":") >= 0) ||
(element.indexOf("/") >= 0)) {
return false;
}
// ".." is allowed in path starting with /.reserved/.inodes
if (element.equals("..")) {
if (components.length > 4
&& components[1].equals(FSDirectory.DOT_RESERVED_STRING)
&& components[2].equals(FSDirectory.DOT_INODES_STRING)) {
continue;
}
return false;
}
// The string may start or end with a /, but not have
// "//" in the middle.
if (element.isEmpty() && i != components.length - 1 &&
i != 0) {
return false;
}
}
return true;
return DFSUtilClient.isValidName(src);
}
/**
@ -330,7 +300,7 @@ public class DFSUtil {
* Converts a string to a byte array using UTF8 encoding.
*/
public static byte[] string2Bytes(String str) {
return str.getBytes(Charsets.UTF_8);
return DFSUtilClient.string2Bytes(str);
}
/**
@ -476,61 +446,6 @@ public class DFSUtil {
}
return result;
}
/**
* Convert a LocatedBlocks to BlockLocations[]
* @param blocks a LocatedBlocks
* @return an array of BlockLocations
*/
public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) {
if (blocks == null) {
return new BlockLocation[0];
}
return locatedBlocks2Locations(blocks.getLocatedBlocks());
}
/**
* Convert a List<LocatedBlock> to BlockLocation[]
* @param blocks A List<LocatedBlock> to be converted
* @return converted array of BlockLocation
*/
public static BlockLocation[] locatedBlocks2Locations(List<LocatedBlock> blocks) {
if (blocks == null) {
return new BlockLocation[0];
}
int nrBlocks = blocks.size();
BlockLocation[] blkLocations = new BlockLocation[nrBlocks];
if (nrBlocks == 0) {
return blkLocations;
}
int idx = 0;
for (LocatedBlock blk : blocks) {
assert idx < nrBlocks : "Incorrect index";
DatanodeInfo[] locations = blk.getLocations();
String[] hosts = new String[locations.length];
String[] xferAddrs = new String[locations.length];
String[] racks = new String[locations.length];
for (int hCnt = 0; hCnt < locations.length; hCnt++) {
hosts[hCnt] = locations[hCnt].getHostName();
xferAddrs[hCnt] = locations[hCnt].getXferAddr();
NodeBase node = new NodeBase(xferAddrs[hCnt],
locations[hCnt].getNetworkLocation());
racks[hCnt] = node.toString();
}
DatanodeInfo[] cachedLocations = blk.getCachedLocations();
String[] cachedHosts = new String[cachedLocations.length];
for (int i=0; i<cachedLocations.length; i++) {
cachedHosts[i] = cachedLocations[i].getHostName();
}
blkLocations[idx] = new BlockLocation(xferAddrs, hosts, cachedHosts,
racks,
blk.getStartOffset(),
blk.getBlockSize(),
blk.isCorrupt());
idx++;
}
return blkLocations;
}
/**
* Return configuration key of format key.suffix1.suffix2...suffixN


@ -404,7 +404,7 @@ public class NameNodeProxies {
HdfsClientConfigKeys.Retry.POLICY_ENABLED_DEFAULT,
HdfsClientConfigKeys.Retry.POLICY_SPEC_KEY,
HdfsClientConfigKeys.Retry.POLICY_SPEC_DEFAULT,
SafeModeException.class);
SafeModeException.class.getName());
final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(


@ -25,7 +25,7 @@ import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSUtilClient;
/**
* Interface that represents the over the wire information
@ -78,6 +78,6 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
isSymlink() ? new Path(getSymlink()) : null,
(getFullPath(path)).makeQualified(
defaultUri, null), // fully-qualify path
DFSUtil.locatedBlocks2Locations(getBlockLocations()));
DFSUtilClient.locatedBlocks2Locations(getBlockLocations()));
}
}


@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;


@ -46,8 +46,6 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretMan
import org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet;
import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;
import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
import org.apache.hadoop.hdfs.web.HftpFileSystem;
import org.apache.hadoop.hdfs.web.HsftpFileSystem;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
@ -243,7 +241,7 @@ public class DelegationTokenFetcher {
dis = new DataInputStream(in);
ts.readFields(dis);
for (Token<?> token : ts.getAllTokens()) {
token.setKind(isHttps ? HsftpFileSystem.TOKEN_KIND : HftpFileSystem.TOKEN_KIND);
token.setKind(isHttps ? WebHdfsConstants.HSFTP_TOKEN_KIND : WebHdfsConstants.HFTP_TOKEN_KIND);
SecurityUtil.setTokenService(token, serviceAddr);
}
return ts;


@ -48,6 +48,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
@ -79,7 +80,6 @@ import org.xml.sax.helpers.XMLReaderFactory;
@InterfaceStability.Evolving
public class HftpFileSystem extends FileSystem
implements DelegationTokenRenewer.Renewable, TokenAspect.TokenManagementDelegator {
public static final String SCHEME = "hftp";
static {
HttpURLConnection.setFollowRedirects(true);
@ -87,8 +87,6 @@ public class HftpFileSystem extends FileSystem
URLConnectionFactory connectionFactory;
public static final Text TOKEN_KIND = new Text("HFTP delegation");
protected UserGroupInformation ugi;
private URI hftpURI;
@ -123,7 +121,7 @@ public class HftpFileSystem extends FileSystem
@Override
protected int getDefaultPort() {
return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
return getConf().getInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
}
@ -168,7 +166,7 @@ public class HftpFileSystem extends FileSystem
*/
@Override
public String getScheme() {
return SCHEME;
return WebHdfsConstants.HFTP_SCHEME;
}
/**
@ -176,7 +174,7 @@ public class HftpFileSystem extends FileSystem
* be overridden by HsFtpFileSystem.
*/
protected void initTokenAspect() {
tokenAspect = new TokenAspect<HftpFileSystem>(this, tokenServiceName, TOKEN_KIND);
tokenAspect = new TokenAspect<HftpFileSystem>(this, tokenServiceName, WebHdfsConstants.HFTP_TOKEN_KIND);
}
@Override


@ -21,7 +21,7 @@ package org.apache.hadoop.hdfs.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
/**
* An implementation of a protocol for accessing filesystems over HTTPS. The
@ -34,8 +34,6 @@ import org.apache.hadoop.io.Text;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class HsftpFileSystem extends HftpFileSystem {
public static final Text TOKEN_KIND = new Text("HSFTP delegation");
public static final String SCHEME = "hsftp";
/**
* Return the protocol scheme for the FileSystem.
@ -45,7 +43,7 @@ public class HsftpFileSystem extends HftpFileSystem {
*/
@Override
public String getScheme() {
return SCHEME;
return WebHdfsConstants.HSFTP_SCHEME;
}
/**
@ -59,12 +57,12 @@ public class HsftpFileSystem extends HftpFileSystem {
@Override
protected void initTokenAspect() {
tokenAspect = new TokenAspect<HsftpFileSystem>(this, tokenServiceName,
TOKEN_KIND);
WebHdfsConstants.HSFTP_TOKEN_KIND);
}
@Override
protected int getDefaultPort() {
return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY,
return getConf().getInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY,
DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT);
}
}


@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
@ -86,7 +87,7 @@ abstract public class TestSymlinkHdfs extends SymlinkBaseTest {
@BeforeClass
public static void beforeClassSetup() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.set(FsPermission.UMASK_LABEL, "000");
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, 0);
cluster = new MiniDFSCluster.Builder(conf).build();


@ -112,7 +112,7 @@ public class TestDFSUtil {
List<LocatedBlock> ls = Arrays.asList(l1, l2);
LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null);
BlockLocation[] bs = DFSUtil.locatedBlocks2Locations(lbs);
BlockLocation[] bs = DFSUtilClient.locatedBlocks2Locations(lbs);
assertTrue("expected 2 blocks but got " + bs.length,
bs.length == 2);
@ -128,7 +128,7 @@ public class TestDFSUtil {
corruptCount == 1);
// test an empty location
bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
bs = DFSUtilClient.locatedBlocks2Locations(new LocatedBlocks());
assertEquals(0, bs.length);
}


@ -64,6 +64,7 @@ import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.VolumeId;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
@ -500,7 +501,7 @@ public class TestDistributedFileSystem {
RAN.setSeed(seed);
final Configuration conf = getTestConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final FileSystem hdfs = cluster.getFileSystem();


@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
@ -792,7 +793,7 @@ public class TestQuota {
Configuration conf = new HdfsConfiguration();
final int BLOCK_SIZE = 6 * 1024;
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
@ -854,7 +855,7 @@ public class TestQuota {
Configuration conf = new HdfsConfiguration();
final int BLOCK_SIZE = 6 * 1024;
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
// Make it relinquish locks. When run serially, the result should
// be identical.
conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY, 2);


@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@ -68,7 +69,7 @@ public class TestDelegationToken {
@Before
public void setUp() throws Exception {
config = new HdfsConfiguration();
config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
config.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
config.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);


@ -40,6 +40,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
@ -98,7 +99,7 @@ public class TestDelegationTokenForProxyUser {
@BeforeClass
public static void setUp() throws Exception {
config = new HdfsConfiguration();
config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
config.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
config.setLong(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
config.setLong(


@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.web.HftpFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
@ -116,7 +117,7 @@ public class TestAuditLogs {
final long precision = 1L;
conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY, useAsyncLog);
util = new DFSTestUtil.Builder().setName("TestAuditAllowed").
setNumFiles(20).build();


@ -22,6 +22,7 @@ import static org.junit.Assert.assertThat;
import static org.hamcrest.core.Is.is;
import static org.hamcrest.core.IsNot.not;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.junit.Test;
import org.apache.hadoop.fs.FileUtil;
@ -194,7 +195,7 @@ public class TestNameNodeRespectsBindHostKeys {
private static void setupSsl() throws Exception {
Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");


@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.web.resources.ExceptionHandler;
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
@ -71,7 +72,7 @@ public class TestFSMainOperationsWebHdfs extends FSMainOperationsBaseTest {
@BeforeClass
public static void setupCluster() {
final Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();


@ -61,7 +61,7 @@ public class TestHftpDelegationToken {
DelegationTokenIdentifier.HDFS_DELEGATION_KIND, new Text(
"127.0.0.1:8020"));
Credentials cred = new Credentials();
cred.addToken(HftpFileSystem.TOKEN_KIND, token);
cred.addToken(WebHdfsConstants.HFTP_TOKEN_KIND, token);
ByteArrayOutputStream os = new ByteArrayOutputStream();
cred.write(new DataOutputStream(os));
@ -82,12 +82,12 @@ public class TestHftpDelegationToken {
new String[] { "bar" });
TokenAspect<HftpFileSystem> tokenAspect = new TokenAspect<HftpFileSystem>(
fs, SecurityUtil.buildTokenService(uri), HftpFileSystem.TOKEN_KIND);
fs, SecurityUtil.buildTokenService(uri), WebHdfsConstants.HFTP_TOKEN_KIND);
tokenAspect.initDelegationToken(ugi);
tokenAspect.ensureTokenInitialized();
Assert.assertSame(HftpFileSystem.TOKEN_KIND, fs.getRenewToken().getKind());
Assert.assertSame(WebHdfsConstants.HFTP_TOKEN_KIND, fs.getRenewToken().getKind());
Token<?> tok = (Token<?>) Whitebox.getInternalState(fs, "delegationToken");
Assert.assertNotSame("Not making a copy of the remote token", token, tok);


@ -41,6 +41,7 @@ import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@ -313,7 +314,7 @@ public class TestHftpFileSystem {
@Test
public void testHftpCustomDefaultPorts() throws IOException {
Configuration conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
URI uri = URI.create("hftp://localhost");
HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
@ -343,7 +344,7 @@ public class TestHftpFileSystem {
@Test
public void testHftpCustomUriPortWithCustomDefaultPorts() throws IOException {
Configuration conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
URI uri = URI.create("hftp://localhost:789");
HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
@ -386,8 +387,8 @@ public class TestHftpFileSystem {
@Test
public void testHsftpCustomDefaultPorts() throws IOException {
Configuration conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);
conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);
URI uri = URI.create("hsftp://localhost");
HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
@ -414,8 +415,8 @@ public class TestHftpFileSystem {
@Test
public void testHsftpCustomUriPortWithCustomDefaultPorts() throws IOException {
Configuration conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);
conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);
URI uri = URI.create("hsftp://localhost:789");
HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);


@ -22,9 +22,7 @@ import static org.junit.Assert.assertEquals;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.junit.Before;
import org.junit.Test;
@ -36,8 +34,8 @@ public class TestHttpFSPorts {
@Before
public void setupConfig() {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);
conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);
}
@Test


@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
@ -52,7 +53,7 @@ public class TestHttpsFileSystem {
@BeforeClass
public static void setUp() throws Exception {
conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");


@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestDFSClientRetries;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@ -283,7 +284,8 @@ public class TestWebHDFS {
@Test(timeout=300000)
public void testNumericalUserName() throws Exception {
final Configuration conf = WebHdfsTestUtil.createConf();
conf.set(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, "^[A-Za-z0-9_][A-Za-z0-9._-]*[$]?$");
conf.set(HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, "^[A-Za-z0-9_][A-Za-z0-9" +
"._-]*[$]?$");
final MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
@ -341,7 +343,7 @@ public class TestWebHDFS {
@Test
public void testWebHdfsEnabledByDefault() throws Exception {
Configuration conf = new HdfsConfiguration();
Assert.assertTrue(conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
Assert.assertTrue(conf.getBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
false));
}


@ -42,8 +42,8 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.web.resources.*;
import org.apache.hadoop.hdfs.web.resources.NamenodeAddressParam;
import org.apache.hadoop.io.IOUtils;
@ -60,7 +60,7 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
private UserGroupInformation ugi;
static {
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
cluster.waitActive();


@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.web.resources.*;
import org.apache.hadoop.http.HttpConfig;
@ -204,7 +205,7 @@ public class TestWebHdfsTokens {
String keystoresDir;
String sslConfDir;
clusterConf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
clusterConf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
clusterConf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
clusterConf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
clusterConf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");


@ -27,11 +27,11 @@ import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
@ -71,7 +71,7 @@ public class TestWebHdfsWithMultipleNameNodes {
throws Exception {
LOG.info("nNameNodes=" + nNameNodes + ", nDataNodes=" + nDataNodes);
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))


@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
import org.apache.hadoop.hdfs.web.resources.Param;
import org.apache.hadoop.security.UserGroupInformation;
@ -41,7 +42,7 @@ public class WebHdfsTestUtil {
public static Configuration createConf() {
final Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
return conf;
}


@ -38,8 +38,8 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
import org.apache.hadoop.hdfs.web.HftpFileSystem;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
@ -219,7 +219,7 @@ public class TestDelegationTokenRemoteFetcher {
"renewer"), new Text("realuser")).getBytes();
Text service = new Text(serviceUri.toString());
return new Token<DelegationTokenIdentifier>(ident, pw,
HftpFileSystem.TOKEN_KIND, service);
WebHdfsConstants.HFTP_TOKEN_KIND, service);
}
private interface Handler {