HDFS-8052. Move WebHdfsFileSystem into hadoop-hdfs-client. Contributed by Haohui Mai.

Haohui Mai 2015-04-23 17:33:05 -07:00
parent 0b3f8957a8
commit bcf89ddc7d
22 changed files with 198 additions and 163 deletions

View File

@ -60,7 +60,7 @@ public class RetryUtils {
boolean defaultRetryPolicyEnabled,
String retryPolicySpecKey,
String defaultRetryPolicySpec,
final Class<? extends Exception> remoteExceptionToRetry
final String remoteExceptionToRetry
) {
final RetryPolicy multipleLinearRandomRetry =
@ -94,7 +94,7 @@ public class RetryUtils {
final RetryPolicy p;
if (e instanceof RemoteException) {
final RemoteException re = (RemoteException)e;
p = remoteExceptionToRetry.getName().equals(re.getClassName())?
p = remoteExceptionToRetry.equals(re.getClassName())?
multipleLinearRandomRetry: RetryPolicies.TRY_ONCE_THEN_FAIL;
} else if (e instanceof IOException || e instanceof ServiceException) {
p = multipleLinearRandomRetry;
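Switching the parameter from Class<? extends Exception> to a String lets the retry logic in hadoop-common match a server-side exception class it can no longer import once WebHdfsFileSystem lives in hadoop-hdfs-client. A minimal sketch of the matching idea (the class and method names here are illustrative, not part of the patch):

import org.apache.hadoop.ipc.RemoteException;

class RetryMatchSketch {
  // Mirrors HdfsConstantsClient.SAFEMODE_EXCEPTION_CLASS_NAME added below.
  static final String SAFEMODE_EXCEPTION_CLASS_NAME =
      "org.apache.hadoop.hdfs.server.namenode.SafeModeException";

  static boolean retriable(Exception e) {
    if (e instanceof RemoteException) {
      // A RemoteException carries the server-side class name as a string,
      // so no compile-time dependency on SafeModeException is required.
      return SAFEMODE_EXCEPTION_CLASS_NAME.equals(
          ((RemoteException) e).getClassName());
    }
    return false;
  }
}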

View File

@ -19,10 +19,17 @@ package org.apache.hadoop.hdfs;
import com.google.common.base.Joiner;
import com.google.common.collect.Maps;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -31,6 +38,7 @@ import java.io.UnsupportedEncodingException;
import java.net.InetSocketAddress;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
@ -39,6 +47,13 @@ import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICE
public class DFSUtilClient {
private static final Logger LOG = LoggerFactory.getLogger(
DFSUtilClient.class);
/**
* Converts a string to a byte array using UTF8 encoding.
*/
public static byte[] string2Bytes(String str) {
return str.getBytes(Charsets.UTF_8);
}
/**
* Converts a byte array to a string using UTF8 encoding.
*/
@ -113,6 +128,62 @@ public class DFSUtilClient {
}
}
/**
* Convert a LocatedBlocks to BlockLocation[]
* @param blocks a LocatedBlocks
* @return an array of BlockLocation
*/
public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) {
if (blocks == null) {
return new BlockLocation[0];
}
return locatedBlocks2Locations(blocks.getLocatedBlocks());
}
/**
* Convert a List<LocatedBlock> to BlockLocation[]
* @param blocks A List<LocatedBlock> to be converted
* @return converted array of BlockLocation
*/
public static BlockLocation[] locatedBlocks2Locations(
List<LocatedBlock> blocks) {
if (blocks == null) {
return new BlockLocation[0];
}
int nrBlocks = blocks.size();
BlockLocation[] blkLocations = new BlockLocation[nrBlocks];
if (nrBlocks == 0) {
return blkLocations;
}
int idx = 0;
for (LocatedBlock blk : blocks) {
assert idx < nrBlocks : "Incorrect index";
DatanodeInfo[] locations = blk.getLocations();
String[] hosts = new String[locations.length];
String[] xferAddrs = new String[locations.length];
String[] racks = new String[locations.length];
for (int hCnt = 0; hCnt < locations.length; hCnt++) {
hosts[hCnt] = locations[hCnt].getHostName();
xferAddrs[hCnt] = locations[hCnt].getXferAddr();
NodeBase node = new NodeBase(xferAddrs[hCnt],
locations[hCnt].getNetworkLocation());
racks[hCnt] = node.toString();
}
DatanodeInfo[] cachedLocations = blk.getCachedLocations();
String[] cachedHosts = new String[cachedLocations.length];
for (int i=0; i<cachedLocations.length; i++) {
cachedHosts[i] = cachedLocations[i].getHostName();
}
blkLocations[idx] = new BlockLocation(xferAddrs, hosts, cachedHosts,
racks,
blk.getStartOffset(),
blk.getBlockSize(),
blk.isCorrupt());
idx++;
}
return blkLocations;
}
/**
* Decode a specific range of bytes of the given byte array to a string
* using UTF8.
@ -234,4 +305,42 @@ public class DFSUtilClient {
}
return value;
}
/**
* Whether the pathname is valid. Currently prohibits relative paths,
* names which contain a ":" or "//", or other non-canonical paths.
*/
public static boolean isValidName(String src) {
// Path must be absolute.
if (!src.startsWith(Path.SEPARATOR)) {
return false;
}
// Check for ".." "." ":" "/"
String[] components = StringUtils.split(src, '/');
for (int i = 0; i < components.length; i++) {
String element = components[i];
if (element.equals(".") ||
(element.contains(":")) ||
(element.contains("/"))) {
return false;
}
// ".." is allowed in path starting with /.reserved/.inodes
if (element.equals("..")) {
if (components.length > 4
&& components[1].equals(".reserved")
&& components[2].equals(".inodes")) {
continue;
}
return false;
}
// The string may start or end with a /, but not have
// "//" in the middle.
if (element.isEmpty() && i != components.length - 1 &&
i != 0) {
return false;
}
}
return true;
}
}
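These helpers are copied here from DFSUtil so that client-side code has no dependency on the server module. A small, self-contained usage sketch (the class name and paths are made up for illustration):

import org.apache.hadoop.hdfs.DFSUtilClient;

public class DFSUtilClientSketch {
  public static void main(String[] args) {
    // UTF-8 round trip through the moved helpers.
    byte[] raw = DFSUtilClient.string2Bytes("/user/alice");
    System.out.println(DFSUtilClient.bytes2String(raw));          // /user/alice

    // Path validation: must be absolute; ".", ":" and "//" are rejected.
    System.out.println(DFSUtilClient.isValidName("/user/alice")); // true
    System.out.println(DFSUtilClient.isValidName("user/alice"));  // false (relative)
    System.out.println(DFSUtilClient.isValidName("/a//b"));       // false ("//" in the middle)
    // ".." is tolerated only under /.reserved/.inodes paths.
    System.out.println(DFSUtilClient.isValidName("/.reserved/.inodes/16386/..")); // true
  }
}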

View File

@ -26,6 +26,7 @@ public interface HdfsClientConfigKeys {
long DFS_BLOCK_SIZE_DEFAULT = 128*1024*1024;
String DFS_REPLICATION_KEY = "dfs.replication";
short DFS_REPLICATION_DEFAULT = 3;
String DFS_WEBHDFS_USER_PATTERN_KEY = "dfs.webhdfs.user.provider.user.pattern";
String DFS_WEBHDFS_USER_PATTERN_DEFAULT = "^[A-Za-z_][A-Za-z0-9._-]*[$]?$";
String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT =
"^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$";
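With the pattern constants now on the client side, WebHdfsFileSystem can validate user names without touching DFSConfigKeys. A quick illustration of what the default user pattern accepts (standalone java.util.regex, not part of the patch):

import java.util.regex.Pattern;

public class UserPatternSketch {
  public static void main(String[] args) {
    Pattern p = Pattern.compile("^[A-Za-z_][A-Za-z0-9._-]*[$]?$");
    System.out.println(p.matcher("alice").matches());        // true
    System.out.println(p.matcher("svc.account$").matches()); // true ('$' allowed only at the end)
    System.out.println(p.matcher("123user").matches());      // false: must start with a letter or '_'
  }
}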

View File

@ -38,4 +38,8 @@ public interface HdfsConstantsClient {
* URI.
*/
String HA_DT_SERVICE_PREFIX = "ha-";
// The name of the SafeModeException. FileSystem should retry if it sees
// the below exception in RPC
String SAFEMODE_EXCEPTION_CLASS_NAME = "org.apache.hadoop.hdfs.server" +
".namenode.SafeModeException";
}

View File

@ -29,7 +29,7 @@ import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
@ -110,7 +110,7 @@ class JsonUtilClient {
final String localName = (String) m.get("pathSuffix");
final WebHdfsConstants.PathType type = WebHdfsConstants.PathType.valueOf((String) m.get("type"));
final byte[] symlink = type != WebHdfsConstants.PathType.SYMLINK? null
: DFSUtil.string2Bytes((String) m.get("symlink"));
: DFSUtilClient.string2Bytes((String) m.get("symlink"));
final long len = ((Number) m.get("length")).longValue();
final String owner = (String) m.get("owner");
@ -130,7 +130,8 @@ class JsonUtilClient {
HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
return new HdfsFileStatus(len, type == WebHdfsConstants.PathType.DIRECTORY, replication,
blockSize, mTime, aTime, permission, owner, group,
symlink, DFSUtil.string2Bytes(localName), fileId, childrenNum, null,
symlink, DFSUtilClient.string2Bytes(localName),
fileId, childrenNum, null,
storagePolicy);
}

View File

@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs.web;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.io.Text;
public class SWebHdfsFileSystem extends WebHdfsFileSystem {
@ -39,6 +39,6 @@ public class SWebHdfsFileSystem extends WebHdfsFileSystem {
@Override
protected int getDefaultPort() {
return DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
return HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
}
}

View File

@ -174,7 +174,7 @@ public class URLConnectionFactory {
/**
* Sets timeout parameters on the given URLConnection.
*
*
* @param connection
* URLConnection to set
* @param socketTimeout

View File

@ -56,14 +56,12 @@ import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.HAUtilClient;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.hdfs.web.resources.*;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam.Op;
import org.apache.hadoop.io.Text;
@ -145,8 +143,8 @@ public class WebHdfsFileSystem extends FileSystem
setConf(conf);
/** set user pattern based on configuration file */
UserParam.setUserPattern(conf.get(
DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
connectionFactory = URLConnectionFactory
.newDefaultURLConnectionFactory(conf);
@ -173,7 +171,7 @@ public class WebHdfsFileSystem extends FileSystem
HdfsClientConfigKeys.HttpClient.RETRY_POLICY_ENABLED_DEFAULT,
HdfsClientConfigKeys.HttpClient.RETRY_POLICY_SPEC_KEY,
HdfsClientConfigKeys.HttpClient.RETRY_POLICY_SPEC_DEFAULT,
SafeModeException.class);
HdfsConstantsClient.SAFEMODE_EXCEPTION_CLASS_NAME);
} else {
int maxFailoverAttempts = conf.getInt(
@ -224,7 +222,7 @@ public class WebHdfsFileSystem extends FileSystem
if(LOG.isDebugEnabled()) {
LOG.debug("Using UGI token: " + token);
}
canRefreshDelegationToken = false;
canRefreshDelegationToken = false;
} else {
token = getDelegationToken(null);
if (token != null) {
@ -256,14 +254,14 @@ public class WebHdfsFileSystem extends FileSystem
@Override
protected int getDefaultPort() {
return DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
return HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
}
@Override
public URI getUri() {
return this.uri;
}
@Override
protected URI canonicalizeUri(URI uri) {
return NetUtils.getCanonicalUri(uri, getDefaultPort());
@ -287,8 +285,8 @@ public class WebHdfsFileSystem extends FileSystem
@Override
public synchronized void setWorkingDirectory(final Path dir) {
String result = makeAbsolute(dir).toUri().getPath();
if (!DFSUtil.isValidName(result)) {
throw new IllegalArgumentException("Invalid DFS directory name " +
if (!DFSUtilClient.isValidName(result)) {
throw new IllegalArgumentException("Invalid DFS directory name " +
result);
}
workingDir = makeAbsolute(dir);
@ -367,10 +365,10 @@ public class WebHdfsFileSystem extends FileSystem
/**
* Convert an exception to an IOException.
*
*
* For a non-IOException, wrap it with IOException.
* For a RemoteException, unwrap it.
* For an IOException which is not a RemoteException, return it.
* For an IOException which is not a RemoteException, return it.
*/
private static IOException toIOException(Exception e) {
if (!(e instanceof IOException)) {
@ -413,9 +411,9 @@ public class WebHdfsFileSystem extends FileSystem
}
return url;
}
Param<?,?>[] getAuthParameters(final HttpOpParam.Op op) throws IOException {
List<Param<?,?>> authParams = Lists.newArrayList();
List<Param<?,?>> authParams = Lists.newArrayList();
// Skip adding delegation token for token operations because these
// operations require authentication.
Token<?> token = null;
@ -494,11 +492,11 @@ public class WebHdfsFileSystem extends FileSystem
/**
* Two-step requests redirected to a DN
*
*
* Create/Append:
* Step 1) Submit a Http request with neither auto-redirect nor data.
* Step 1) Submit a Http request with neither auto-redirect nor data.
* Step 2) Submit another Http request with the URL from the Location header with data.
*
*
* The reason for the two-step create/append is to prevent clients from
* sending out the data before the redirect. This issue is addressed by the
* "Expect: 100-continue" header in HTTP/1.1; see RFC 2616, Section 8.2.3.
@ -506,7 +504,7 @@ public class WebHdfsFileSystem extends FileSystem
* and Java 6 http client), which do not correctly implement "Expect:
* 100-continue". The two-step create/append is a temporary workaround for
* the software library bugs.
*
*
* Open/Checksum
* Also implements two-step connects for other operations redirected to
* a DN such as open and checksum
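For reference, a stripped-down sketch of the two-step pattern the comment above describes, using plain HttpURLConnection; the URL handling and error handling are hypothetical and much simplified compared to AbstractRunner:

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;

class TwoStepCreateSketch {
  static void create(URL createUrl, byte[] data) throws Exception {
    // Step 1: submit the operation with no body and redirects disabled;
    // the NameNode replies 307 with the chosen DataNode in "Location".
    HttpURLConnection step1 = (HttpURLConnection) createUrl.openConnection();
    step1.setRequestMethod("PUT");
    step1.setInstanceFollowRedirects(false);
    step1.connect();
    String location = step1.getHeaderField("Location");
    step1.disconnect();

    // Step 2: send the actual data to the DataNode URL from step 1.
    HttpURLConnection step2 = (HttpURLConnection) new URL(location).openConnection();
    step2.setRequestMethod("PUT");
    step2.setDoOutput(true);
    try (OutputStream out = step2.getOutputStream()) {
      out.write(data);
    }
    step2.getResponseCode(); // complete the exchange before disconnecting
    step2.disconnect();
  }
}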
@ -515,7 +513,7 @@ public class WebHdfsFileSystem extends FileSystem
//redirect hostname and port
String redirectHost = null;
// resolve redirects for a DN operation unless already resolved
if (op.getRedirect() && !redirected) {
final HttpOpParam.Op redirectOp =
@ -545,7 +543,7 @@ public class WebHdfsFileSystem extends FileSystem
}
}
throw ioe;
}
}
}
private HttpURLConnection connect(final HttpOpParam.Op op, final URL url)
@ -557,7 +555,7 @@ public class WebHdfsFileSystem extends FileSystem
conn.setInstanceFollowRedirects(false);
switch (op.getType()) {
// if not sending a message body for a POST or PUT operation, need
// to ensure the server/proxy knows this
// to ensure the server/proxy knows this
case POST:
case PUT: {
conn.setDoOutput(true);
@ -665,21 +663,21 @@ public class WebHdfsFileSystem extends FileSystem
abstract class AbstractFsPathRunner<T> extends AbstractRunner<T> {
private final Path fspath;
private final Param<?,?>[] parameters;
AbstractFsPathRunner(final HttpOpParam.Op op, final Path fspath,
Param<?,?>... parameters) {
super(op, false);
this.fspath = fspath;
this.parameters = parameters;
}
AbstractFsPathRunner(final HttpOpParam.Op op, Param<?,?>[] parameters,
final Path fspath) {
super(op, false);
this.fspath = fspath;
this.parameters = parameters;
}
@Override
protected URL getUrl() throws IOException {
if (excludeDatanodes.getValue() != null) {
@ -700,7 +698,7 @@ public class WebHdfsFileSystem extends FileSystem
FsPathRunner(Op op, Path fspath, Param<?,?>... parameters) {
super(op, fspath, parameters);
}
@Override
Void getResponse(HttpURLConnection conn) throws IOException {
return null;
@ -715,12 +713,12 @@ public class WebHdfsFileSystem extends FileSystem
Param<?,?>... parameters) {
super(op, fspath, parameters);
}
FsPathResponseRunner(final HttpOpParam.Op op, Param<?,?>[] parameters,
final Path fspath) {
super(op, parameters, fspath);
}
@Override
final T getResponse(HttpURLConnection conn) throws IOException {
try {
@ -743,7 +741,7 @@ public class WebHdfsFileSystem extends FileSystem
conn.disconnect();
}
}
abstract T decodeResponse(Map<?,?> json) throws IOException;
}
@ -754,7 +752,7 @@ public class WebHdfsFileSystem extends FileSystem
FsPathBooleanRunner(Op op, Path fspath, Param<?,?>... parameters) {
super(op, fspath, parameters);
}
@Override
Boolean decodeResponse(Map<?,?> json) throws IOException {
return (Boolean)json.get("boolean");
@ -766,13 +764,13 @@ public class WebHdfsFileSystem extends FileSystem
*/
class FsPathOutputStreamRunner extends AbstractFsPathRunner<FSDataOutputStream> {
private final int bufferSize;
FsPathOutputStreamRunner(Op op, Path fspath, int bufferSize,
Param<?,?>... parameters) {
super(op, fspath, parameters);
this.bufferSize = bufferSize;
}
@Override
FSDataOutputStream getResponse(final HttpURLConnection conn)
throws IOException {
@ -804,7 +802,7 @@ public class WebHdfsFileSystem extends FileSystem
return conn;
}
}
/**
* Used by open() which tracks the resolved url itself
*/
@ -918,26 +916,26 @@ public class WebHdfsFileSystem extends FileSystem
new RenameOptionSetParam(options)
).run();
}
@Override
public void setXAttr(Path p, String name, byte[] value,
public void setXAttr(Path p, String name, byte[] value,
EnumSet<XAttrSetFlag> flag) throws IOException {
statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PutOpParam.Op.SETXATTR;
if (value != null) {
new FsPathRunner(op, p, new XAttrNameParam(name), new XAttrValueParam(
XAttrCodec.encodeValue(value, XAttrCodec.HEX)),
XAttrCodec.encodeValue(value, XAttrCodec.HEX)),
new XAttrSetFlagParam(flag)).run();
} else {
new FsPathRunner(op, p, new XAttrNameParam(name),
new FsPathRunner(op, p, new XAttrNameParam(name),
new XAttrSetFlagParam(flag)).run();
}
}
@Override
public byte[] getXAttr(Path p, final String name) throws IOException {
final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS;
return new FsPathResponseRunner<byte[]>(op, p, new XAttrNameParam(name),
return new FsPathResponseRunner<byte[]>(op, p, new XAttrNameParam(name),
new XAttrEncodingParam(XAttrCodec.HEX)) {
@Override
byte[] decodeResponse(Map<?, ?> json) throws IOException {
@ -945,11 +943,11 @@ public class WebHdfsFileSystem extends FileSystem
}
}.run();
}
@Override
public Map<String, byte[]> getXAttrs(Path p) throws IOException {
final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS;
return new FsPathResponseRunner<Map<String, byte[]>>(op, p,
return new FsPathResponseRunner<Map<String, byte[]>>(op, p,
new XAttrEncodingParam(XAttrCodec.HEX)) {
@Override
Map<String, byte[]> decodeResponse(Map<?, ?> json) throws IOException {
@ -957,18 +955,18 @@ public class WebHdfsFileSystem extends FileSystem
}
}.run();
}
@Override
public Map<String, byte[]> getXAttrs(Path p, final List<String> names)
public Map<String, byte[]> getXAttrs(Path p, final List<String> names)
throws IOException {
Preconditions.checkArgument(names != null && !names.isEmpty(),
Preconditions.checkArgument(names != null && !names.isEmpty(),
"XAttr names cannot be null or empty.");
Param<?,?>[] parameters = new Param<?,?>[names.size() + 1];
for (int i = 0; i < parameters.length - 1; i++) {
parameters[i] = new XAttrNameParam(names.get(i));
}
parameters[parameters.length - 1] = new XAttrEncodingParam(XAttrCodec.HEX);
final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS;
return new FsPathResponseRunner<Map<String, byte[]>>(op, parameters, p) {
@Override
@ -977,7 +975,7 @@ public class WebHdfsFileSystem extends FileSystem
}
}.run();
}
@Override
public List<String> listXAttrs(Path p) throws IOException {
final HttpOpParam.Op op = GetOpParam.Op.LISTXATTRS;
@ -1057,7 +1055,7 @@ public class WebHdfsFileSystem extends FileSystem
}
@Override
public Path createSnapshot(final Path path, final String snapshotName)
public Path createSnapshot(final Path path, final String snapshotName)
throws IOException {
statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PutOpParam.Op.CREATESNAPSHOT;
@ -1111,14 +1109,14 @@ public class WebHdfsFileSystem extends FileSystem
@Override
public long getDefaultBlockSize() {
return getConf().getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
return getConf().getLongBytes(HdfsClientConfigKeys.DFS_BLOCK_SIZE_KEY,
HdfsClientConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
}
@Override
public short getDefaultReplication() {
return (short)getConf().getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
DFSConfigKeys.DFS_REPLICATION_DEFAULT);
return (short)getConf().getInt(HdfsClientConfigKeys.DFS_REPLICATION_KEY,
HdfsClientConfigKeys.DFS_REPLICATION_DEFAULT);
}
@Override
@ -1228,7 +1226,7 @@ public class WebHdfsFileSystem extends FileSystem
final URL offsetUrl = offset == 0L? url
: new URL(url + "&" + new OffsetParam(offset));
return new URLRunner(GetOpParam.Op.OPEN, offsetUrl, resolved).run();
}
}
}
private static final String OFFSET_PARAM_PREFIX = OffsetParam.NAME + "=";
@ -1359,7 +1357,7 @@ public class WebHdfsFileSystem extends FileSystem
new TokenArgumentParam(token.encodeToUrlString())
).run();
}
@Override
public BlockLocation[] getFileBlockLocations(final FileStatus status,
final long offset, final long length) throws IOException {
@ -1370,7 +1368,7 @@ public class WebHdfsFileSystem extends FileSystem
}
@Override
public BlockLocation[] getFileBlockLocations(final Path p,
public BlockLocation[] getFileBlockLocations(final Path p,
final long offset, final long length) throws IOException {
statistics.incrementReadOps(1);
@ -1379,7 +1377,7 @@ public class WebHdfsFileSystem extends FileSystem
new OffsetParam(offset), new LengthParam(length)) {
@Override
BlockLocation[] decodeResponse(Map<?,?> json) throws IOException {
return DFSUtil.locatedBlocks2Locations(
return DFSUtilClient.locatedBlocks2Locations(
JsonUtilClient.toLocatedBlocks(json));
}
}.run();
@ -1408,7 +1406,7 @@ public class WebHdfsFileSystem extends FileSystem
public MD5MD5CRC32FileChecksum getFileChecksum(final Path p
) throws IOException {
statistics.incrementReadOps(1);
final HttpOpParam.Op op = GetOpParam.Op.GETFILECHECKSUM;
return new FsPathResponseRunner<MD5MD5CRC32FileChecksum>(op, p) {
@Override
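With the class moved, a WebHDFS client needs only hadoop-hdfs-client on its classpath; usage through the FileSystem facade is unchanged. A minimal sketch (host, port, and path are hypothetical):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The "webhdfs" scheme resolves to WebHdfsFileSystem ("swebhdfs" for the
    // TLS variant shown in SWebHdfsFileSystem above).
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://namenode.example.com:50070/"), conf);
    for (FileStatus st : fs.listStatus(new Path("/"))) {
      System.out.println(st.getPath());
    }
    fs.close();
  }
}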

View File

@ -467,6 +467,8 @@ Release 2.8.0 - UNRELEASED
HDFS-8215. Refactor NamenodeFsck#check method. (Takanobu Asanuma
via szetszwo)
HDFS-8052. Move WebHdfsFileSystem into hadoop-hdfs-client. (wheat9)
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

View File

@ -48,7 +48,6 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.token.Token;
import org.apache.htrace.Sampler;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
@ -296,7 +295,7 @@ class BlockStorageLocationUtil {
List<LocatedBlock> blocks,
Map<LocatedBlock, List<VolumeId>> blockVolumeIds) throws IOException {
// Construct the final return value of VolumeBlockLocation[]
BlockLocation[] locations = DFSUtil.locatedBlocks2Locations(blocks);
BlockLocation[] locations = DFSUtilClient.locatedBlocks2Locations(blocks);
List<BlockStorageLocation> volumeBlockLocs =
new ArrayList<BlockStorageLocation>(locations.length);
for (int i = 0; i < locations.length; i++) {

View File

@ -917,7 +917,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
TraceScope scope = getPathTraceScope("getBlockLocations", src);
try {
LocatedBlocks blocks = getLocatedBlocks(src, start, length);
BlockLocation[] locations = DFSUtil.locatedBlocks2Locations(blocks);
BlockLocation[] locations = DFSUtilClient.locatedBlocks2Locations(blocks);
HdfsBlockLocation[] hdfsLocations = new HdfsBlockLocation[locations.length];
for (int i = 0; i < locations.length; i++) {
hdfsLocations[i] = new HdfsBlockLocation(locations[i], blocks.get(i));

View File

@ -164,7 +164,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
*/
public static final String DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT =
"org.apache.hadoop.hdfs.web.AuthFilter".toString();
public static final String DFS_WEBHDFS_USER_PATTERN_KEY = "dfs.webhdfs.user.provider.user.pattern";
@Deprecated
public static final String DFS_WEBHDFS_USER_PATTERN_KEY =
HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY;
@Deprecated
public static final String DFS_WEBHDFS_USER_PATTERN_DEFAULT =
HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT;
public static final String DFS_PERMISSIONS_ENABLED_KEY = "dfs.permissions.enabled";
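Since the deprecated DFSConfigKeys entries are plain aliases of the HdfsClientConfigKeys strings, downstream code that still references the old constants keeps resolving the same configuration value. A sketch of that equivalence (illustrative only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

public class DeprecatedKeySketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
        "^[A-Za-z0-9_][A-Za-z0-9._-]*[$]?$");
    // Both constants name the identical key string, so the value is
    // visible under either one.
    System.out.println(conf.get(HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY)
        .equals(conf.get(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY))); // true
  }
}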

View File

@ -228,37 +228,7 @@ public class DFSUtil {
* names which contain a ":" or "//", or other non-canonical paths.
*/
public static boolean isValidName(String src) {
// Path must be absolute.
if (!src.startsWith(Path.SEPARATOR)) {
return false;
}
// Check for ".." "." ":" "/"
String[] components = StringUtils.split(src, '/');
for (int i = 0; i < components.length; i++) {
String element = components[i];
if (element.equals(".") ||
(element.indexOf(":") >= 0) ||
(element.indexOf("/") >= 0)) {
return false;
}
// ".." is allowed in path starting with /.reserved/.inodes
if (element.equals("..")) {
if (components.length > 4
&& components[1].equals(FSDirectory.DOT_RESERVED_STRING)
&& components[2].equals(FSDirectory.DOT_INODES_STRING)) {
continue;
}
return false;
}
// The string may start or end with a /, but not have
// "//" in the middle.
if (element.isEmpty() && i != components.length - 1 &&
i != 0) {
return false;
}
}
return true;
return DFSUtilClient.isValidName(src);
}
/**
@ -329,7 +299,7 @@ public class DFSUtil {
* Converts a string to a byte array using UTF8 encoding.
*/
public static byte[] string2Bytes(String str) {
return str.getBytes(Charsets.UTF_8);
return DFSUtilClient.string2Bytes(str);
}
/**
@ -475,61 +445,6 @@ public class DFSUtil {
}
return result;
}
/**
* Convert a LocatedBlocks to BlockLocation[]
* @param blocks a LocatedBlocks
* @return an array of BlockLocation
*/
public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) {
if (blocks == null) {
return new BlockLocation[0];
}
return locatedBlocks2Locations(blocks.getLocatedBlocks());
}
/**
* Convert a List<LocatedBlock> to BlockLocation[]
* @param blocks A List<LocatedBlock> to be converted
* @return converted array of BlockLocation
*/
public static BlockLocation[] locatedBlocks2Locations(List<LocatedBlock> blocks) {
if (blocks == null) {
return new BlockLocation[0];
}
int nrBlocks = blocks.size();
BlockLocation[] blkLocations = new BlockLocation[nrBlocks];
if (nrBlocks == 0) {
return blkLocations;
}
int idx = 0;
for (LocatedBlock blk : blocks) {
assert idx < nrBlocks : "Incorrect index";
DatanodeInfo[] locations = blk.getLocations();
String[] hosts = new String[locations.length];
String[] xferAddrs = new String[locations.length];
String[] racks = new String[locations.length];
for (int hCnt = 0; hCnt < locations.length; hCnt++) {
hosts[hCnt] = locations[hCnt].getHostName();
xferAddrs[hCnt] = locations[hCnt].getXferAddr();
NodeBase node = new NodeBase(xferAddrs[hCnt],
locations[hCnt].getNetworkLocation());
racks[hCnt] = node.toString();
}
DatanodeInfo[] cachedLocations = blk.getCachedLocations();
String[] cachedHosts = new String[cachedLocations.length];
for (int i=0; i<cachedLocations.length; i++) {
cachedHosts[i] = cachedLocations[i].getHostName();
}
blkLocations[idx] = new BlockLocation(xferAddrs, hosts, cachedHosts,
racks,
blk.getStartOffset(),
blk.getBlockSize(),
blk.isCorrupt());
idx++;
}
return blkLocations;
}
/**
* Return configuration key of format key.suffix1.suffix2...suffixN

View File

@ -404,7 +404,7 @@ public class NameNodeProxies {
HdfsClientConfigKeys.Retry.POLICY_ENABLED_DEFAULT,
HdfsClientConfigKeys.Retry.POLICY_SPEC_KEY,
HdfsClientConfigKeys.Retry.POLICY_SPEC_DEFAULT,
SafeModeException.class);
SafeModeException.class.getName());
final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(

View File

@ -25,7 +25,7 @@ import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSUtilClient;
/**
* Interface that represents the over the wire information
@ -78,6 +78,6 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
isSymlink() ? new Path(getSymlink()) : null,
(getFullPath(path)).makeQualified(
defaultUri, null), // fully-qualify path
DFSUtil.locatedBlocks2Locations(getBlockLocations()));
DFSUtilClient.locatedBlocks2Locations(getBlockLocations()));
}
}

View File

@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@ -69,8 +70,8 @@ public class NameNodeHttpServer {
private void initWebHdfs(Configuration conf) throws IOException {
// set user pattern based on configuration file
UserParam.setUserPattern(conf.get(
DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
// add authentication filter for webhdfs
final String className = conf.get(

View File

@ -112,7 +112,7 @@ public class TestDFSUtil {
List<LocatedBlock> ls = Arrays.asList(l1, l2);
LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null);
BlockLocation[] bs = DFSUtil.locatedBlocks2Locations(lbs);
BlockLocation[] bs = DFSUtilClient.locatedBlocks2Locations(lbs);
assertTrue("expected 2 blocks but got " + bs.length,
bs.length == 2);
@ -128,7 +128,7 @@ public class TestDFSUtil {
corruptCount == 1);
// test an empty location
bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
bs = DFSUtilClient.locatedBlocks2Locations(new LocatedBlocks());
assertEquals(0, bs.length);
}

View File

@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestDFSClientRetries;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@ -282,7 +283,8 @@ public class TestWebHDFS {
@Test(timeout=300000)
public void testNumericalUserName() throws Exception {
final Configuration conf = WebHdfsTestUtil.createConf();
conf.set(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, "^[A-Za-z0-9_][A-Za-z0-9._-]*[$]?$");
conf.set(HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, "^[A-Za-z0-9_][A-Za-z0-9" +
"._-]*[$]?$");
final MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {