Merging r1543902 through r1544303 from trunk to branch HDFS-2832

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1544306 13f79535-47bb-0310-9956-ffa450edef68
Arpit Agarwal 2013-11-21 20:06:09 +00:00
commit 16f7fb80d9
83 changed files with 1691 additions and 757 deletions

View File

@ -196,7 +196,13 @@ public class KerberosAuthenticator implements Authenticator {
doSpnegoSequence(token);
} else {
LOG.debug("Using fallback authenticator sequence.");
getFallBackAuthenticator().authenticate(url, token);
Authenticator auth = getFallBackAuthenticator();
// Make sure the fallback authenticator has the same
// ConnectionConfigurator, since getFallBackAuthenticator() might be
// overridden. Otherwise the fallback authenticator might not have the
// information needed to make the connection (e.g., SSL certificates)
auth.setConnectionConfigurator(connConfigurator);
auth.authenticate(url, token);
}
}
}
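For context on why this propagation matters: a caller that authenticates over HTTPS typically supplies a ConnectionConfigurator that installs its SSL socket factory, and without the setConnectionConfigurator call above the fallback authenticator would open connections that lack it. A minimal sketch, with an illustrative class name and factory:

import java.io.IOException;
import java.net.HttpURLConnection;
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.SSLSocketFactory;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;

// Applies a preconfigured SSLSocketFactory to every connection; both the
// SPNEGO path and the fallback path need this to negotiate TLS.
class SslConnConfigurator implements ConnectionConfigurator {
  private final SSLSocketFactory factory;
  SslConnConfigurator(SSLSocketFactory factory) { this.factory = factory; }
  @Override
  public HttpURLConnection configure(HttpURLConnection conn) throws IOException {
    if (conn instanceof HttpsURLConnection) {
      ((HttpsURLConnection) conn).setSSLSocketFactory(factory);
    }
    return conn;
  }
}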

View File

@ -383,6 +383,11 @@ Release 2.3.0 - UNRELEASED
HADOOP-10067. Missing POM dependency on jsr305 (Robert Rati via stevel)
HADOOP-10103. update commons-lang to 2.6 (Akira AJISAKA via stevel)
HADOOP-10111. Allow DU to be initialized with an initial value (Kihwal Lee
via jeagles)
OPTIMIZATIONS
HADOOP-9748. Reduce blocking on UGI.ensureInitialized (daryn)
@ -500,6 +505,9 @@ Release 2.2.1 - UNRELEASED
HADOOP-10110. hadoop-auth has a build break due to missing dependency.
(Chuan Liu via arp)
HADOOP-9114. When dfs.checksum.type is defined as NULL, writing a file and
calling hflush throws java.lang.ArrayIndexOutOfBoundsException. (Sathish via umamahesh)
Release 2.2.0 - 2013-10-13
INCOMPATIBLE CHANGES

View File

@ -47,17 +47,32 @@ public class DU extends Shell {
* @throws IOException if we fail to refresh the disk usage
*/
public DU(File path, long interval) throws IOException {
this(path, interval, -1L);
}
/**
* Keeps track of disk usage.
* @param path the path to check disk usage in
* @param interval refresh the disk usage at this interval
* @param initialUsed use this value until next refresh
* @throws IOException if we fail to refresh the disk usage
*/
public DU(File path, long interval, long initialUsed) throws IOException {
super(0);
//we set the Shell interval to 0 so it will always run our command
//and use this one to set the thread sleep interval
this.refreshInterval = interval;
this.dirPath = path.getCanonicalPath();
//populate the used variable
run();
//populate the used variable if the initial value is not specified.
if (initialUsed < 0) {
run();
} else {
this.used.set(initialUsed);
}
}
/**
* Keeps track of disk usage.
* @param path the path to check disk usage in
@ -65,10 +80,24 @@ public class DU extends Shell {
* @throws IOException if we fail to refresh the disk usage
*/
public DU(File path, Configuration conf) throws IOException {
this(path, conf.getLong(CommonConfigurationKeys.FS_DU_INTERVAL_KEY,
CommonConfigurationKeys.FS_DU_INTERVAL_DEFAULT));
this(path, conf, -1L);
}
/**
* Keeps track of disk usage.
* @param path the path to check disk usage in
* @param conf configuration object
* @param initialUsed use this value until the next refresh
* @throws IOException if we fail to refresh the disk usage
*/
public DU(File path, Configuration conf, long initialUsed)
throws IOException {
this(path, conf.getLong(CommonConfigurationKeys.FS_DU_INTERVAL_KEY,
CommonConfigurationKeys.FS_DU_INTERVAL_DEFAULT), initialUsed);
}
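A sketch of what the new overload enables: a service that persisted its last known usage can seed DU and avoid blocking on an initial "du" run. The method name and saved value below are illustrative, not from this commit.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.DU;

// Assumed caller: savedUsed was restored from persisted state.
static long startTracking(File dir, long savedUsed) throws IOException {
  DU du = new DU(dir, 600000L, savedUsed); // refresh every 10 minutes
  du.start();                              // background refresh thread
  return du.getUsed();                     // equals savedUsed until first refresh
}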
/**
* This thread refreshes the "used" variable.
*

View File

@ -183,10 +183,13 @@ abstract public class FSOutputSummer extends OutputStream {
}
static byte[] int2byte(int integer, byte[] bytes) {
bytes[0] = (byte)((integer >>> 24) & 0xFF);
bytes[1] = (byte)((integer >>> 16) & 0xFF);
bytes[2] = (byte)((integer >>> 8) & 0xFF);
bytes[3] = (byte)((integer >>> 0) & 0xFF);
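// A zero-length checksum buffer is passed when dfs.checksum.type is NULL
// (HADOOP-9114, listed in CHANGES.txt above); guard so the writes below
// don't throw ArrayIndexOutOfBoundsException.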
if (bytes.length != 0) {
bytes[0] = (byte) ((integer >>> 24) & 0xFF);
bytes[1] = (byte) ((integer >>> 16) & 0xFF);
bytes[2] = (byte) ((integer >>> 8) & 0xFF);
bytes[3] = (byte) ((integer >>> 0) & 0xFF);
return bytes;
}
return bytes;
}

View File

@ -472,7 +472,9 @@ public class HttpServer implements FilterContainer {
if (conf.getBoolean(
CommonConfigurationKeys.HADOOP_JETTY_LOGS_SERVE_ALIASES,
CommonConfigurationKeys.DEFAULT_HADOOP_JETTY_LOGS_SERVE_ALIASES)) {
logContext.getInitParams().put(
@SuppressWarnings("unchecked")
Map<String, String> params = logContext.getInitParams();
params.put(
"org.mortbay.jetty.servlet.Default.aliases", "true");
}
logContext.setDisplayName("logs");

View File

@ -116,4 +116,19 @@ public class TestDU extends TestCase {
long duSize = du.getUsed();
assertTrue(String.valueOf(duSize), duSize >= 0L);
}
public void testDUSetInitialValue() throws IOException {
File file = new File(DU_DIR, "dataX");
createFile(file, 8192);
DU du = new DU(file, 3000, 1024);
du.start();
assertTrue("Initial usage setting not honored", du.getUsed() == 1024);
// wait until the first du runs.
try {
Thread.sleep(5000);
} catch (InterruptedException ie) {}
assertTrue("Usage didn't get updated", du.getUsed() == 8192);
}
}

View File

@ -204,6 +204,15 @@ Trunk (Unreleased)
HDFS-5525. Inline dust templates for new Web UI. (Haohui Mai via jing9)
HDFS-5451. Add byte and file statistics to PathBasedCacheEntry.
(Colin Patrick McCabe via Andrew Wang)
HDFS-5531. Combine the getNsQuota() and getDsQuota() methods in INode.
(szetszwo)
HDFS-5473. Consistent naming of user-visible caching classes and methods
(cmccabe)
OPTIMIZATIONS
HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
@ -411,6 +420,8 @@ Release 2.3.0 - UNRELEASED
HDFS-5382. Implement the UI of browsing filesystems in HTML 5 page. (Haohui
Mai via jing9)
HDFS-3987. Support webhdfs over HTTPS. (Haohui Mai via jing9)
IMPROVEMENTS
HDFS-5267. Remove volatile from LightWeightHashSet. (Junping Du via llu)
@ -515,6 +526,9 @@ Release 2.3.0 - UNRELEASED
HDFS-1386. TestJMXGet fails in jdk7 (jeagles)
HDFS-5532. Enable the webhdfs by default to support new HDFS web UI. (Vinay
via jing9)
OPTIMIZATIONS
HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)

View File

@ -108,6 +108,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.ClientMmapManager;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
@ -117,7 +118,7 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@ -2290,41 +2291,41 @@ public class DFSClient implements java.io.Closeable {
}
}
public long addPathBasedCacheDirective(
PathBasedCacheDirective directive) throws IOException {
public long addCacheDirective(
CacheDirectiveInfo info) throws IOException {
checkOpen();
try {
return namenode.addPathBasedCacheDirective(directive);
return namenode.addCacheDirective(info);
} catch (RemoteException re) {
throw re.unwrapRemoteException();
}
}
public void modifyPathBasedCacheDirective(
PathBasedCacheDirective directive) throws IOException {
public void modifyCacheDirective(
CacheDirectiveInfo info) throws IOException {
checkOpen();
try {
namenode.modifyPathBasedCacheDirective(directive);
namenode.modifyCacheDirective(info);
} catch (RemoteException re) {
throw re.unwrapRemoteException();
}
}
public void removePathBasedCacheDirective(long id)
public void removeCacheDirective(long id)
throws IOException {
checkOpen();
try {
namenode.removePathBasedCacheDirective(id);
namenode.removeCacheDirective(id);
} catch (RemoteException re) {
throw re.unwrapRemoteException();
}
}
public RemoteIterator<PathBasedCacheDirective> listPathBasedCacheDirectives(
PathBasedCacheDirective filter) throws IOException {
public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
CacheDirectiveInfo filter) throws IOException {
checkOpen();
try {
return namenode.listPathBasedCacheDirectives(0, filter);
return namenode.listCacheDirectives(0, filter);
} catch (RemoteException re) {
throw re.unwrapRemoteException();
}

View File

@ -162,7 +162,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY = "dfs.namenode.replication.max-streams-hard-limit";
public static final int DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT = 4;
public static final String DFS_WEBHDFS_ENABLED_KEY = "dfs.webhdfs.enabled";
public static final boolean DFS_WEBHDFS_ENABLED_DEFAULT = false;
public static final boolean DFS_WEBHDFS_ENABLED_DEFAULT = true;
public static final String DFS_PERMISSIONS_ENABLED_KEY = "dfs.permissions.enabled";
public static final boolean DFS_PERMISSIONS_ENABLED_DEFAULT = true;
public static final String DFS_PERSIST_BLOCKS_KEY = "dfs.persist.blocks";
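Because the default flips to true here, WebHDFS is now on unless explicitly disabled. A hedged sketch of the programmatic opt-out (the hdfs-site.xml property is dfs.webhdfs.enabled):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

// Restore the old behavior on clusters that should not expose WebHDFS.
Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, false);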

View File

@ -73,6 +73,8 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
@ -622,12 +624,19 @@ public class DFSUtil {
* Returns list of InetSocketAddress corresponding to HA NN WebHDFS addresses
* from the configuration.
*
* @param conf configuration
* @param scheme the WebHDFS scheme, "webhdfs" or "swebhdfs"
* @return list of InetSocketAddresses
*/
public static Map<String, Map<String, InetSocketAddress>> getHaNnHttpAddresses(
Configuration conf) {
return getAddresses(conf, null, DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
public static Map<String, Map<String, InetSocketAddress>> getHaNnWebHdfsAddresses(
Configuration conf, String scheme) {
if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
return getAddresses(conf, null,
DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
} else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
return getAddresses(conf, null,
DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
} else {
throw new IllegalArgumentException("Unsupported scheme: " + scheme);
}
}
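A small usage sketch of the scheme-aware resolution; "mycluster" stands in for an assumed HA logical name and is not from this commit:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;

// The scheme selects both the config key (HTTP vs. HTTPS address) and,
// in resolveWebHdfsUri below, the default port.
static InetSocketAddress[] nnAddrs(Configuration conf) throws IOException {
  return DFSUtil.resolveWebHdfsUri(URI.create("webhdfs://mycluster/"), conf);
}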
/**
@ -636,18 +645,28 @@ public class DFSUtil {
* cluster, the resolver further resolves the logical name (i.e., the authority
* in the URL) into real namenode addresses.
*/
public static InetSocketAddress[] resolve(URI uri, int schemeDefaultPort,
Configuration conf) throws IOException {
public static InetSocketAddress[] resolveWebHdfsUri(URI uri, Configuration conf)
throws IOException {
int defaultPort;
String scheme = uri.getScheme();
if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
defaultPort = DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
} else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
defaultPort = DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
} else {
throw new IllegalArgumentException("Unsupported scheme: " + scheme);
}
ArrayList<InetSocketAddress> ret = new ArrayList<InetSocketAddress>();
if (!HAUtil.isLogicalUri(conf, uri)) {
InetSocketAddress addr = NetUtils.createSocketAddr(uri.getAuthority(),
schemeDefaultPort);
defaultPort);
ret.add(addr);
} else {
Map<String, Map<String, InetSocketAddress>> addresses = DFSUtil
.getHaNnHttpAddresses(conf);
.getHaNnWebHdfsAddresses(conf, scheme);
for (Map<String, InetSocketAddress> addrs : addresses.values()) {
for (InetSocketAddress addr : addrs.values()) {

View File

@ -57,6 +57,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@ -67,7 +68,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
@ -1584,78 +1585,88 @@ public class DistributedFileSystem extends FileSystem {
}
/**
* Add a new PathBasedCacheDirective.
* Add a new CacheDirective.
*
* @param directive A directive to add.
* @param info Information about a directive to add.
* @return the ID of the directive that was created.
* @throws IOException if the directive could not be added
*/
public long addPathBasedCacheDirective(
PathBasedCacheDirective directive) throws IOException {
Preconditions.checkNotNull(directive.getPath());
Path path = new Path(getPathName(fixRelativePart(directive.getPath()))).
public long addCacheDirective(
CacheDirectiveInfo info) throws IOException {
Preconditions.checkNotNull(info.getPath());
Path path = new Path(getPathName(fixRelativePart(info.getPath()))).
makeQualified(getUri(), getWorkingDirectory());
return dfs.addPathBasedCacheDirective(
new PathBasedCacheDirective.Builder(directive).
return dfs.addCacheDirective(
new CacheDirectiveInfo.Builder(info).
setPath(path).
build());
}
public void modifyPathBasedCacheDirective(
PathBasedCacheDirective directive) throws IOException {
if (directive.getPath() != null) {
directive = new PathBasedCacheDirective.Builder(directive).
setPath(new Path(getPathName(fixRelativePart(directive.getPath()))).
/**
* Modify a CacheDirective.
*
* @param info Information about the directive to modify.
* You must set the ID to indicate which CacheDirective you want
* to modify.
* @throws IOException if the directive could not be modified
*/
public void modifyCacheDirective(
CacheDirectiveInfo info) throws IOException {
if (info.getPath() != null) {
info = new CacheDirectiveInfo.Builder(info).
setPath(new Path(getPathName(fixRelativePart(info.getPath()))).
makeQualified(getUri(), getWorkingDirectory())).build();
}
dfs.modifyPathBasedCacheDirective(directive);
dfs.modifyCacheDirective(info);
}
/**
* Remove a PathBasedCacheDirective.
* Remove a CacheDirective.
*
* @param id identifier of the PathBasedCacheDirective to remove
* @param id identifier of the CacheDirective to remove
* @throws IOException if the directive could not be removed
*/
public void removePathBasedCacheDirective(long id)
public void removeCacheDirective(long id)
throws IOException {
dfs.removePathBasedCacheDirective(id);
dfs.removeCacheDirective(id);
}
/**
* List the set of cached paths of a cache pool. Incrementally fetches results
* from the server.
* List cache directives. Incrementally fetches results from the server.
*
* @param filter Filter parameters to use when listing the directives, null to
* list all directives visible to us.
* @return A RemoteIterator which returns PathBasedCacheDirective objects.
* @return A RemoteIterator which returns CacheDirectiveEntry objects.
*/
public RemoteIterator<PathBasedCacheDirective> listPathBasedCacheDirectives(
PathBasedCacheDirective filter) throws IOException {
public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
CacheDirectiveInfo filter) throws IOException {
if (filter == null) {
filter = new PathBasedCacheDirective.Builder().build();
filter = new CacheDirectiveInfo.Builder().build();
}
if (filter.getPath() != null) {
filter = new PathBasedCacheDirective.Builder(filter).
filter = new CacheDirectiveInfo.Builder(filter).
setPath(new Path(getPathName(fixRelativePart(filter.getPath())))).
build();
}
final RemoteIterator<PathBasedCacheDirective> iter =
dfs.listPathBasedCacheDirectives(filter);
return new RemoteIterator<PathBasedCacheDirective>() {
final RemoteIterator<CacheDirectiveEntry> iter =
dfs.listCacheDirectives(filter);
return new RemoteIterator<CacheDirectiveEntry>() {
@Override
public boolean hasNext() throws IOException {
return iter.hasNext();
}
@Override
public PathBasedCacheDirective next() throws IOException {
public CacheDirectiveEntry next() throws IOException {
// Although the paths we get back from the NameNode should always be
// absolute, we call makeQualified to add the scheme and authority of
// this DistributedFilesystem.
PathBasedCacheDirective desc = iter.next();
Path p = desc.getPath().makeQualified(getUri(), getWorkingDirectory());
return new PathBasedCacheDirective.Builder(desc).setPath(p).build();
CacheDirectiveEntry desc = iter.next();
CacheDirectiveInfo info = desc.getInfo();
Path p = info.getPath().makeQualified(getUri(), getWorkingDirectory());
return new CacheDirectiveEntry(
new CacheDirectiveInfo.Builder(info).setPath(p).build(),
desc.getStats());
}
};
}
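A hedged usage sketch of the renamed API, using the imports already in this file; the path and pool name are assumptions (the pool must already exist):

static void cacheAndList(DistributedFileSystem dfs) throws IOException {
  long id = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
      .setPath(new Path("/warm/data"))   // assumed example path
      .setPool("pool1")                  // assumed existing cache pool
      .build());
  RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(null);
  while (it.hasNext()) {
    CacheDirectiveEntry e = it.next();
    System.out.println(e.getInfo().getPath()
        + " bytesCached=" + e.getStats().getBytesCached());
  }
  dfs.removeCacheDirective(id);
}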

View File

@ -25,7 +25,11 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
@ -121,4 +125,100 @@ public class HdfsAdmin {
public void disallowSnapshot(Path path) throws IOException {
dfs.disallowSnapshot(path);
}
/**
* Add a new CacheDirective.
*
* @param info Information about a directive to add.
* @return the ID of the directive that was created.
* @throws IOException if the directive could not be added
*/
public long addCacheDirective(CacheDirectiveInfo info)
throws IOException {
return dfs.addCacheDirective(info);
}
/**
* Modify a CacheDirective.
*
* @param info Information about the directive to modify.
* You must set the ID to indicate which CacheDirective you want
* to modify.
* @throws IOException if the directive could not be modified
*/
public void modifyCacheDirective(CacheDirectiveInfo info)
throws IOException {
dfs.modifyCacheDirective(info);
}
/**
* Remove a CacheDirective.
*
* @param id identifier of the CacheDirective to remove
* @throws IOException if the directive could not be removed
*/
public void removeCacheDirective(long id)
throws IOException {
dfs.removeCacheDirective(id);
}
/**
* List cache directives. Incrementally fetches results from the server.
*
* @param filter Filter parameters to use when listing the directives, null to
* list all directives visible to us.
* @return A RemoteIterator which returns CacheDirectiveEntry objects.
*/
public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
CacheDirectiveInfo filter) throws IOException {
return dfs.listCacheDirectives(filter);
}
/**
* Add a cache pool.
*
* @param info
* The request to add a cache pool.
* @throws IOException
* If the request could not be completed.
*/
public void addCachePool(CachePoolInfo info) throws IOException {
dfs.addCachePool(info);
}
/**
* Modify an existing cache pool.
*
* @param info
* The request to modify a cache pool.
* @throws IOException
* If the request could not be completed.
*/
public void modifyCachePool(CachePoolInfo info) throws IOException {
dfs.modifyCachePool(info);
}
/**
* Remove a cache pool.
*
* @param poolName
* Name of the cache pool to remove.
* @throws IOException
* if the cache pool did not exist, or could not be removed.
*/
public void removeCachePool(String poolName) throws IOException {
dfs.removeCachePool(poolName);
}
/**
* List all cache pools.
*
* @return A remote iterator from which you can get CachePoolInfo objects.
* Requests will be made as needed.
* @throws IOException
* If there was an error listing cache pools.
*/
public RemoteIterator<CachePoolInfo> listCachePools() throws IOException {
return dfs.listCachePools();
}
}
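An end-to-end sketch against the HdfsAdmin surface above, using this file's imports; the pool name, path, and the CachePoolInfo(String) constructor are assumptions consistent with this diff:

static void setUpCaching(HdfsAdmin admin) throws IOException {
  admin.addCachePool(new CachePoolInfo("research"));   // assumed constructor
  long id = admin.addCacheDirective(new CacheDirectiveInfo.Builder()
      .setPath(new Path("/datasets/hot"))
      .setReplication((short) 2)
      .setPool("research")
      .build());
  RemoteIterator<CacheDirectiveEntry> it = admin.listCacheDirectives(null);
  while (it.hasNext()) {
    System.out.println(it.next().getInfo());
  }
  admin.removeCacheDirective(id);
  admin.removeCachePool("research");
}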

View File

@ -30,13 +30,16 @@ import com.google.common.base.Preconditions;
* This is an implementation class, not part of the public API.
*/
@InterfaceAudience.Private
public final class PathBasedCacheEntry {
public final class CacheDirective {
private final long entryId;
private final String path;
private final short replication;
private final CachePool pool;
private long bytesNeeded;
private long bytesCached;
private long filesAffected;
public PathBasedCacheEntry(long entryId, String path,
public CacheDirective(long entryId, String path,
short replication, CachePool pool) {
Preconditions.checkArgument(entryId > 0);
this.entryId = entryId;
@ -46,6 +49,9 @@ public final class PathBasedCacheEntry {
this.replication = replication;
Preconditions.checkNotNull(path);
this.pool = pool;
this.bytesNeeded = 0;
this.bytesCached = 0;
this.filesAffected = 0;
}
public long getEntryId() {
@ -64,14 +70,26 @@ public final class PathBasedCacheEntry {
return replication;
}
public PathBasedCacheDirective toDirective() {
return new PathBasedCacheDirective.Builder().
public CacheDirectiveInfo toDirective() {
return new CacheDirectiveInfo.Builder().
setId(entryId).
setPath(new Path(path)).
setReplication(replication).
setPool(pool.getPoolName()).
build();
}
public CacheDirectiveStats toStats() {
return new CacheDirectiveStats.Builder().
setBytesNeeded(bytesNeeded).
setBytesCached(bytesCached).
setFilesAffected(filesAffected).
build();
}
public CacheDirectiveEntry toEntry() {
return new CacheDirectiveEntry(toDirective(), toStats());
}
@Override
public String toString() {
@ -80,6 +98,9 @@ public final class PathBasedCacheEntry {
append(", path:").append(path).
append(", replication:").append(replication).
append(", pool:").append(pool).
append(", bytesNeeded:").append(bytesNeeded).
append(", bytesCached:").append(bytesCached).
append(", filesAffected:").append(filesAffected).
append(" }");
return builder.toString();
}
@ -91,7 +112,7 @@ public final class PathBasedCacheEntry {
if (o.getClass() != this.getClass()) {
return false;
}
PathBasedCacheEntry other = (PathBasedCacheEntry)o;
CacheDirective other = (CacheDirective)o;
return entryId == other.entryId;
}
@ -99,4 +120,40 @@ public final class PathBasedCacheEntry {
public int hashCode() {
return new HashCodeBuilder().append(entryId).toHashCode();
}
public long getBytesNeeded() {
return bytesNeeded;
}
public void clearBytesNeeded() {
this.bytesNeeded = 0;
}
public void addBytesNeeded(long toAdd) {
this.bytesNeeded += toAdd;
}
public long getBytesCached() {
return bytesCached;
}
public void clearBytesCached() {
this.bytesCached = 0;
}
public void addBytesCached(long toAdd) {
this.bytesCached += toAdd;
}
public long getFilesAffected() {
return filesAffected;
}
public void clearFilesAffected() {
this.filesAffected = 0;
}
public void incrementFilesAffected() {
this.filesAffected++;
}
};
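The new counters let the NameNode report per-directive stats; a sketch of how they surface (the helper method and values are illustrative, not from this commit):

// The CacheReplicationMonitor accumulates counters on this (private) class;
// toEntry() then bundles them with the public info for listCacheDirectives.
static CacheDirectiveEntry snapshot(CacheDirective d, long needed, long cached) {
  d.clearBytesNeeded();
  d.addBytesNeeded(needed);
  d.clearBytesCached();
  d.addBytesCached(cached);
  d.incrementFilesAffected();
  return d.toEntry();  // new CacheDirectiveEntry(d.toDirective(), d.toStats())
}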

View File

@ -0,0 +1,45 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Describes a path-based cache directive entry.
*/
@InterfaceStability.Evolving
@InterfaceAudience.Public
public class CacheDirectiveEntry {
private final CacheDirectiveInfo info;
private final CacheDirectiveStats stats;
public CacheDirectiveEntry(CacheDirectiveInfo info,
CacheDirectiveStats stats) {
this.info = info;
this.stats = stats;
}
public CacheDirectiveInfo getInfo() {
return info;
}
public CacheDirectiveStats getStats() {
return stats;
}
};

View File

@ -28,9 +28,9 @@ import org.apache.hadoop.fs.Path;
*/
@InterfaceStability.Evolving
@InterfaceAudience.Public
public class PathBasedCacheDirective {
public class CacheDirectiveInfo {
/**
* A builder for creating new PathBasedCacheDirective instances.
* A builder for creating new CacheDirectiveInfo instances.
*/
public static class Builder {
private Long id;
@ -39,12 +39,12 @@ public class PathBasedCacheDirective {
private String pool;
/**
* Builds a new PathBasedCacheDirective populated with the set properties.
* Builds a new CacheDirectiveInfo populated with the set properties.
*
* @return New PathBasedCacheDirective.
* @return New CacheDirectiveInfo.
*/
public PathBasedCacheDirective build() {
return new PathBasedCacheDirective(id, path, replication, pool);
public CacheDirectiveInfo build() {
return new CacheDirectiveInfo(id, path, replication, pool);
}
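The copy constructor a few lines below supports the override idiom used throughout this commit; a sketch, where info and the absolute path are assumed:

// Start from an existing CacheDirectiveInfo and replace one field, as
// DistributedFileSystem does when qualifying relative paths.
CacheDirectiveInfo qualified = new CacheDirectiveInfo.Builder(info)
    .setPath(new Path("/user/alice/warm"))
    .build();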
/**
@ -55,9 +55,9 @@ public class PathBasedCacheDirective {
/**
* Creates a builder with all elements set to the same values as the
* given PathBasedCacheDirective.
* given CacheDirectiveInfo.
*/
public Builder(PathBasedCacheDirective directive) {
public Builder(CacheDirectiveInfo directive) {
this.id = directive.getId();
this.path = directive.getPath();
this.replication = directive.getReplication();
@ -114,7 +114,7 @@ public class PathBasedCacheDirective {
private final Short replication;
private final String pool;
PathBasedCacheDirective(Long id, Path path, Short replication, String pool) {
CacheDirectiveInfo(Long id, Path path, Short replication, String pool) {
this.id = id;
this.path = path;
this.replication = replication;
@ -148,7 +148,7 @@ public class PathBasedCacheDirective {
public String getPool() {
return pool;
}
@Override
public boolean equals(Object o) {
if (o == null) {
@ -157,7 +157,7 @@ public class PathBasedCacheDirective {
if (getClass() != o.getClass()) {
return false;
}
PathBasedCacheDirective other = (PathBasedCacheDirective)o;
CacheDirectiveInfo other = (CacheDirectiveInfo)o;
return new EqualsBuilder().append(getId(), other.getId()).
append(getPath(), other.getPath()).
append(getReplication(), other.getReplication()).

View File

@ -0,0 +1,125 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Describes the stats of a path-based cache directive.
*/
@InterfaceStability.Evolving
@InterfaceAudience.Public
public class CacheDirectiveStats {
public static class Builder {
private long bytesNeeded;
private long bytesCached;
private long filesAffected;
/**
* Builds a new CacheDirectiveStats populated with the set properties.
*
* @return New CacheDirectiveStats.
*/
public CacheDirectiveStats build() {
return new CacheDirectiveStats(bytesNeeded, bytesCached, filesAffected);
}
/**
* Creates an empty builder.
*/
public Builder() {
}
/**
* Sets the bytes needed by this directive.
*
* @param bytesNeeded The bytes needed.
* @return This builder, for call chaining.
*/
public Builder setBytesNeeded(Long bytesNeeded) {
this.bytesNeeded = bytesNeeded;
return this;
}
/**
* Sets the bytes cached by this directive.
*
* @param bytesCached The bytes cached.
* @return This builder, for call chaining.
*/
public Builder setBytesCached(Long bytesCached) {
this.bytesCached = bytesCached;
return this;
}
/**
* Sets the files affected by this directive.
*
* @param filesAffected The files affected.
* @return This builder, for call chaining.
*/
public Builder setFilesAffected(Long filesAffected) {
this.filesAffected = filesAffected;
return this;
}
}
private final long bytesNeeded;
private final long bytesCached;
private final long filesAffected;
private CacheDirectiveStats(long bytesNeeded, long bytesCached,
long filesAffected) {
this.bytesNeeded = bytesNeeded;
this.bytesCached = bytesCached;
this.filesAffected = filesAffected;
}
/**
* @return The bytes needed.
*/
public Long getBytesNeeded() {
return bytesNeeded;
}
/**
* @return The bytes cached.
*/
public Long getBytesCached() {
return bytesCached;
}
/**
* @return The files affected.
*/
public Long getFilesAffected() {
return filesAffected;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("{");
builder.append("bytesNeeded: ").append(bytesNeeded);
builder.append(", ").append("bytesCached: ").append(bytesCached);
builder.append(", ").append("filesAffected: ").append(filesAffected);
builder.append("}");
return builder.toString();
}
};

View File

@ -46,7 +46,7 @@ import org.xml.sax.SAXException;
* This class is used in RPCs to create and modify cache pools.
* It is serializable and can be stored in the edit log.
*/
@InterfaceAudience.Private
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class CachePoolInfo {
public static final Log LOG = LogFactory.getLog(CachePoolInfo.class);
@ -225,4 +225,4 @@ public class CachePoolInfo {
setMode(perm.getPermission()).
setWeight(weight);
}
}
}

View File

@ -1097,49 +1097,49 @@ public interface ClientProtocol {
String fromSnapshot, String toSnapshot) throws IOException;
/**
* Add a PathBasedCache entry to the CacheManager.
* Add a CacheDirective to the CacheManager.
*
* @param directive A PathBasedCacheDirective to be added
* @return A PathBasedCacheDirective associated with the added directive
* @param directive A CacheDirectiveInfo to be added
* @return the ID of the directive that was added
* @throws IOException if the directive could not be added
*/
@AtMostOnce
public long addPathBasedCacheDirective(
PathBasedCacheDirective directive) throws IOException;
public long addCacheDirective(
CacheDirectiveInfo directive) throws IOException;
/**
* Modify a PathBasedCache entry in the CacheManager.
* Modify a CacheDirective in the CacheManager.
*
* @param directive The directive to modify. Must contain
* a directive ID.
* @throws IOException if the directive could not be modified
*/
@AtMostOnce
public void modifyPathBasedCacheDirective(
PathBasedCacheDirective directive) throws IOException;
public void modifyCacheDirective(
CacheDirectiveInfo directive) throws IOException;
/**
* Remove a PathBasedCacheDirective from the CacheManager.
* Remove a CacheDirective from the CacheManager.
*
* @param id of a PathBasedCacheDirective
* @param id of a CacheDirective
* @throws IOException if the cache directive could not be removed
*/
@AtMostOnce
public void removePathBasedCacheDirective(long id) throws IOException;
public void removeCacheDirective(long id) throws IOException;
/**
* List cache directives. Incrementally fetches results from the server.
*
* @param prevId The last listed entry ID, or -1 if this is the first call to
* listPathBasedCacheDirectives.
* listCacheDirectives.
* @param filter Parameters to use to filter the list results,
* or null to display all directives visible to us.
* @return A RemoteIterator which returns PathBasedCacheDirective objects.
* @return A RemoteIterator which returns CacheDirectiveEntry objects.
*/
@Idempotent
public RemoteIterator<PathBasedCacheDirective> listPathBasedCacheDirectives(
long prevId, PathBasedCacheDirective filter) throws IOException;
public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
long prevId, CacheDirectiveInfo filter) throws IOException;
/**
* Add a new cache pool.

View File

@ -28,6 +28,7 @@ import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
@ -35,7 +36,7 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
@ -44,8 +45,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlo
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectiveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectiveResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
@ -106,25 +107,25 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCa
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDirectivesElementProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDirectivesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDirectivesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyPathBasedCacheDirectiveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyPathBasedCacheDirectiveResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDirectiveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDirectiveResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
@ -1035,12 +1036,12 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
}
@Override
public AddPathBasedCacheDirectiveResponseProto addPathBasedCacheDirective(
RpcController controller, AddPathBasedCacheDirectiveRequestProto request)
public AddCacheDirectiveResponseProto addCacheDirective(
RpcController controller, AddCacheDirectiveRequestProto request)
throws ServiceException {
try {
return AddPathBasedCacheDirectiveResponseProto.newBuilder().
setId(server.addPathBasedCacheDirective(
return AddCacheDirectiveResponseProto.newBuilder().
setId(server.addCacheDirective(
PBHelper.convert(request.getInfo()))).build();
} catch (IOException e) {
throw new ServiceException(e);
@ -1048,26 +1049,26 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
}
@Override
public ModifyPathBasedCacheDirectiveResponseProto modifyPathBasedCacheDirective(
RpcController controller, ModifyPathBasedCacheDirectiveRequestProto request)
public ModifyCacheDirectiveResponseProto modifyCacheDirective(
RpcController controller, ModifyCacheDirectiveRequestProto request)
throws ServiceException {
try {
server.modifyPathBasedCacheDirective(
server.modifyCacheDirective(
PBHelper.convert(request.getInfo()));
return ModifyPathBasedCacheDirectiveResponseProto.newBuilder().build();
return ModifyCacheDirectiveResponseProto.newBuilder().build();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public RemovePathBasedCacheDirectiveResponseProto
removePathBasedCacheDirective(RpcController controller,
RemovePathBasedCacheDirectiveRequestProto request)
public RemoveCacheDirectiveResponseProto
removeCacheDirective(RpcController controller,
RemoveCacheDirectiveRequestProto request)
throws ServiceException {
try {
server.removePathBasedCacheDirective(request.getId());
return RemovePathBasedCacheDirectiveResponseProto.
server.removeCacheDirective(request.getId());
return RemoveCacheDirectiveResponseProto.
newBuilder().build();
} catch (IOException e) {
throw new ServiceException(e);
@ -1075,28 +1076,26 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
}
@Override
public ListPathBasedCacheDirectivesResponseProto listPathBasedCacheDirectives(
RpcController controller, ListPathBasedCacheDirectivesRequestProto request)
public ListCacheDirectivesResponseProto listCacheDirectives(
RpcController controller, ListCacheDirectivesRequestProto request)
throws ServiceException {
try {
PathBasedCacheDirective filter =
CacheDirectiveInfo filter =
PBHelper.convert(request.getFilter());
RemoteIterator<PathBasedCacheDirective> iter =
server.listPathBasedCacheDirectives(request.getPrevId(), filter);
ListPathBasedCacheDirectivesResponseProto.Builder builder =
ListPathBasedCacheDirectivesResponseProto.newBuilder();
RemoteIterator<CacheDirectiveEntry> iter =
server.listCacheDirectives(request.getPrevId(), filter);
ListCacheDirectivesResponseProto.Builder builder =
ListCacheDirectivesResponseProto.newBuilder();
long prevId = 0;
while (iter.hasNext()) {
PathBasedCacheDirective directive = iter.next();
builder.addElements(
ListPathBasedCacheDirectivesElementProto.newBuilder().
setInfo(PBHelper.convert(directive)));
prevId = directive.getId();
CacheDirectiveEntry entry = iter.next();
builder.addElements(PBHelper.convert(entry));
prevId = entry.getInfo().getId();
}
if (prevId == 0) {
builder.setHasMore(false);
} else {
iter = server.listPathBasedCacheDirectives(prevId, filter);
iter = server.listCacheDirectives(prevId, filter);
builder.setHasMore(iter.hasNext());
}
return builder.build();

View File

@ -32,11 +32,11 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
@ -51,14 +51,13 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectiveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectiveResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
@ -100,16 +99,16 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCa
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseElementProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDirectivesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDirectivesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyPathBasedCacheDirectiveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDirectiveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto;
@ -146,7 +145,6 @@ import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRespons
import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
import org.apache.hadoop.security.token.Token;
import com.google.common.primitives.Shorts;
import com.google.protobuf.ByteString;
import com.google.protobuf.ServiceException;
@ -1006,11 +1004,11 @@ public class ClientNamenodeProtocolTranslatorPB implements
}
@Override
public long addPathBasedCacheDirective(
PathBasedCacheDirective directive) throws IOException {
public long addCacheDirective(
CacheDirectiveInfo directive) throws IOException {
try {
return rpcProxy.addPathBasedCacheDirective(null,
AddPathBasedCacheDirectiveRequestProto.newBuilder().
return rpcProxy.addCacheDirective(null,
AddCacheDirectiveRequestProto.newBuilder().
setInfo(PBHelper.convert(directive)).build()).getId();
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
@ -1018,11 +1016,11 @@ public class ClientNamenodeProtocolTranslatorPB implements
}
@Override
public void modifyPathBasedCacheDirective(
PathBasedCacheDirective directive) throws IOException {
public void modifyCacheDirective(
CacheDirectiveInfo directive) throws IOException {
try {
rpcProxy.modifyPathBasedCacheDirective(null,
ModifyPathBasedCacheDirectiveRequestProto.newBuilder().
rpcProxy.modifyCacheDirective(null,
ModifyCacheDirectiveRequestProto.newBuilder().
setInfo(PBHelper.convert(directive)).build());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
@ -1030,29 +1028,29 @@ public class ClientNamenodeProtocolTranslatorPB implements
}
@Override
public void removePathBasedCacheDirective(long id)
public void removeCacheDirective(long id)
throws IOException {
try {
rpcProxy.removePathBasedCacheDirective(null,
RemovePathBasedCacheDirectiveRequestProto.newBuilder().
rpcProxy.removeCacheDirective(null,
RemoveCacheDirectiveRequestProto.newBuilder().
setId(id).build());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
private static class BatchedPathBasedCacheEntries
implements BatchedEntries<PathBasedCacheDirective> {
private ListPathBasedCacheDirectivesResponseProto response;
private static class BatchedCacheEntries
implements BatchedEntries<CacheDirectiveEntry> {
private ListCacheDirectivesResponseProto response;
BatchedPathBasedCacheEntries(
ListPathBasedCacheDirectivesResponseProto response) {
BatchedCacheEntries(
ListCacheDirectivesResponseProto response) {
this.response = response;
}
@Override
public PathBasedCacheDirective get(int i) {
return PBHelper.convert(response.getElements(i).getInfo());
public CacheDirectiveEntry get(int i) {
return PBHelper.convert(response.getElements(i));
}
@Override
@ -1066,46 +1064,46 @@ public class ClientNamenodeProtocolTranslatorPB implements
}
}
private class PathBasedCacheEntriesIterator
extends BatchedRemoteIterator<Long, PathBasedCacheDirective> {
private final PathBasedCacheDirective filter;
private class CacheEntriesIterator
extends BatchedRemoteIterator<Long, CacheDirectiveEntry> {
private final CacheDirectiveInfo filter;
public PathBasedCacheEntriesIterator(long prevKey,
PathBasedCacheDirective filter) {
public CacheEntriesIterator(long prevKey,
CacheDirectiveInfo filter) {
super(prevKey);
this.filter = filter;
}
@Override
public BatchedEntries<PathBasedCacheDirective> makeRequest(
public BatchedEntries<CacheDirectiveEntry> makeRequest(
Long nextKey) throws IOException {
ListPathBasedCacheDirectivesResponseProto response;
ListCacheDirectivesResponseProto response;
try {
response = rpcProxy.listPathBasedCacheDirectives(null,
ListPathBasedCacheDirectivesRequestProto.newBuilder().
response = rpcProxy.listCacheDirectives(null,
ListCacheDirectivesRequestProto.newBuilder().
setPrevId(nextKey).
setFilter(PBHelper.convert(filter)).
build());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
return new BatchedPathBasedCacheEntries(response);
return new BatchedCacheEntries(response);
}
@Override
public Long elementToPrevKey(PathBasedCacheDirective element) {
return element.getId();
public Long elementToPrevKey(CacheDirectiveEntry element) {
return element.getInfo().getId();
}
}
@Override
public RemoteIterator<PathBasedCacheDirective>
listPathBasedCacheDirectives(long prevId,
PathBasedCacheDirective filter) throws IOException {
public RemoteIterator<CacheDirectiveEntry>
listCacheDirectives(long prevId,
CacheDirectiveInfo filter) throws IOException {
if (filter == null) {
filter = new PathBasedCacheDirective.Builder().build();
filter = new CacheDirectiveInfo.Builder().build();
}
return new PathBasedCacheEntriesIterator(prevId, filter);
return new CacheEntriesIterator(prevId, filter);
}
@Override
@ -1143,11 +1141,11 @@ public class ClientNamenodeProtocolTranslatorPB implements
}
}
private static class BatchedPathDirectiveEntries
implements BatchedEntries<CachePoolInfo> {
private final ListCachePoolsResponseProto proto;
private static class BatchedCachePoolInfo
implements BatchedEntries<CachePoolInfo> {
private final ListCachePoolsResponseProto proto;
public BatchedPathDirectiveEntries(ListCachePoolsResponseProto proto) {
public BatchedCachePoolInfo(ListCachePoolsResponseProto proto) {
this.proto = proto;
}
@ -1179,7 +1177,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
public BatchedEntries<CachePoolInfo> makeRequest(String prevKey)
throws IOException {
try {
return new BatchedPathDirectiveEntries(
return new BatchedCachePoolInfo(
rpcProxy.listCachePools(null,
ListCachePoolsRequestProto.newBuilder().
setPrevPoolName(prevKey).build()));

View File

@ -37,11 +37,15 @@ import org.apache.hadoop.ha.proto.HAServiceProtocolProtos;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@ -57,12 +61,15 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathBasedCacheDirectiveInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto;
@ -1675,29 +1682,29 @@ public class PBHelper {
return DataChecksum.Type.valueOf(type.getNumber());
}
public static PathBasedCacheDirectiveInfoProto convert
(PathBasedCacheDirective directive) {
PathBasedCacheDirectiveInfoProto.Builder builder =
PathBasedCacheDirectiveInfoProto.newBuilder();
if (directive.getId() != null) {
builder.setId(directive.getId());
public static CacheDirectiveInfoProto convert
(CacheDirectiveInfo info) {
CacheDirectiveInfoProto.Builder builder =
CacheDirectiveInfoProto.newBuilder();
if (info.getId() != null) {
builder.setId(info.getId());
}
if (directive.getPath() != null) {
builder.setPath(directive.getPath().toUri().getPath());
if (info.getPath() != null) {
builder.setPath(info.getPath().toUri().getPath());
}
if (directive.getReplication() != null) {
builder.setReplication(directive.getReplication());
if (info.getReplication() != null) {
builder.setReplication(info.getReplication());
}
if (directive.getPool() != null) {
builder.setPool(directive.getPool());
if (info.getPool() != null) {
builder.setPool(info.getPool());
}
return builder.build();
}
public static PathBasedCacheDirective convert
(PathBasedCacheDirectiveInfoProto proto) {
PathBasedCacheDirective.Builder builder =
new PathBasedCacheDirective.Builder();
public static CacheDirectiveInfo convert
(CacheDirectiveInfoProto proto) {
CacheDirectiveInfo.Builder builder =
new CacheDirectiveInfo.Builder();
if (proto.hasId()) {
builder.setId(proto.getId());
}
@ -1714,6 +1721,37 @@ public class PBHelper {
return builder.build();
}
public static CacheDirectiveStatsProto convert(CacheDirectiveStats stats) {
CacheDirectiveStatsProto.Builder builder =
CacheDirectiveStatsProto.newBuilder();
builder.setBytesNeeded(stats.getBytesNeeded());
builder.setBytesCached(stats.getBytesCached());
builder.setFilesAffected(stats.getFilesAffected());
return builder.build();
}
public static CacheDirectiveStats convert(CacheDirectiveStatsProto proto) {
CacheDirectiveStats.Builder builder = new CacheDirectiveStats.Builder();
builder.setBytesNeeded(proto.getBytesNeeded());
builder.setBytesCached(proto.getBytesCached());
builder.setFilesAffected(proto.getFilesAffected());
return builder.build();
}
public static CacheDirectiveEntryProto convert(CacheDirectiveEntry entry) {
CacheDirectiveEntryProto.Builder builder =
CacheDirectiveEntryProto.newBuilder();
builder.setInfo(PBHelper.convert(entry.getInfo()));
builder.setStats(PBHelper.convert(entry.getStats()));
return builder.build();
}
public static CacheDirectiveEntry convert(CacheDirectiveEntryProto proto) {
CacheDirectiveInfo info = PBHelper.convert(proto.getInfo());
CacheDirectiveStats stats = PBHelper.convert(proto.getStats());
return new CacheDirectiveEntry(info, stats);
}
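// Editor's sketch (illustrative, not part of this patch): round-tripping an
// entry through the converters above; the builder setters are assumed from
// the CacheDirectiveInfo/CacheDirectiveStats builders shown in this hunk.
CacheDirectiveInfo info = new CacheDirectiveInfo.Builder()
    .setId(1L)
    .setPath(new Path("/cached/file"))
    .setReplication((short) 2)
    .setPool("pool1")
    .build();
CacheDirectiveStats stats = new CacheDirectiveStats.Builder()
    .setBytesNeeded(1024L)
    .setBytesCached(512L)
    .setFilesAffected(1L)
    .build();
CacheDirectiveEntryProto proto =
    PBHelper.convert(new CacheDirectiveEntry(info, stats)); // to protobuf
CacheDirectiveEntry back = PBHelper.convert(proto);         // and back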
public static CachePoolInfoProto convert(CachePoolInfo info) {
CachePoolInfoProto.Builder builder = CachePoolInfoProto.newBuilder();
builder.setPoolName(info.getPoolName());

View File

@ -32,7 +32,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirective;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.CacheManager;
@ -197,12 +197,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
scannedBlocks = 0;
namesystem.writeLock();
try {
rescanPathBasedCacheEntries();
} finally {
namesystem.writeUnlock();
}
namesystem.writeLock();
try {
rescanCacheDirectives();
rescanCachedBlockMap();
blockManager.getDatanodeManager().resetLastCachingDirectiveSentTime();
} finally {
@ -211,15 +206,18 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
}
/**
* Scan all PathBasedCacheEntries. Use the information to figure out
* Scan all CacheDirectives. Use the information to figure out
* what cache replication factor each block should have.
*/
private void rescanPathBasedCacheEntries() {
private void rescanCacheDirectives() {
FSDirectory fsDir = namesystem.getFSDirectory();
for (PathBasedCacheEntry pce : cacheManager.getEntriesById().values()) {
for (CacheDirective pce : cacheManager.getEntriesById().values()) {
scannedDirectives++;
pce.clearBytesNeeded();
pce.clearBytesCached();
pce.clearFilesAffected();
String path = pce.getPath();
INode node;
try {
@ -252,18 +250,24 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
}
/**
* Apply a PathBasedCacheEntry to a file.
* Apply a CacheDirective to a file.
*
* @param pce The PathBasedCacheEntry to apply.
* @param pce The CacheDirective to apply.
* @param file The file.
*/
private void rescanFile(PathBasedCacheEntry pce, INodeFile file) {
private void rescanFile(CacheDirective pce, INodeFile file) {
pce.incrementFilesAffected();
BlockInfo[] blockInfos = file.getBlocks();
long cachedTotal = 0;
long neededTotal = 0;
for (BlockInfo blockInfo : blockInfos) {
if (!blockInfo.getBlockUCState().equals(BlockUCState.COMPLETE)) {
// We don't try to cache blocks that are under construction.
continue;
}
long neededByBlock =
pce.getReplication() * blockInfo.getNumBytes();
neededTotal += neededByBlock;
Block block = new Block(blockInfo.getBlockId());
CachedBlock ncblock = new CachedBlock(block.getBlockId(),
pce.getReplication(), mark);
@ -271,17 +275,35 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
if (ocblock == null) {
cachedBlocks.put(ncblock);
} else {
// Update bytesUsed using the current replication levels.
// Assumptions: we assume that all the blocks are the same length
// on each datanode. We can assume this because we're only caching
// blocks in state COMPLETE.
// Note that if two directives are caching the same block(s), they will
// both get them added to their bytesCached.
List<DatanodeDescriptor> cachedOn =
ocblock.getDatanodes(Type.CACHED);
long cachedByBlock = Math.min(cachedOn.size(), pce.getReplication()) *
blockInfo.getNumBytes();
cachedTotal += cachedByBlock;
if (mark != ocblock.getMark()) {
// Mark hasn't been set in this scan, so update replication and mark.
ocblock.setReplicationAndMark(pce.getReplication(), mark);
} else {
// Mark already set in this scan. Set replication to highest value in
// any PathBasedCacheEntry that covers this file.
// any CacheDirective that covers this file.
ocblock.setReplicationAndMark((short)Math.max(
pce.getReplication(), ocblock.getReplication()), mark);
}
}
}
pce.addBytesNeeded(neededTotal);
pce.addBytesCached(cachedTotal);
if (LOG.isTraceEnabled()) {
LOG.debug("Directive " + pce.getEntryId() + " is caching " +
file.getFullPathName() + ": " + cachedTotal + "/" + neededTotal);
}
}
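// Editor's note -- a worked example of the arithmetic above, with
// illustrative numbers: for a 128MB block under a directive with
// replication 3 that is currently cached on 2 datanodes,
//   neededByBlock = 3 * 128MB = 384MB         (added to bytesNeeded)
//   cachedByBlock = min(2, 3) * 128MB = 256MB (added to bytesCached)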
/**

View File

@ -529,6 +529,21 @@ class BPOfferService {
}
}
private String blockIdArrayToString(long ids[]) {
long maxNumberOfBlocksToLog = dn.getMaxNumberOfBlocksToLog();
StringBuilder bld = new StringBuilder();
String prefix = "";
for (int i = 0; i < ids.length; i++) {
if (i >= maxNumberOfBlocksToLog) {
bld.append("...");
break;
}
bld.append(prefix).append(ids[i]);
prefix = ", ";
}
return bld.toString();
}
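// Editor's note -- example output (illustrative): with
// maxNumberOfBlocksToLog == 3,
//   blockIdArrayToString(new long[] {1, 2, 3, 4, 5}) returns "1, 2, 3..."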
/**
* This method should handle all commands from Active namenode except
* DNA_REGISTER which should be handled earlier itself.
@ -569,12 +584,16 @@ class BPOfferService {
dn.metrics.incrBlocksRemoved(toDelete.length);
break;
case DatanodeProtocol.DNA_CACHE:
LOG.info("DatanodeCommand action: DNA_CACHE");
LOG.info("DatanodeCommand action: DNA_CACHE for " +
blockIdCmd.getBlockPoolId() + " of [" +
blockIdArrayToString(blockIdCmd.getBlockIds()) + "]");
dn.getFSDataset().cache(blockIdCmd.getBlockPoolId(), blockIdCmd.getBlockIds());
dn.metrics.incrBlocksCached(blockIdCmd.getBlockIds().length);
break;
case DatanodeProtocol.DNA_UNCACHE:
LOG.info("DatanodeCommand action: DNA_UNCACHE");
LOG.info("DatanodeCommand action: DNA_UNCACHE for " +
blockIdCmd.getBlockPoolId() + " of [" +
blockIdArrayToString(blockIdCmd.getBlockIds()) + "]");
dn.getFSDataset().uncache(blockIdCmd.getBlockPoolId(), blockIdCmd.getBlockIds());
dn.metrics.incrBlocksUncached(blockIdCmd.getBlockIds().length);
break;

View File

@ -532,7 +532,7 @@ class BPServiceActor implements Runnable {
long sendCost = sendTime - createTime;
dn.getMetrics().addCacheReport(sendCost);
LOG.info("CacheReport of " + blockIds.size()
+ " blocks took " + createCost + " msec to generate and "
+ " block(s) took " + createCost + " msec to generate and "
+ sendCost + " msecs for RPC and NN processing");
}
return cmd;

View File

@ -206,6 +206,7 @@ public class DataNode extends Configured
private SecureResources secureResources = null;
private AbstractList<StorageLocation> dataDirs;
private Configuration conf;
private final long maxNumberOfBlocksToLog;
private final List<String> usersWithLocalPathAccess;
private boolean connectToDnViaHostname;
@ -221,6 +222,8 @@ public class DataNode extends Configured
final AbstractList<StorageLocation> dataDirs,
final SecureResources resources) throws IOException {
super(conf);
this.maxNumberOfBlocksToLog = conf.getLong(DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT);
this.usersWithLocalPathAccess = Arrays.asList(
conf.getTrimmedStrings(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY));
@ -1010,6 +1013,10 @@ public class DataNode extends Configured
}
}
public long getMaxNumberOfBlocksToLog() {
return maxNumberOfBlocksToLog;
}
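// Editor's sketch (illustrative, not part of this patch): raising the cap
// read in the constructor above; only the constant is used here, since the
// literal key string is defined in DFSConfigKeys.
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY, 1000L);
// A DataNode started with this conf then logs at most 1000 block IDs per
// DNA_CACHE/DNA_UNCACHE command before truncating with "...".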
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
Token<BlockTokenIdentifier> token) throws IOException {

View File

@ -48,11 +48,12 @@ import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CacheDirective;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
@ -100,11 +101,11 @@ public final class CacheManager {
/**
* Cache entries, sorted by ID.
*
* listPathBasedCacheDirectives relies on the ordering of elements in this map
* listCacheDirectives relies on the ordering of elements in this map
* to track what has already been listed by the client.
*/
private final TreeMap<Long, PathBasedCacheEntry> entriesById =
new TreeMap<Long, PathBasedCacheEntry>();
private final TreeMap<Long, CacheDirective> entriesById =
new TreeMap<Long, CacheDirective>();
/**
* The entry ID to use for a new entry. Entry IDs always increase, and are
@ -115,8 +116,8 @@ public final class CacheManager {
/**
* Cache entries, sorted by path
*/
private final TreeMap<String, List<PathBasedCacheEntry>> entriesByPath =
new TreeMap<String, List<PathBasedCacheEntry>>();
private final TreeMap<String, List<CacheDirective>> entriesByPath =
new TreeMap<String, List<CacheDirective>>();
/**
* Cache pools, sorted by name.
@ -236,7 +237,7 @@ public final class CacheManager {
return active;
}
public TreeMap<Long, PathBasedCacheEntry> getEntriesById() {
public TreeMap<Long, CacheDirective> getEntriesById() {
assert namesystem.hasReadLock();
return entriesById;
}
@ -264,7 +265,7 @@ public final class CacheManager {
}
}
private static String validatePoolName(PathBasedCacheDirective directive)
private static String validatePoolName(CacheDirectiveInfo directive)
throws InvalidRequestException {
String pool = directive.getPool();
if (pool == null) {
@ -276,7 +277,7 @@ public final class CacheManager {
return pool;
}
private static String validatePath(PathBasedCacheDirective directive)
private static String validatePath(CacheDirectiveInfo directive)
throws InvalidRequestException {
if (directive.getPath() == null) {
throw new InvalidRequestException("No path specified.");
@ -288,7 +289,7 @@ public final class CacheManager {
return path;
}
private static short validateReplication(PathBasedCacheDirective directive,
private static short validateReplication(CacheDirectiveInfo directive,
short defaultValue) throws InvalidRequestException {
short repl = (directive.getReplication() != null)
? directive.getReplication() : defaultValue;
@ -300,16 +301,16 @@ public final class CacheManager {
}
/**
* Get a PathBasedCacheEntry by ID, validating the ID and that the entry
* Get a CacheDirective by ID, validating the ID and that the entry
* exists.
*/
private PathBasedCacheEntry getById(long id) throws InvalidRequestException {
private CacheDirective getById(long id) throws InvalidRequestException {
// Check for invalid IDs.
if (id <= 0) {
throw new InvalidRequestException("Invalid negative ID.");
}
// Find the entry.
PathBasedCacheEntry entry = entriesById.get(id);
CacheDirective entry = entriesById.get(id);
if (entry == null) {
throw new InvalidRequestException("No directive with ID " + id
+ " found.");
@ -331,22 +332,22 @@ public final class CacheManager {
// RPC handlers
private void addInternal(PathBasedCacheEntry entry) {
private void addInternal(CacheDirective entry) {
entriesById.put(entry.getEntryId(), entry);
String path = entry.getPath();
List<PathBasedCacheEntry> entryList = entriesByPath.get(path);
List<CacheDirective> entryList = entriesByPath.get(path);
if (entryList == null) {
entryList = new ArrayList<PathBasedCacheEntry>(1);
entryList = new ArrayList<CacheDirective>(1);
entriesByPath.put(path, entryList);
}
entryList.add(entry);
}
public PathBasedCacheDirective addDirective(
PathBasedCacheDirective directive, FSPermissionChecker pc)
public CacheDirectiveInfo addDirective(
CacheDirectiveInfo directive, FSPermissionChecker pc)
throws IOException {
assert namesystem.hasWriteLock();
PathBasedCacheEntry entry;
CacheDirective entry;
try {
CachePool pool = getCachePool(validatePoolName(directive));
checkWritePermission(pc, pool);
@ -372,7 +373,7 @@ public final class CacheManager {
// Add a new entry with the next available ID.
id = getNextEntryId();
}
entry = new PathBasedCacheEntry(id, path, replication, pool);
entry = new CacheDirective(id, path, replication, pool);
addInternal(entry);
} catch (IOException e) {
LOG.warn("addDirective of " + directive + " failed: ", e);
@ -385,7 +386,7 @@ public final class CacheManager {
return entry.toDirective();
}
public void modifyDirective(PathBasedCacheDirective directive,
public void modifyDirective(CacheDirectiveInfo directive,
FSPermissionChecker pc) throws IOException {
assert namesystem.hasWriteLock();
String idString =
@ -397,7 +398,7 @@ public final class CacheManager {
if (id == null) {
throw new InvalidRequestException("Must supply an ID.");
}
PathBasedCacheEntry prevEntry = getById(id);
CacheDirective prevEntry = getById(id);
checkWritePermission(pc, prevEntry.getPool());
String path = prevEntry.getPath();
if (directive.getPath() != null) {
@ -413,8 +414,8 @@ public final class CacheManager {
checkWritePermission(pc, pool);
}
removeInternal(prevEntry);
PathBasedCacheEntry newEntry =
new PathBasedCacheEntry(id, path, replication, pool);
CacheDirective newEntry =
new CacheDirective(id, path, replication, pool);
addInternal(newEntry);
} catch (IOException e) {
LOG.warn("modifyDirective of " + idString + " failed: ", e);
@ -424,12 +425,12 @@ public final class CacheManager {
directive + ".");
}
public void removeInternal(PathBasedCacheEntry existing)
public void removeInternal(CacheDirective existing)
throws InvalidRequestException {
assert namesystem.hasWriteLock();
// Remove the corresponding entry in entriesByPath.
String path = existing.getPath();
List<PathBasedCacheEntry> entries = entriesByPath.get(path);
List<CacheDirective> entries = entriesByPath.get(path);
if (entries == null || !entries.remove(existing)) {
throw new InvalidRequestException("Failed to locate entry " +
existing.getEntryId() + " by path " + existing.getPath());
@ -444,7 +445,7 @@ public final class CacheManager {
throws IOException {
assert namesystem.hasWriteLock();
try {
PathBasedCacheEntry existing = getById(id);
CacheDirective existing = getById(id);
checkWritePermission(pc, existing.getPool());
removeInternal(existing);
} catch (IOException e) {
@ -457,9 +458,9 @@ public final class CacheManager {
LOG.info("removeDirective of " + id + " successful.");
}
public BatchedListEntries<PathBasedCacheDirective>
listPathBasedCacheDirectives(long prevId,
PathBasedCacheDirective filter,
public BatchedListEntries<CacheDirectiveEntry>
listCacheDirectives(long prevId,
CacheDirectiveInfo filter,
FSPermissionChecker pc) throws IOException {
assert namesystem.hasReadLock();
final int NUM_PRE_ALLOCATED_ENTRIES = 16;
@ -473,23 +474,23 @@ public final class CacheManager {
if (filter.getReplication() != null) {
throw new IOException("Filtering by replication is unsupported.");
}
ArrayList<PathBasedCacheDirective> replies =
new ArrayList<PathBasedCacheDirective>(NUM_PRE_ALLOCATED_ENTRIES);
ArrayList<CacheDirectiveEntry> replies =
new ArrayList<CacheDirectiveEntry>(NUM_PRE_ALLOCATED_ENTRIES);
int numReplies = 0;
SortedMap<Long, PathBasedCacheEntry> tailMap =
SortedMap<Long, CacheDirective> tailMap =
entriesById.tailMap(prevId + 1);
for (Entry<Long, PathBasedCacheEntry> cur : tailMap.entrySet()) {
for (Entry<Long, CacheDirective> cur : tailMap.entrySet()) {
if (numReplies >= maxListCacheDirectivesNumResponses) {
return new BatchedListEntries<PathBasedCacheDirective>(replies, true);
return new BatchedListEntries<CacheDirectiveEntry>(replies, true);
}
PathBasedCacheEntry curEntry = cur.getValue();
PathBasedCacheDirective directive = cur.getValue().toDirective();
CacheDirective curEntry = cur.getValue();
CacheDirectiveInfo info = cur.getValue().toDirective();
if (filter.getPool() != null &&
!directive.getPool().equals(filter.getPool())) {
!info.getPool().equals(filter.getPool())) {
continue;
}
if (filterPath != null &&
!directive.getPath().toUri().getPath().equals(filterPath)) {
!info.getPath().toUri().getPath().equals(filterPath)) {
continue;
}
boolean hasPermission = true;
@ -501,11 +502,11 @@ public final class CacheManager {
}
}
if (hasPermission) {
replies.add(cur.getValue().toDirective());
replies.add(new CacheDirectiveEntry(info, cur.getValue().toStats()));
numReplies++;
}
}
return new BatchedListEntries<PathBasedCacheDirective>(replies, false);
return new BatchedListEntries<CacheDirectiveEntry>(replies, false);
}
/**
@ -602,10 +603,10 @@ public final class CacheManager {
// Remove entries using this pool
// TODO: could optimize this somewhat to avoid the need to iterate
// over all entries in entriesById
Iterator<Entry<Long, PathBasedCacheEntry>> iter =
Iterator<Entry<Long, CacheDirective>> iter =
entriesById.entrySet().iterator();
while (iter.hasNext()) {
Entry<Long, PathBasedCacheEntry> entry = iter.next();
Entry<Long, CacheDirective> entry = iter.next();
if (entry.getValue().getPool() == pool) {
entriesByPath.remove(entry.getValue().getPath());
iter.remove();
@ -789,7 +790,7 @@ public final class CacheManager {
prog.setTotal(Phase.SAVING_CHECKPOINT, step, entriesById.size());
Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
out.writeInt(entriesById.size());
for (PathBasedCacheEntry entry: entriesById.values()) {
for (CacheDirective entry: entriesById.values()) {
out.writeLong(entry.getEntryId());
Text.writeString(out, entry.getPath());
out.writeShort(entry.getReplication());
@ -838,15 +839,15 @@ public final class CacheManager {
throw new IOException("Entry refers to pool " + poolName +
", which does not exist.");
}
PathBasedCacheEntry entry =
new PathBasedCacheEntry(entryId, path, replication, pool);
CacheDirective entry =
new CacheDirective(entryId, path, replication, pool);
if (entriesById.put(entry.getEntryId(), entry) != null) {
throw new IOException("An entry with ID " + entry.getEntryId() +
" already exists");
}
List<PathBasedCacheEntry> entries = entriesByPath.get(entry.getPath());
List<CacheDirective> entries = entriesByPath.get(entry.getPath());
if (entries == null) {
entries = new LinkedList<PathBasedCacheEntry>();
entries = new LinkedList<CacheDirective>();
entriesByPath.put(entry.getPath(), entries);
}
entries.add(entry);

View File

@ -47,7 +47,7 @@ public enum Content {
}
private Counts() {
super(Content.values());
super(Content.class);
}
}

View File

@ -2407,8 +2407,9 @@ public class FSDirectory implements Closeable {
if (dirNode.isRoot() && nsQuota == HdfsConstants.QUOTA_RESET) {
throw new IllegalArgumentException("Cannot clear namespace quota on root.");
} else { // a directory inode
long oldNsQuota = dirNode.getNsQuota();
long oldDsQuota = dirNode.getDsQuota();
final Quota.Counts oldQuota = dirNode.getQuotaCounts();
final long oldNsQuota = oldQuota.get(Quota.NAMESPACE);
final long oldDsQuota = oldQuota.get(Quota.DISKSPACE);
if (nsQuota == HdfsConstants.QUOTA_DONT_SET) {
nsQuota = oldNsQuota;
}
@ -2460,8 +2461,9 @@ public class FSDirectory implements Closeable {
try {
INodeDirectory dir = unprotectedSetQuota(src, nsQuota, dsQuota);
if (dir != null) {
fsImage.getEditLog().logSetQuota(src, dir.getNsQuota(),
dir.getDsQuota());
final Quota.Counts q = dir.getQuotaCounts();
fsImage.getEditLog().logSetQuota(src,
q.get(Quota.NAMESPACE), q.get(Quota.DISKSPACE));
}
} finally {
writeUnlock();

View File

@ -38,15 +38,15 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCachePoolOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddPathBasedCacheDirectiveOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyPathBasedCacheDirectiveOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCacheDirectiveInfoOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCacheDirectiveInfoOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllowSnapshotOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CancelDelegationTokenOp;
@ -63,7 +63,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCachePoolOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemovePathBasedCacheDirectiveOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCacheDirectiveInfoOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
@ -954,27 +954,27 @@ public class FSEditLog implements LogsPurgeable {
logEdit(op);
}
void logAddPathBasedCacheDirective(PathBasedCacheDirective directive,
void logAddCacheDirectiveInfo(CacheDirectiveInfo directive,
boolean toLogRpcIds) {
AddPathBasedCacheDirectiveOp op =
AddPathBasedCacheDirectiveOp.getInstance(cache.get())
AddCacheDirectiveInfoOp op =
AddCacheDirectiveInfoOp.getInstance(cache.get())
.setDirective(directive);
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
void logModifyPathBasedCacheDirective(
PathBasedCacheDirective directive, boolean toLogRpcIds) {
ModifyPathBasedCacheDirectiveOp op =
ModifyPathBasedCacheDirectiveOp.getInstance(
void logModifyCacheDirectiveInfo(
CacheDirectiveInfo directive, boolean toLogRpcIds) {
ModifyCacheDirectiveInfoOp op =
ModifyCacheDirectiveInfoOp.getInstance(
cache.get()).setDirective(directive);
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
void logRemovePathBasedCacheDirective(Long id, boolean toLogRpcIds) {
RemovePathBasedCacheDirectiveOp op =
RemovePathBasedCacheDirectiveOp.getInstance(cache.get()).setId(id);
void logRemoveCacheDirectiveInfo(Long id, boolean toLogRpcIds) {
RemoveCacheDirectiveInfoOp op =
RemoveCacheDirectiveInfoOp.getInstance(cache.get()).setId(id);
logRpcIds(op, toLogRpcIds);
logEdit(op);
}

View File

@ -36,13 +36,13 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCachePoolOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCloseOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddPathBasedCacheDirectiveOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCacheDirectiveInfoOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllowSnapshotOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.BlockListUpdatingOp;
@ -56,10 +56,10 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DisallowSnapshotOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.GetDelegationTokenOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCachePoolOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyPathBasedCacheDirectiveOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCacheDirectiveInfoOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemovePathBasedCacheDirectiveOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCacheDirectiveInfoOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
@ -639,8 +639,8 @@ public class FSEditLogLoader {
break;
}
case OP_ADD_PATH_BASED_CACHE_DIRECTIVE: {
AddPathBasedCacheDirectiveOp addOp = (AddPathBasedCacheDirectiveOp) op;
PathBasedCacheDirective result = fsNamesys.
AddCacheDirectiveInfoOp addOp = (AddCacheDirectiveInfoOp) op;
CacheDirectiveInfo result = fsNamesys.
getCacheManager().addDirective(addOp.directive, null);
if (toAddRetryCache) {
Long id = result.getId();
@ -649,8 +649,8 @@ public class FSEditLogLoader {
break;
}
case OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE: {
ModifyPathBasedCacheDirectiveOp modifyOp =
(ModifyPathBasedCacheDirectiveOp) op;
ModifyCacheDirectiveInfoOp modifyOp =
(ModifyCacheDirectiveInfoOp) op;
fsNamesys.getCacheManager().modifyDirective(
modifyOp.directive, null);
if (toAddRetryCache) {
@ -659,8 +659,8 @@ public class FSEditLogLoader {
break;
}
case OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE: {
RemovePathBasedCacheDirectiveOp removeOp =
(RemovePathBasedCacheDirectiveOp) op;
RemoveCacheDirectiveInfoOp removeOp =
(RemoveCacheDirectiveInfoOp) op;
fsNamesys.getCacheManager().removeDirective(removeOp.id, null);
if (toAddRetryCache) {
fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId);

View File

@ -86,7 +86,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.util.XMLUtils;
import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
@ -166,11 +166,11 @@ public abstract class FSEditLogOp {
inst.put(OP_SET_GENSTAMP_V2, new SetGenstampV2Op());
inst.put(OP_ALLOCATE_BLOCK_ID, new AllocateBlockIdOp());
inst.put(OP_ADD_PATH_BASED_CACHE_DIRECTIVE,
new AddPathBasedCacheDirectiveOp());
new AddCacheDirectiveInfoOp());
inst.put(OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE,
new ModifyPathBasedCacheDirectiveOp());
new ModifyCacheDirectiveInfoOp());
inst.put(OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE,
new RemovePathBasedCacheDirectiveOp());
new RemoveCacheDirectiveInfoOp());
inst.put(OP_ADD_CACHE_POOL, new AddCachePoolOp());
inst.put(OP_MODIFY_CACHE_POOL, new ModifyCachePoolOp());
inst.put(OP_REMOVE_CACHE_POOL, new RemoveCachePoolOp());
@ -2868,22 +2868,22 @@ public abstract class FSEditLogOp {
/**
* {@literal @AtMostOnce} for
* {@link ClientProtocol#addPathBasedCacheDirective}
* {@link ClientProtocol#addCacheDirective}
*/
static class AddPathBasedCacheDirectiveOp extends FSEditLogOp {
PathBasedCacheDirective directive;
static class AddCacheDirectiveInfoOp extends FSEditLogOp {
CacheDirectiveInfo directive;
public AddPathBasedCacheDirectiveOp() {
public AddCacheDirectiveInfoOp() {
super(OP_ADD_PATH_BASED_CACHE_DIRECTIVE);
}
static AddPathBasedCacheDirectiveOp getInstance(OpInstanceCache cache) {
return (AddPathBasedCacheDirectiveOp) cache
static AddCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
return (AddCacheDirectiveInfoOp) cache
.get(OP_ADD_PATH_BASED_CACHE_DIRECTIVE);
}
public AddPathBasedCacheDirectiveOp setDirective(
PathBasedCacheDirective directive) {
public AddCacheDirectiveInfoOp setDirective(
CacheDirectiveInfo directive) {
this.directive = directive;
assert(directive.getId() != null);
assert(directive.getPath() != null);
@ -2898,7 +2898,7 @@ public abstract class FSEditLogOp {
String path = FSImageSerialization.readString(in);
short replication = FSImageSerialization.readShort(in);
String pool = FSImageSerialization.readString(in);
directive = new PathBasedCacheDirective.Builder().
directive = new CacheDirectiveInfo.Builder().
setId(id).
setPath(new Path(path)).
setReplication(replication).
@ -2930,7 +2930,7 @@ public abstract class FSEditLogOp {
@Override
void fromXml(Stanza st) throws InvalidXmlException {
directive = new PathBasedCacheDirective.Builder().
directive = new CacheDirectiveInfo.Builder().
setId(Long.parseLong(st.getValue("ID"))).
setPath(new Path(st.getValue("PATH"))).
setReplication(Short.parseShort(st.getValue("REPLICATION"))).
@ -2942,7 +2942,7 @@ public abstract class FSEditLogOp {
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("AddPathBasedCacheDirective [");
builder.append("AddCacheDirectiveInfo [");
builder.append("id=" + directive.getId() + ",");
builder.append("path=" + directive.getPath().toUri().getPath() + ",");
builder.append("replication=" + directive.getReplication() + ",");
@ -2955,22 +2955,22 @@ public abstract class FSEditLogOp {
/**
* {@literal @AtMostOnce} for
* {@link ClientProtocol#modifyPathBasedCacheDirective}
* {@link ClientProtocol#modifyCacheDirective}
*/
static class ModifyPathBasedCacheDirectiveOp extends FSEditLogOp {
PathBasedCacheDirective directive;
static class ModifyCacheDirectiveInfoOp extends FSEditLogOp {
CacheDirectiveInfo directive;
public ModifyPathBasedCacheDirectiveOp() {
public ModifyCacheDirectiveInfoOp() {
super(OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE);
}
static ModifyPathBasedCacheDirectiveOp getInstance(OpInstanceCache cache) {
return (ModifyPathBasedCacheDirectiveOp) cache
static ModifyCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
return (ModifyCacheDirectiveInfoOp) cache
.get(OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE);
}
public ModifyPathBasedCacheDirectiveOp setDirective(
PathBasedCacheDirective directive) {
public ModifyCacheDirectiveInfoOp setDirective(
CacheDirectiveInfo directive) {
this.directive = directive;
assert(directive.getId() != null);
return this;
@ -2978,8 +2978,8 @@ public abstract class FSEditLogOp {
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
PathBasedCacheDirective.Builder builder =
new PathBasedCacheDirective.Builder();
CacheDirectiveInfo.Builder builder =
new CacheDirectiveInfo.Builder();
builder.setId(FSImageSerialization.readLong(in));
byte flags = in.readByte();
if ((flags & 0x1) != 0) {
@ -2993,7 +2993,7 @@ public abstract class FSEditLogOp {
}
if ((flags & ~0x7) != 0) {
throw new IOException("unknown flags set in " +
"ModifyPathBasedCacheDirectiveOp: " + flags);
"ModifyCacheDirectiveInfoOp: " + flags);
}
this.directive = builder.build();
readRpcIds(in, logVersion);
@ -3041,8 +3041,8 @@ public abstract class FSEditLogOp {
@Override
void fromXml(Stanza st) throws InvalidXmlException {
PathBasedCacheDirective.Builder builder =
new PathBasedCacheDirective.Builder();
CacheDirectiveInfo.Builder builder =
new CacheDirectiveInfo.Builder();
builder.setId(Long.parseLong(st.getValue("ID")));
String path = st.getValueOrNull("PATH");
if (path != null) {
@ -3063,7 +3063,7 @@ public abstract class FSEditLogOp {
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("ModifyPathBasedCacheDirectiveOp[");
builder.append("ModifyCacheDirectiveInfoOp[");
builder.append("id=").append(directive.getId());
if (directive.getPath() != null) {
builder.append(",").append("path=").append(directive.getPath());
@ -3083,21 +3083,21 @@ public abstract class FSEditLogOp {
/**
* {@literal @AtMostOnce} for
* {@link ClientProtocol#removePathBasedCacheDirective}
* {@link ClientProtocol#removeCacheDirective}
*/
static class RemovePathBasedCacheDirectiveOp extends FSEditLogOp {
static class RemoveCacheDirectiveInfoOp extends FSEditLogOp {
long id;
public RemovePathBasedCacheDirectiveOp() {
public RemoveCacheDirectiveInfoOp() {
super(OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE);
}
static RemovePathBasedCacheDirectiveOp getInstance(OpInstanceCache cache) {
return (RemovePathBasedCacheDirectiveOp) cache
static RemoveCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
return (RemoveCacheDirectiveInfoOp) cache
.get(OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE);
}
public RemovePathBasedCacheDirectiveOp setId(long id) {
public RemoveCacheDirectiveInfoOp setId(long id) {
this.id = id;
return this;
}
@ -3129,7 +3129,7 @@ public abstract class FSEditLogOp {
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("RemovePathBasedCacheDirective [");
builder.append("RemoveCacheDirectiveInfo [");
builder.append("id=" + Long.toString(id));
appendRpcIdsToString(builder, rpcClientId, rpcCallId);
builder.append("]");

View File

@ -777,18 +777,22 @@ public class FSImage implements Closeable {
if (dir.isQuotaSet()) {
// check if quota is violated. It indicates a software bug.
final Quota.Counts q = dir.getQuotaCounts();
final long namespace = counts.get(Quota.NAMESPACE) - parentNamespace;
if (Quota.isViolated(dir.getNsQuota(), namespace)) {
final long nsQuota = q.get(Quota.NAMESPACE);
if (Quota.isViolated(nsQuota, namespace)) {
LOG.error("BUG: Namespace quota violation in image for "
+ dir.getFullPathName()
+ " quota = " + dir.getNsQuota() + " < consumed = " + namespace);
+ " quota = " + nsQuota + " < consumed = " + namespace);
}
final long diskspace = counts.get(Quota.DISKSPACE) - parentDiskspace;
if (Quota.isViolated(dir.getDsQuota(), diskspace)) {
final long dsQuota = q.get(Quota.DISKSPACE);
if (Quota.isViolated(dsQuota, diskspace)) {
LOG.error("BUG: Diskspace quota violation in image for "
+ dir.getFullPathName()
+ " quota = " + dir.getDsQuota() + " < consumed = " + diskspace);
+ " quota = " + dsQuota + " < consumed = " + diskspace);
}
((INodeDirectoryWithQuota)dir).setSpaceConsumed(namespace, diskspace);

View File

@ -371,8 +371,9 @@ public class FSImageFormat {
/** Update the root node's attributes */
private void updateRootAttr(INodeWithAdditionalFields root) {
long nsQuota = root.getNsQuota();
long dsQuota = root.getDsQuota();
final Quota.Counts q = root.getQuotaCounts();
final long nsQuota = q.get(Quota.NAMESPACE);
final long dsQuota = q.get(Quota.DISKSPACE);
FSDirectory fsDir = namesystem.dir;
if (nsQuota != -1 || dsQuota != -1) {
fsDir.rootDir.setQuota(nsQuota, dsQuota);

View File

@ -219,6 +219,12 @@ public class FSImageSerialization {
out.writeLong(file.getPreferredBlockSize());
}
private static void writeQuota(Quota.Counts quota, DataOutput out)
throws IOException {
out.writeLong(quota.get(Quota.NAMESPACE));
out.writeLong(quota.get(Quota.DISKSPACE));
}
/**
* Serialize a {@link INodeDirectory}
* @param node The node to write
@ -234,8 +240,8 @@ public class FSImageSerialization {
out.writeLong(0); // preferred block size
out.writeInt(-1); // # of blocks
out.writeLong(node.getNsQuota());
out.writeLong(node.getDsQuota());
writeQuota(node.getQuotaCounts(), out);
if (node instanceof INodeDirectorySnapshottable) {
out.writeBoolean(true);
} else {
@ -256,9 +262,7 @@ public class FSImageSerialization {
writeLocalName(a, out);
writePermissionStatus(a, out);
out.writeLong(a.getModificationTime());
out.writeLong(a.getNsQuota());
out.writeLong(a.getDsQuota());
writeQuota(a.getQuotaCounts(), out);
}
/**

View File

@ -152,7 +152,8 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@ -7056,8 +7057,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
}
long addPathBasedCacheDirective(
PathBasedCacheDirective directive) throws IOException {
long addCacheDirective(
CacheDirectiveInfo directive) throws IOException {
checkOperation(OperationCategory.WRITE);
final FSPermissionChecker pc = isPermissionEnabled ?
getPermissionChecker() : null;
@ -7073,15 +7074,15 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) {
throw new SafeModeException(
"Cannot add PathBasedCache directive", safeMode);
"Cannot add cache directive", safeMode);
}
if (directive.getId() != null) {
throw new IOException("addDirective: you cannot specify an ID " +
"for this operation.");
}
PathBasedCacheDirective effectiveDirective =
CacheDirectiveInfo effectiveDirective =
cacheManager.addDirective(directive, pc);
getEditLog().logAddPathBasedCacheDirective(effectiveDirective,
getEditLog().logAddCacheDirectiveInfo(effectiveDirective,
cacheEntry != null);
result = effectiveDirective.getId();
success = true;
@ -7091,15 +7092,15 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
getEditLog().logSync();
}
if (isAuditEnabled() && isExternalInvocation()) {
logAuditEvent(success, "addPathBasedCacheDirective", null, null, null);
logAuditEvent(success, "addCacheDirective", null, null, null);
}
RetryCache.setState(cacheEntry, success, result);
}
return result;
}
void modifyPathBasedCacheDirective(
PathBasedCacheDirective directive) throws IOException {
void modifyCacheDirective(
CacheDirectiveInfo directive) throws IOException {
checkOperation(OperationCategory.WRITE);
final FSPermissionChecker pc = isPermissionEnabled ?
getPermissionChecker() : null;
@ -7113,10 +7114,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) {
throw new SafeModeException(
"Cannot add PathBasedCache directive", safeMode);
"Cannot add cache directive", safeMode);
}
cacheManager.modifyDirective(directive, pc);
getEditLog().logModifyPathBasedCacheDirective(directive,
getEditLog().logModifyCacheDirectiveInfo(directive,
cacheEntry != null);
success = true;
} finally {
@ -7125,13 +7126,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
getEditLog().logSync();
}
if (isAuditEnabled() && isExternalInvocation()) {
logAuditEvent(success, "addPathBasedCacheDirective", null, null, null);
logAuditEvent(success, "addCacheDirective", null, null, null);
}
RetryCache.setState(cacheEntry, success);
}
}
void removePathBasedCacheDirective(Long id) throws IOException {
void removeCacheDirective(Long id) throws IOException {
checkOperation(OperationCategory.WRITE);
final FSPermissionChecker pc = isPermissionEnabled ?
getPermissionChecker() : null;
@ -7145,15 +7146,15 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) {
throw new SafeModeException(
"Cannot remove PathBasedCache directives", safeMode);
"Cannot remove cache directives", safeMode);
}
cacheManager.removeDirective(id, pc);
getEditLog().logRemovePathBasedCacheDirective(id, cacheEntry != null);
getEditLog().logRemoveCacheDirectiveInfo(id, cacheEntry != null);
success = true;
} finally {
writeUnlock();
if (isAuditEnabled() && isExternalInvocation()) {
logAuditEvent(success, "removePathBasedCacheDirective", null, null,
logAuditEvent(success, "removeCacheDirective", null, null,
null);
}
RetryCache.setState(cacheEntry, success);
@ -7161,23 +7162,23 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
getEditLog().logSync();
}
BatchedListEntries<PathBasedCacheDirective> listPathBasedCacheDirectives(
long startId, PathBasedCacheDirective filter) throws IOException {
BatchedListEntries<CacheDirectiveEntry> listCacheDirectives(
long startId, CacheDirectiveInfo filter) throws IOException {
checkOperation(OperationCategory.READ);
final FSPermissionChecker pc = isPermissionEnabled ?
getPermissionChecker() : null;
BatchedListEntries<PathBasedCacheDirective> results;
BatchedListEntries<CacheDirectiveEntry> results;
readLock();
boolean success = false;
try {
checkOperation(OperationCategory.READ);
results =
cacheManager.listPathBasedCacheDirectives(startId, filter, pc);
cacheManager.listCacheDirectives(startId, filter, pc);
success = true;
} finally {
readUnlock();
if (isAuditEnabled() && isExternalInvocation()) {
logAuditEvent(success, "listPathBasedCacheDirectives", null, null,
logAuditEvent(success, "listCacheDirectives", null, null,
null);
}
}

View File

@ -383,10 +383,11 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
public final ContentSummary computeAndConvertContentSummary(
ContentSummaryComputationContext summary) {
Content.Counts counts = computeContentSummary(summary).getCounts();
final Quota.Counts q = getQuotaCounts();
return new ContentSummary(counts.get(Content.LENGTH),
counts.get(Content.FILE) + counts.get(Content.SYMLINK),
counts.get(Content.DIRECTORY), getNsQuota(),
counts.get(Content.DISKSPACE), getDsQuota());
counts.get(Content.DIRECTORY), q.get(Quota.NAMESPACE),
counts.get(Content.DISKSPACE), q.get(Quota.DISKSPACE));
}
/**
@ -412,18 +413,15 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
/**
* Get the quota set for this inode
* @return the quota if it is set; -1 otherwise
* @return the quota counts; a count of -1 means the corresponding quota is not set.
*/
public long getNsQuota() {
return -1;
}
public long getDsQuota() {
return -1;
public Quota.Counts getQuotaCounts() {
return Quota.Counts.newInstance(-1, -1);
}
public final boolean isQuotaSet() {
return getNsQuota() >= 0 || getDsQuota() >= 0;
final Quota.Counts q = getQuotaCounts();
return q.get(Quota.NAMESPACE) >= 0 || q.get(Quota.DISKSPACE) >= 0;
}
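// Editor's sketch (illustrative, not part of this patch): the unified
// access pattern that replaces getNsQuota()/getDsQuota(), for some
// INode "inode".
Quota.Counts q = inode.getQuotaCounts();
long nsQuota = q.get(Quota.NAMESPACE); // -1 means no namespace quota set
long dsQuota = q.get(Quota.DISKSPACE); // -1 means no diskspace quota set
boolean quotaSet = nsQuota >= 0 || dsQuota >= 0; // mirrors isQuotaSet()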
/**

View File

@ -612,8 +612,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
@Override
public boolean metadataEquals(INodeDirectoryAttributes other) {
return other != null
&& getNsQuota() == other.getNsQuota()
&& getDsQuota() == other.getDsQuota()
&& getQuotaCounts().equals(other.getQuotaCounts())
&& getPermissionLong() == other.getPermissionLong();
}

View File

@ -27,9 +27,7 @@ import com.google.common.base.Preconditions;
*/
@InterfaceAudience.Private
public interface INodeDirectoryAttributes extends INodeAttributes {
public long getNsQuota();
public long getDsQuota();
public Quota.Counts getQuotaCounts();
public boolean metadataEquals(INodeDirectoryAttributes other);
@ -46,20 +44,14 @@ public interface INodeDirectoryAttributes extends INodeAttributes {
}
@Override
public long getNsQuota() {
return -1;
}
@Override
public long getDsQuota() {
return -1;
public Quota.Counts getQuotaCounts() {
return Quota.Counts.newInstance(-1, -1);
}
@Override
public boolean metadataEquals(INodeDirectoryAttributes other) {
return other != null
&& getNsQuota() == other.getNsQuota()
&& getDsQuota() == other.getDsQuota()
&& this.getQuotaCounts().equals(other.getQuotaCounts())
&& getPermissionLong() == other.getPermissionLong();
}
}
@ -68,6 +60,7 @@ public interface INodeDirectoryAttributes extends INodeAttributes {
private final long nsQuota;
private final long dsQuota;
public CopyWithQuota(byte[] name, PermissionStatus permissions,
long modificationTime, long nsQuota, long dsQuota) {
super(name, permissions, modificationTime);
@ -78,18 +71,14 @@ public interface INodeDirectoryAttributes extends INodeAttributes {
public CopyWithQuota(INodeDirectory dir) {
super(dir);
Preconditions.checkArgument(dir.isQuotaSet());
this.nsQuota = dir.getNsQuota();
this.dsQuota = dir.getDsQuota();
final Quota.Counts q = dir.getQuotaCounts();
this.nsQuota = q.get(Quota.NAMESPACE);
this.dsQuota = q.get(Quota.DISKSPACE);
}
@Override
public final long getNsQuota() {
return nsQuota;
}
@Override
public final long getDsQuota() {
return dsQuota;
public Quota.Counts getQuotaCounts() {
return Quota.Counts.newInstance(nsQuota, dsQuota);
}
}
}

View File

@ -44,7 +44,7 @@ public class INodeDirectoryWithQuota extends INodeDirectory {
* @param dsQuota Diskspace quota to be assigned to this indoe
* @param other The other inode from which all other properties are copied
*/
public INodeDirectoryWithQuota(INodeDirectory other, boolean adopt,
INodeDirectoryWithQuota(INodeDirectory other, boolean adopt,
long nsQuota, long dsQuota) {
super(other, adopt);
final Quota.Counts counts = other.computeQuotaUsage();
@ -54,6 +54,11 @@ public class INodeDirectoryWithQuota extends INodeDirectory {
this.dsQuota = dsQuota;
}
public INodeDirectoryWithQuota(INodeDirectory other, boolean adopt,
Quota.Counts quota) {
this(other, adopt, quota.get(Quota.NAMESPACE), quota.get(Quota.DISKSPACE));
}
/** constructor with no quota verification */
INodeDirectoryWithQuota(long id, byte[] name, PermissionStatus permissions,
long modificationTime, long nsQuota, long dsQuota) {
@ -67,20 +72,9 @@ public class INodeDirectoryWithQuota extends INodeDirectory {
super(id, name, permissions, 0L);
}
/** Get this directory's namespace quota
* @return this directory's namespace quota
*/
@Override
public long getNsQuota() {
return nsQuota;
}
/** Get this directory's diskspace quota
* @return this directory's diskspace quota
*/
@Override
public long getDsQuota() {
return dsQuota;
public Quota.Counts getQuotaCounts() {
return Quota.Counts.newInstance(nsQuota, dsQuota);
}
/** Set this directory's quota
@ -120,7 +114,7 @@ public class INodeDirectoryWithQuota extends INodeDirectory {
}
private void checkDiskspace(final long computed) {
if (-1 != getDsQuota() && diskspace != computed) {
if (-1 != getQuotaCounts().get(Quota.DISKSPACE) && diskspace != computed) {
NameNode.LOG.error("BUG: Inconsistent diskspace for directory "
+ getFullPathName() + ". Cached = " + diskspace
+ " != Computed = " + computed);

View File

@ -295,15 +295,10 @@ public abstract class INodeReference extends INode {
}
@Override
public final long getNsQuota() {
return referred.getNsQuota();
public Quota.Counts getQuotaCounts() {
return referred.getQuotaCounts();
}
@Override
public final long getDsQuota() {
return referred.getDsQuota();
}
@Override
public final void clear() {
super.clear();

View File

@ -804,6 +804,10 @@ public class NameNode implements NameNodeStatusMXBean {
return httpServer.getHttpAddress();
}
/**
* @return NameNode HTTPS address, used by the Web UI, image transfer,
* and HTTP-based file system clients like Hftp and WebHDFS
*/
public InetSocketAddress getHttpsAddress() {
return httpServer.getHttpsAddress();
}

View File

@ -61,7 +61,8 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@ -1236,52 +1237,52 @@ class NameNodeRpcServer implements NamenodeProtocols {
}
@Override
public long addPathBasedCacheDirective(
PathBasedCacheDirective path) throws IOException {
return namesystem.addPathBasedCacheDirective(path);
public long addCacheDirective(
CacheDirectiveInfo path) throws IOException {
return namesystem.addCacheDirective(path);
}
@Override
public void modifyPathBasedCacheDirective(
PathBasedCacheDirective directive) throws IOException {
namesystem.modifyPathBasedCacheDirective(directive);
public void modifyCacheDirective(
CacheDirectiveInfo directive) throws IOException {
namesystem.modifyCacheDirective(directive);
}
@Override
public void removePathBasedCacheDirective(long id) throws IOException {
namesystem.removePathBasedCacheDirective(id);
public void removeCacheDirective(long id) throws IOException {
namesystem.removeCacheDirective(id);
}
private class ServerSidePathBasedCacheEntriesIterator
extends BatchedRemoteIterator<Long, PathBasedCacheDirective> {
private class ServerSideCacheEntriesIterator
extends BatchedRemoteIterator<Long, CacheDirectiveEntry> {
private final PathBasedCacheDirective filter;
private final CacheDirectiveInfo filter;
public ServerSidePathBasedCacheEntriesIterator(Long firstKey,
PathBasedCacheDirective filter) {
public ServerSideCacheEntriesIterator(Long firstKey,
CacheDirectiveInfo filter) {
super(firstKey);
this.filter = filter;
}
@Override
public BatchedEntries<PathBasedCacheDirective> makeRequest(
public BatchedEntries<CacheDirectiveEntry> makeRequest(
Long nextKey) throws IOException {
return namesystem.listPathBasedCacheDirectives(nextKey, filter);
return namesystem.listCacheDirectives(nextKey, filter);
}
@Override
public Long elementToPrevKey(PathBasedCacheDirective entry) {
return entry.getId();
public Long elementToPrevKey(CacheDirectiveEntry entry) {
return entry.getInfo().getId();
}
}
@Override
public RemoteIterator<PathBasedCacheDirective> listPathBasedCacheDirectives(long prevId,
PathBasedCacheDirective filter) throws IOException {
public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(long prevId,
CacheDirectiveInfo filter) throws IOException {
if (filter == null) {
filter = new PathBasedCacheDirective.Builder().build();
filter = new CacheDirectiveInfo.Builder().build();
}
return new ServerSidePathBasedCacheEntriesIterator(prevId, filter);
return new ServerSideCacheEntriesIterator(prevId, filter);
}
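// Editor's sketch (illustrative, not part of this patch): consuming the
// renamed listing API from a DistributedFileSystem "dfs"; the getters are
// the ones used elsewhere in this patch.
RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(
    new CacheDirectiveInfo.Builder().setPool("pool1").build());
while (it.hasNext()) {
  CacheDirectiveEntry e = it.next();
  System.out.println(e.getInfo().getId() + ": "
      + e.getStats().getBytesCached() + "/"
      + e.getStats().getBytesNeeded() + " bytes cached");
}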
@Override

View File

@ -23,7 +23,6 @@ import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;
import java.lang.management.MemoryUsage;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URLEncoder;
@ -57,7 +56,6 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.Status;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.UserGroupInformation;
@ -1087,7 +1085,7 @@ class NamenodeJspHelper {
doc.endTag();
doc.startTag("ds_quota");
doc.pcdata(""+inode.getDsQuota());
doc.pcdata(""+inode.getQuotaCounts().get(Quota.DISKSPACE));
doc.endTag();
doc.startTag("permission_status");

View File

@ -41,7 +41,7 @@ public enum Quota {
}
Counts() {
super(Quota.values());
super(Quota.class);
}
}

View File

@ -491,7 +491,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
INodeDirectoryWithSnapshot(INodeDirectory that, boolean adopt,
DirectoryDiffList diffs) {
super(that, adopt, that.getNsQuota(), that.getDsQuota());
super(that, adopt, that.getQuotaCounts());
this.diffs = diffs != null? diffs: new DirectoryDiffList();
}

View File

@ -68,6 +68,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.web.JsonUtil;
import org.apache.hadoop.hdfs.web.ParamFilter;
import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
@ -98,6 +99,7 @@ import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.Credentials;
@ -214,7 +216,8 @@ public class NamenodeWebHdfsMethods {
final Credentials c = DelegationTokenSecretManager.createCredentials(
namenode, ugi, renewer != null? renewer: ugi.getShortUserName());
final Token<? extends TokenIdentifier> t = c.getAllTokens().iterator().next();
t.setKind(WebHdfsFileSystem.TOKEN_KIND);
Text kind = request.getScheme().equals("http")
    ? WebHdfsFileSystem.TOKEN_KIND : SWebHdfsFileSystem.TOKEN_KIND;
t.setKind(kind);
return t;
}
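// Editor's note -- the scheme check above selects the delegation token
// kind: "http" requests get WebHdfsFileSystem.TOKEN_KIND, while anything
// else (in practice "https") gets SWebHdfsFileSystem.TOKEN_KIND, so tokens
// fetched over SSL are later matched to the right filesystem for renewal.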

View File

@ -30,8 +30,10 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.server.namenode.CachePool;
import org.apache.hadoop.hdfs.tools.TableListing.Justification;
import org.apache.hadoop.ipc.RemoteException;
@ -120,7 +122,7 @@ public class CacheAdmin extends Configured implements Tool {
int run(Configuration conf, List<String> args) throws IOException;
}
private static class AddPathBasedCacheDirectiveCommand implements Command {
private static class AddCacheDirectiveInfoCommand implements Command {
@Override
public String getName() {
return "-addDirective";
@ -143,7 +145,7 @@ public class CacheAdmin extends Configured implements Tool {
"added. You must have write permission on the cache pool "
+ "in order to add new directives.");
return getShortUsage() + "\n" +
"Add a new PathBasedCache directive.\n\n" +
"Add a new cache directive.\n\n" +
listing.toString();
}
@ -171,14 +173,14 @@ public class CacheAdmin extends Configured implements Tool {
}
DistributedFileSystem dfs = getDFS(conf);
PathBasedCacheDirective directive = new PathBasedCacheDirective.Builder().
CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder().
setPath(new Path(path)).
setReplication(replication).
setPool(poolName).
build();
try {
long id = dfs.addPathBasedCacheDirective(directive);
System.out.println("Added PathBasedCache entry " + id);
long id = dfs.addCacheDirective(directive);
System.out.println("Added cache directive " + id);
} catch (IOException e) {
System.err.println(prettifyException(e));
return 2;
@ -188,7 +190,7 @@ public class CacheAdmin extends Configured implements Tool {
}
}
private static class RemovePathBasedCacheDirectiveCommand implements Command {
private static class RemoveCacheDirectiveInfoCommand implements Command {
@Override
public String getName() {
return "-removeDirective";
@ -205,7 +207,7 @@ public class CacheAdmin extends Configured implements Tool {
listing.addRow("<id>", "The id of the cache directive to remove. " +
"You must have write permission on the pool of the " +
"directive in order to remove it. To see a list " +
"of PathBasedCache directive IDs, use the -listDirectives command.");
"of cache directive IDs, use the -listDirectives command.");
return getShortUsage() + "\n" +
"Remove a cache directive.\n\n" +
listing.toString();
@ -238,8 +240,8 @@ public class CacheAdmin extends Configured implements Tool {
}
DistributedFileSystem dfs = getDFS(conf);
try {
dfs.getClient().removePathBasedCacheDirective(id);
System.out.println("Removed PathBasedCache directive " + id);
dfs.getClient().removeCacheDirective(id);
System.out.println("Removed cached directive " + id);
} catch (IOException e) {
System.err.println(prettifyException(e));
return 2;
@ -248,7 +250,7 @@ public class CacheAdmin extends Configured implements Tool {
}
}
private static class ModifyPathBasedCacheDirectiveCommand implements Command {
private static class ModifyCacheDirectiveInfoCommand implements Command {
@Override
public String getName() {
return "-modifyDirective";
@ -273,14 +275,14 @@ public class CacheAdmin extends Configured implements Tool {
"added. You must have write permission on the cache pool "
+ "in order to move a directive into it. (optional)");
return getShortUsage() + "\n" +
"Modify a PathBasedCache directive.\n\n" +
"Modify a cache directive.\n\n" +
listing.toString();
}
@Override
public int run(Configuration conf, List<String> args) throws IOException {
PathBasedCacheDirective.Builder builder =
new PathBasedCacheDirective.Builder();
CacheDirectiveInfo.Builder builder =
new CacheDirectiveInfo.Builder();
boolean modified = false;
String idString = StringUtils.popOptionWithArgument("-id", args);
if (idString == null) {
@ -316,8 +318,8 @@ public class CacheAdmin extends Configured implements Tool {
}
DistributedFileSystem dfs = getDFS(conf);
try {
dfs.modifyPathBasedCacheDirective(builder.build());
System.out.println("Modified PathBasedCache entry " + idString);
dfs.modifyCacheDirective(builder.build());
System.out.println("Modified cache directive " + idString);
} catch (IOException e) {
System.err.println(prettifyException(e));
return 2;
@@ -326,7 +328,7 @@ public class CacheAdmin extends Configured implements Tool {
}
}
private static class RemovePathBasedCacheDirectivesCommand implements Command {
private static class RemoveCacheDirectiveInfosCommand implements Command {
@Override
public String getName() {
return "-removeDirectives";
@@ -362,31 +364,31 @@ public class CacheAdmin extends Configured implements Tool {
return 1;
}
DistributedFileSystem dfs = getDFS(conf);
RemoteIterator<PathBasedCacheDirective> iter =
dfs.listPathBasedCacheDirectives(
new PathBasedCacheDirective.Builder().
RemoteIterator<CacheDirectiveEntry> iter =
dfs.listCacheDirectives(
new CacheDirectiveInfo.Builder().
setPath(new Path(path)).build());
int exitCode = 0;
while (iter.hasNext()) {
PathBasedCacheDirective directive = iter.next();
CacheDirectiveEntry entry = iter.next();
try {
dfs.removePathBasedCacheDirective(directive.getId());
System.out.println("Removed PathBasedCache directive " +
directive.getId());
dfs.removeCacheDirective(entry.getInfo().getId());
System.out.println("Removed cache directive " +
entry.getInfo().getId());
} catch (IOException e) {
System.err.println(prettifyException(e));
exitCode = 2;
}
}
if (exitCode == 0) {
System.out.println("Removed every PathBasedCache directive with path " +
System.out.println("Removed every cache directive with path " +
path);
}
return exitCode;
}
}
private static class ListPathBasedCacheDirectiveCommand implements Command {
private static class ListCacheDirectiveInfoCommand implements Command {
@Override
public String getName() {
return "-listDirectives";
@@ -394,27 +396,28 @@ public class CacheAdmin extends Configured implements Tool {
@Override
public String getShortUsage() {
return "[" + getName() + " [-path <path>] [-pool <pool>]]\n";
return "[" + getName() + " [-stats] [-path <path>] [-pool <pool>]]\n";
}
@Override
public String getLongUsage() {
TableListing listing = getOptionDescriptionListing();
listing.addRow("<path>", "List only " +
"PathBasedCache directives with this path. " +
"Note that if there is a PathBasedCache directive for <path> " +
"cache directives with this path. " +
"Note that if there is a cache directive for <path> " +
"in a cache pool that we don't have read access for, it " +
"will not be listed.");
listing.addRow("<pool>", "List only cache directives in that pool.");
listing.addRow("-stats", "List cache directive statistics.");
return getShortUsage() + "\n" +
"List PathBasedCache directives.\n\n" +
"List cache directives.\n\n" +
listing.toString();
}
@Override
public int run(Configuration conf, List<String> args) throws IOException {
PathBasedCacheDirective.Builder builder =
new PathBasedCacheDirective.Builder();
CacheDirectiveInfo.Builder builder =
new CacheDirectiveInfo.Builder();
String pathFilter = StringUtils.popOptionWithArgument("-path", args);
if (pathFilter != null) {
builder.setPath(new Path(pathFilter));
@@ -423,28 +426,42 @@ public class CacheAdmin extends Configured implements Tool {
if (poolFilter != null) {
builder.setPool(poolFilter);
}
boolean printStats = StringUtils.popOption("-stats", args);
if (!args.isEmpty()) {
System.err.println("Can't understand argument: " + args.get(0));
return 1;
}
TableListing tableListing = new TableListing.Builder().
addField("ID", Justification.LEFT).
TableListing.Builder tableBuilder = new TableListing.Builder().
addField("ID", Justification.RIGHT).
addField("POOL", Justification.LEFT).
addField("REPLICATION", Justification.LEFT).
addField("PATH", Justification.LEFT).
build();
addField("REPLICATION", Justification.RIGHT).
addField("PATH", Justification.LEFT);
if (printStats) {
tableBuilder.addField("NEEDED", Justification.RIGHT).
addField("CACHED", Justification.RIGHT).
addField("FILES", Justification.RIGHT);
}
TableListing tableListing = tableBuilder.build();
DistributedFileSystem dfs = getDFS(conf);
RemoteIterator<PathBasedCacheDirective> iter =
dfs.listPathBasedCacheDirectives(builder.build());
RemoteIterator<CacheDirectiveEntry> iter =
dfs.listCacheDirectives(builder.build());
int numEntries = 0;
while (iter.hasNext()) {
PathBasedCacheDirective directive = iter.next();
String row[] = new String[] {
"" + directive.getId(), directive.getPool(),
"" + directive.getReplication(),
directive.getPath().toUri().getPath(),
};
tableListing.addRow(row);
CacheDirectiveEntry entry = iter.next();
CacheDirectiveInfo directive = entry.getInfo();
CacheDirectiveStats stats = entry.getStats();
List<String> row = new LinkedList<String>();
row.add("" + directive.getId());
row.add(directive.getPool());
row.add("" + directive.getReplication());
row.add(directive.getPath().toUri().getPath());
if (printStats) {
row.add("" + stats.getBytesNeeded());
row.add("" + stats.getBytesCached());
row.add("" + stats.getFilesAffected());
}
tableListing.addRow(row.toArray(new String[0]));
numEntries++;
}
System.out.print(String.format("Found %d entr%s\n",
@@ -734,7 +751,7 @@ public class CacheAdmin extends Configured implements Tool {
addField("OWNER", Justification.LEFT).
addField("GROUP", Justification.LEFT).
addField("MODE", Justification.LEFT).
addField("WEIGHT", Justification.LEFT).
addField("WEIGHT", Justification.RIGHT).
build();
int numResults = 0;
try {
@@ -824,11 +841,11 @@ public class CacheAdmin extends Configured implements Tool {
}
private static Command[] COMMANDS = {
new AddPathBasedCacheDirectiveCommand(),
new ModifyPathBasedCacheDirectiveCommand(),
new ListPathBasedCacheDirectiveCommand(),
new RemovePathBasedCacheDirectiveCommand(),
new RemovePathBasedCacheDirectivesCommand(),
new AddCacheDirectiveInfoCommand(),
new ModifyCacheDirectiveInfoCommand(),
new ListCacheDirectiveInfoCommand(),
new RemoveCacheDirectiveInfoCommand(),
new RemoveCacheDirectiveInfosCommand(),
new AddCachePoolCommand(),
new ModifyCachePoolCommand(),
new RemoveCachePoolCommand(),
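For reference, a minimal sketch of the renamed client API in use. This is not part of the change itself: the pool name "pool1" and path "/warm-data" are placeholders, and it assumes fs.defaultFS points at an HDFS cluster that already has that cache pool.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.DistributedFileSystem;
  import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

  public class CacheDirectiveDemo {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // The cast is safe only when fs.defaultFS resolves to an hdfs:// URI.
      DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
      CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
          .setPath(new Path("/warm-data"))
          .setReplication((short) 2)
          .setPool("pool1")
          .build();
      long id = dfs.addCacheDirective(directive);  // was addPathBasedCacheDirective
      System.out.println("Added cache directive " + id);
      dfs.removeCacheDirective(id);                // was removePathBasedCacheDirective
    }
  }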


@@ -30,9 +30,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
* Example:
*
* NAME OWNER GROUP MODE WEIGHT
* pool1 andrew andrew rwxr-xr-x 100
* pool2 andrew andrew rwxr-xr-x 100
* pool3 andrew andrew rwxr-xr-x 100
* pool1 andrew andrew rwxr-xr-x 100
* pool2 andrew andrew rwxr-xr-x 100
* pool3 andrew andrew rwxr-xr-x 100
*
*/
@InterfaceAudience.Private


@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.util;
import java.util.Arrays;
import java.util.HashMap;
import com.google.common.base.Preconditions;
@@ -34,21 +35,19 @@ import com.google.common.base.Preconditions;
* @param <E> the enum type
*/
public class EnumCounters<E extends Enum<E>> {
/** An array of enum constants. */
private final E[] enumConstants;
/** The class of the enum. */
private final Class<E> enumClass;
/** The counter array, counters[i] corresponds to the enumConstants[i]. */
private final long[] counters;
/**
* Construct counters for the given enum constants.
* @param enumConstants an array of enum constants such that,
* for all i, enumConstants[i].ordinal() == i.
* @param enumClass the enum class of the counters.
*/
public EnumCounters(final E[] enumConstants) {
for(int i = 0; i < enumConstants.length; i++) {
Preconditions.checkArgument(enumConstants[i].ordinal() == i);
}
this.enumConstants = enumConstants;
public EnumCounters(final Class<E> enumClass) {
final E[] enumConstants = enumClass.getEnumConstants();
Preconditions.checkNotNull(enumConstants);
this.enumClass = enumClass;
this.counters = new long[enumConstants.length];
}
@@ -69,6 +68,13 @@ public class EnumCounters<E extends Enum<E>> {
counters[e.ordinal()] = value;
}
/** Set this counters to that counters. */
public final void set(final EnumCounters<E> that) {
for(int i = 0; i < counters.length; i++) {
this.counters[i] = that.counters[i];
}
}
/** Add the given value to counter e. */
public final void add(final E e, final long value) {
counters[e.ordinal()] += value;
@@ -86,15 +92,33 @@ public class EnumCounters<E extends Enum<E>> {
counters[e.ordinal()] -= value;
}
/** Subtract that counters from this counters. */
/** Subtract that counters from this counters, i.e. this -= that. */
public final void subtract(final EnumCounters<E> that) {
for(int i = 0; i < counters.length; i++) {
this.counters[i] -= that.counters[i];
}
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
} else if (obj == null || !(obj instanceof EnumCounters)) {
return false;
}
final EnumCounters<?> that = (EnumCounters<?>)obj;
return this.enumClass == that.enumClass
&& Arrays.equals(this.counters, that.counters);
}
@Override
public int hashCode() {
return Arrays.hashCode(counters);
}
@Override
public String toString() {
final E[] enumConstants = enumClass.getEnumConstants();
final StringBuilder b = new StringBuilder();
for(int i = 0; i < counters.length; i++) {
final String name = enumConstants[i].name();
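A short self-contained sketch of the reworked constructor and the newly added set()/equals(); the Op enum here is invented for illustration, while real callers pass e.g. Quota.class.

  import org.apache.hadoop.hdfs.util.EnumCounters;

  public class EnumCountersDemo {
    enum Op { READ, WRITE }

    public static void main(String[] args) {
      EnumCounters<Op> a = new EnumCounters<Op>(Op.class);
      a.add(Op.READ, 5);                // counters[Op.READ.ordinal()] += 5
      EnumCounters<Op> b = new EnumCounters<Op>(Op.class);
      b.set(a);                         // copy every counter from a
      System.out.println(a.equals(b));  // true: same enum class, equal counts
      b.subtract(a);                    // b drops back to all zeros
    }
  }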


@@ -0,0 +1,66 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import java.io.IOException;
import java.security.GeneralSecurityException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.io.Text;
public class SWebHdfsFileSystem extends WebHdfsFileSystem {
public static final Text TOKEN_KIND = new Text("SWEBHDFS delegation");
public static final String SCHEME = "swebhdfs";
@Override
public String getScheme() {
return SCHEME;
}
@Override
protected String getTransportScheme() {
return "https";
}
@Override
protected synchronized void initializeTokenAspect() {
tokenAspect = new TokenAspect<WebHdfsFileSystem>(this, TOKEN_KIND);
}
@Override
protected void initializeConnectionFactory(Configuration conf)
throws IOException {
connectionFactory = new URLConnectionFactory(
URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT);
try {
connectionFactory.setConnConfigurator(URLConnectionFactory
.newSslConnConfigurator(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
conf));
} catch (GeneralSecurityException e) {
throw new IOException(e);
}
}
@Override
protected int getDefaultPort() {
return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY,
DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT);
}
}
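Once the scheme is registered with the FileSystem service loader (see the services entry added below), clients reach it through the ordinary FileSystem API. A hedged sketch: the host and port are placeholders, and the Configuration must carry the usual SSL client settings for the handshake to succeed.

  import java.net.URI;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;

  public class SWebHdfsDemo {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      FileSystem fs = FileSystem.get(
          URI.create("swebhdfs://namenode.example.com:50470"), conf);
      System.out.println(fs.getScheme());  // prints "swebhdfs"
    }
  }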


@@ -58,7 +58,8 @@ final class TokenAspect<T extends FileSystem & Renewable> {
public boolean handleKind(Text kind) {
return kind.equals(HftpFileSystem.TOKEN_KIND)
|| kind.equals(HsftpFileSystem.TOKEN_KIND)
|| kind.equals(WebHdfsFileSystem.TOKEN_KIND);
|| kind.equals(WebHdfsFileSystem.TOKEN_KIND)
|| kind.equals(SWebHdfsFileSystem.TOKEN_KIND);
}
@Override
@@ -83,6 +84,8 @@ final class TokenAspect<T extends FileSystem & Renewable> {
uri = DFSUtil.createUri(HsftpFileSystem.SCHEME, address);
} else if (kind.equals(WebHdfsFileSystem.TOKEN_KIND)) {
uri = DFSUtil.createUri(WebHdfsFileSystem.SCHEME, address);
} else if (kind.equals(SWebHdfsFileSystem.TOKEN_KIND)) {
uri = DFSUtil.createUri(SWebHdfsFileSystem.SCHEME, address);
} else {
throw new IllegalArgumentException("Unsupported scheme");
}


@@ -56,7 +56,6 @@ import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.hdfs.web.TokenAspect.DTSelecorByKind;
import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
@@ -98,7 +97,6 @@ import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.Progressable;
import org.mortbay.util.ajax.JSON;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
@@ -118,8 +116,7 @@ public class WebHdfsFileSystem extends FileSystem
/** Delegation token kind */
public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
protected TokenAspect<WebHdfsFileSystem> tokenAspect = new TokenAspect<WebHdfsFileSystem>(
this, TOKEN_KIND);
protected TokenAspect<WebHdfsFileSystem> tokenAspect;
private UserGroupInformation ugi;
private URI uri;
@@ -140,17 +137,44 @@ public class WebHdfsFileSystem extends FileSystem
return SCHEME;
}
/**
* Return the underlying transport protocol (http / https).
*/
protected String getTransportScheme() {
return "http";
}
/**
* Initialize tokenAspect. This function is intended to
* be overridden by SWebHdfsFileSystem.
*/
protected synchronized void initializeTokenAspect() {
tokenAspect = new TokenAspect<WebHdfsFileSystem>(this, TOKEN_KIND);
}
/**
* Initialize connectionFactory. This function is intended to
* be overridden by SWebHdfsFileSystem.
*/
protected void initializeConnectionFactory(Configuration conf)
throws IOException {
connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
}
@Override
public synchronized void initialize(URI uri, Configuration conf
) throws IOException {
super.initialize(uri, conf);
setConf(conf);
initializeTokenAspect();
initializeConnectionFactory(conf);
ugi = UserGroupInformation.getCurrentUser();
try {
this.uri = new URI(uri.getScheme(), uri.getAuthority(), null,
null, null);
this.nnAddrs = DFSUtil.resolve(this.uri, getDefaultPort(), conf);
this.nnAddrs = DFSUtil.resolveWebHdfsUri(this.uri, conf);
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
@@ -342,7 +366,7 @@ public class WebHdfsFileSystem extends FileSystem
*/
private URL getNamenodeURL(String path, String query) throws IOException {
InetSocketAddress nnAddr = getCurrentNNAddr();
final URL url = new URL("http", nnAddr.getHostName(),
final URL url = new URL(getTransportScheme(), nnAddr.getHostName(),
nnAddr.getPort(), path + '?' + query);
if (LOG.isTraceEnabled()) {
LOG.trace("url=" + url);
@@ -840,7 +864,9 @@ public class WebHdfsFileSystem extends FileSystem
@Override
public void close() throws IOException {
super.close();
tokenAspect.removeRenewAction();
synchronized (this) {
tokenAspect.removeRenewAction();
}
}
class OffsetUrlOpener extends ByteRangeInputStream.URLOpener {


@@ -364,46 +364,53 @@ message IsFileClosedResponseProto {
required bool result = 1;
}
message PathBasedCacheDirectiveInfoProto {
message CacheDirectiveInfoProto {
optional int64 id = 1;
optional string path = 2;
optional uint32 replication = 3;
optional string pool = 4;
}
message AddPathBasedCacheDirectiveRequestProto {
required PathBasedCacheDirectiveInfoProto info = 1;
message CacheDirectiveStatsProto {
required int64 bytesNeeded = 1;
required int64 bytesCached = 2;
required int64 filesAffected = 3;
}
message AddPathBasedCacheDirectiveResponseProto {
message AddCacheDirectiveRequestProto {
required CacheDirectiveInfoProto info = 1;
}
message AddCacheDirectiveResponseProto {
required int64 id = 1;
}
message ModifyPathBasedCacheDirectiveRequestProto {
required PathBasedCacheDirectiveInfoProto info = 1;
message ModifyCacheDirectiveRequestProto {
required CacheDirectiveInfoProto info = 1;
}
message ModifyPathBasedCacheDirectiveResponseProto {
message ModifyCacheDirectiveResponseProto {
}
message RemovePathBasedCacheDirectiveRequestProto {
message RemoveCacheDirectiveRequestProto {
required int64 id = 1;
}
message RemovePathBasedCacheDirectiveResponseProto {
message RemoveCacheDirectiveResponseProto {
}
message ListPathBasedCacheDirectivesRequestProto {
message ListCacheDirectivesRequestProto {
required int64 prevId = 1;
required PathBasedCacheDirectiveInfoProto filter = 2;
required CacheDirectiveInfoProto filter = 2;
}
message ListPathBasedCacheDirectivesElementProto {
required PathBasedCacheDirectiveInfoProto info = 1;
message CacheDirectiveEntryProto {
required CacheDirectiveInfoProto info = 1;
required CacheDirectiveStatsProto stats = 2;
}
message ListPathBasedCacheDirectivesResponseProto {
repeated ListPathBasedCacheDirectivesElementProto elements = 1;
message ListCacheDirectivesResponseProto {
repeated CacheDirectiveEntryProto elements = 1;
required bool hasMore = 2;
}
@@ -631,14 +638,14 @@ service ClientNamenodeProtocol {
returns(ListCorruptFileBlocksResponseProto);
rpc metaSave(MetaSaveRequestProto) returns(MetaSaveResponseProto);
rpc getFileInfo(GetFileInfoRequestProto) returns(GetFileInfoResponseProto);
rpc addPathBasedCacheDirective(AddPathBasedCacheDirectiveRequestProto)
returns (AddPathBasedCacheDirectiveResponseProto);
rpc modifyPathBasedCacheDirective(ModifyPathBasedCacheDirectiveRequestProto)
returns (ModifyPathBasedCacheDirectiveResponseProto);
rpc removePathBasedCacheDirective(RemovePathBasedCacheDirectiveRequestProto)
returns (RemovePathBasedCacheDirectiveResponseProto);
rpc listPathBasedCacheDirectives(ListPathBasedCacheDirectivesRequestProto)
returns (ListPathBasedCacheDirectivesResponseProto);
rpc addCacheDirective(AddCacheDirectiveRequestProto)
returns (AddCacheDirectiveResponseProto);
rpc modifyCacheDirective(ModifyCacheDirectiveRequestProto)
returns (ModifyCacheDirectiveResponseProto);
rpc removeCacheDirective(RemoveCacheDirectiveRequestProto)
returns (RemoveCacheDirectiveResponseProto);
rpc listCacheDirectives(ListCacheDirectivesRequestProto)
returns (ListCacheDirectivesResponseProto);
rpc addCachePool(AddCachePoolRequestProto)
returns(AddCachePoolResponseProto);
rpc modifyCachePool(ModifyCachePoolRequestProto)
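For orientation, a sketch of the renamed messages driven through the generated protobuf-java builders. The import path assumes the generated outer class ClientNamenodeProtocolProtos, which is codegen convention rather than something this diff shows.

  import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCacheDirectiveRequestProto;
  import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;

  public class CacheProtoDemo {
    public static void main(String[] args) {
      CacheDirectiveInfoProto info = CacheDirectiveInfoProto.newBuilder()
          .setPath("/warm-data")  // optional string path = 2
          .setReplication(2)      // optional uint32 replication = 3
          .setPool("pool1")       // optional string pool = 4
          .build();
      AddCacheDirectiveRequestProto request =
          AddCacheDirectiveRequestProto.newBuilder().setInfo(info).build();
      System.out.println(request);
    }
  }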


@@ -17,3 +17,4 @@ org.apache.hadoop.hdfs.DistributedFileSystem
org.apache.hadoop.hdfs.web.HftpFileSystem
org.apache.hadoop.hdfs.web.HsftpFileSystem
org.apache.hadoop.hdfs.web.WebHdfsFileSystem
org.apache.hadoop.hdfs.web.SWebHdfsFileSystem


@@ -1179,7 +1179,7 @@
<property>
<name>dfs.webhdfs.enabled</name>
<value>false</value>
<value>true</value>
<description>
Enable WebHDFS (REST API) in Namenodes and Datanodes.
</description>


@@ -118,7 +118,7 @@ Centralized Cache Management in HDFS
Usage: <<<hdfs cacheadmin -addDirective -path <path> -replication <replication> -pool <pool-name> >>>
Add a new PathBasedCache directive.
Add a new cache directive.
*--+--+
\<path\> | A path to cache. The path can be a directory or a file.
@@ -135,7 +135,7 @@ Centralized Cache Management in HDFS
Remove a cache directive.
*--+--+
\<id\> | The id of the cache directive to remove. You must have write permission on the pool of the directive in order to remove it. To see a list of PathBasedCache directive IDs, use the -listDirectives command.
\<id\> | The id of the cache directive to remove. You must have write permission on the pool of the directive in order to remove it. To see a list of cache directive IDs, use the -listDirectives command.
*--+--+
*** {removeDirectives}
@@ -152,10 +152,10 @@ Centralized Cache Management in HDFS
Usage: <<<hdfs cacheadmin -listDirectives [-path <path>] [-pool <pool>] >>>
List PathBasedCache directives.
List cache directives.
*--+--+
\<path\> | List only PathBasedCache directives with this path. Note that if there is a PathBasedCache directive for <path> in a cache pool that we don't have read access for, it will not be listed.
\<path\> | List only cache directives with this path. Note that if there is a cache directive for <path> in a cache pool that we don't have read access for, it will not be listed.
*--+--+
\<pool\> | List only cache directives in that pool.
*--+--+
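The documented commands can also be invoked programmatically through the Tool interface; a sketch assuming CacheAdmin keeps a no-argument constructor, with placeholder pool and path values.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.tools.CacheAdmin;
  import org.apache.hadoop.util.ToolRunner;

  public class CacheAdminRunner {
    public static void main(String[] args) throws Exception {
      // Equivalent to: hdfs cacheadmin -addDirective -path /warm-data
      //                -replication 2 -pool pool1
      int exit = ToolRunner.run(new Configuration(), new CacheAdmin(),
          new String[] { "-addDirective", "-path", "/warm-data",
                         "-replication", "2", "-pool", "pool1" });
      System.exit(exit);
    }
  }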


@@ -89,7 +89,7 @@ abstract public class TestSymlinkHdfs extends SymlinkBaseTest {
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.set(FsPermission.UMASK_LABEL, "000");
cluster = new MiniDFSCluster.Builder(conf).build();
webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
dfs = cluster.getFileSystem();
}


@@ -1038,20 +1038,20 @@ public class DFSTestUtil {
// OP_MODIFY_CACHE_POOL
filesystem.modifyCachePool(new CachePoolInfo("pool1").setWeight(99));
// OP_ADD_PATH_BASED_CACHE_DIRECTIVE
long id = filesystem.addPathBasedCacheDirective(
new PathBasedCacheDirective.Builder().
long id = filesystem.addCacheDirective(
new CacheDirectiveInfo.Builder().
setPath(new Path("/path")).
setReplication((short)1).
setPool("pool1").
build());
// OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE
filesystem.modifyPathBasedCacheDirective(
new PathBasedCacheDirective.Builder().
filesystem.modifyCacheDirective(
new CacheDirectiveInfo.Builder().
setId(id).
setReplication((short)2).
build());
// OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE
filesystem.removePathBasedCacheDirective(id);
filesystem.removeCacheDirective(id);
// OP_REMOVE_CACHE_POOL
filesystem.removeCachePool("pool1");
}


@@ -73,6 +73,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.IOUtils;
@@ -886,8 +887,8 @@ public class TestDFSClientRetries {
try {
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final FileSystem fs = isWebHDFS?
WebHdfsTestUtil.getWebHdfsFileSystem(conf): dfs;
final FileSystem fs = isWebHDFS ? WebHdfsTestUtil.getWebHdfsFileSystem(
conf, WebHdfsFileSystem.SCHEME) : dfs;
final URI uri = dfs.getUri();
assertTrue(HdfsUtils.isHealthy(uri));
@@ -1091,7 +1092,7 @@ public class TestDFSClientRetries {
final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
username, new String[]{"supergroup"});
return isWebHDFS? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf)
return isWebHDFS? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME)
: DFSTestUtil.getFileSystemAs(ugi, conf);
}


@@ -556,7 +556,7 @@ public class TestDFSUtil {
Configuration conf = createWebHDFSHAConfiguration(LOGICAL_HOST_NAME, NS1_NN1_ADDR, NS1_NN2_ADDR);
Map<String, Map<String, InetSocketAddress>> map =
DFSUtil.getHaNnHttpAddresses(conf);
DFSUtil.getHaNnWebHdfsAddresses(conf, "webhdfs");
assertEquals(NS1_NN1_ADDR, map.get("ns1").get("nn1").toString());
assertEquals(NS1_NN2_ADDR, map.get("ns1").get("nn2").toString());
@@ -574,7 +574,7 @@ public class TestDFSUtil {
Configuration conf = createWebHDFSHAConfiguration(LOGICAL_HOST_NAME, NS1_NN1_ADDR, NS1_NN2_ADDR);
URI uri = new URI("webhdfs://ns1");
assertTrue(HAUtil.isLogicalUri(conf, uri));
InetSocketAddress[] addrs = DFSUtil.resolve(uri, DEFAULT_PORT, conf);
InetSocketAddress[] addrs = DFSUtil.resolveWebHdfsUri(uri, conf);
assertArrayEquals(new InetSocketAddress[] {
new InetSocketAddress(NS1_NN1_HOST, DEFAULT_PORT),
new InetSocketAddress(NS1_NN2_HOST, DEFAULT_PORT),


@@ -147,7 +147,7 @@ public class TestDelegationTokenForProxyUser {
public void testWebHdfsDoAs() throws Exception {
WebHdfsTestUtil.LOG.info("START: testWebHdfsDoAs()");
WebHdfsTestUtil.LOG.info("ugi.getShortUserName()=" + ugi.getShortUserName());
final WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, config);
final WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, config, WebHdfsFileSystem.SCHEME);
final Path root = new Path("/");
cluster.getFileSystem().setPermission(root, new FsPermission((short)0777));


@@ -554,7 +554,7 @@ public abstract class FSImageTestUtil {
* get NameSpace quota.
*/
public static long getNSQuota(FSNamesystem ns) {
return ns.dir.rootDir.getNsQuota();
return ns.dir.rootDir.getQuotaCounts().get(Quota.NAMESPACE);
}
public static void assertNNFilesMatch(MiniDFSCluster cluster) throws Exception {


@@ -42,7 +42,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -241,20 +241,20 @@ public class OfflineEditsViewerHelper {
.setMode(new FsPermission((short)0700))
.setWeight(1989));
// OP_ADD_PATH_BASED_CACHE_DIRECTIVE 33
long id = dfs.addPathBasedCacheDirective(
new PathBasedCacheDirective.Builder().
long id = dfs.addCacheDirective(
new CacheDirectiveInfo.Builder().
setPath(new Path("/bar")).
setReplication((short)1).
setPool(pool).
build());
// OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE 38
dfs.modifyPathBasedCacheDirective(
new PathBasedCacheDirective.Builder().
dfs.modifyCacheDirective(
new CacheDirectiveInfo.Builder().
setId(id).
setPath(new Path("/bar2")).
build());
// OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE 34
dfs.removePathBasedCacheDirective(id);
dfs.removeCacheDirective(id);
// OP_REMOVE_CACHE_POOL 37
dfs.removeCachePool(pool);
// sync to disk, otherwise we parse partial edits


@@ -163,7 +163,7 @@ public class TestAuditLogs {
setupAuditLogs();
WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf);
WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
InputStream istream = webfs.open(file);
int val = istream.read();
istream.close();
@@ -182,7 +182,7 @@ public class TestAuditLogs {
setupAuditLogs();
WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf);
WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
FileStatus st = webfs.getFileStatus(file);
verifyAuditLogs(true);
@@ -222,7 +222,7 @@ public class TestAuditLogs {
setupAuditLogs();
try {
WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf);
WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
InputStream istream = webfs.open(file);
int val = istream.read();
fail("open+read must not succeed, got " + val);


@@ -31,7 +31,6 @@ import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Iterator;
@@ -54,8 +53,11 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -66,14 +68,17 @@ import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.GSet;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import com.google.common.base.Supplier;
public class TestPathBasedCacheRequests {
static final Log LOG = LogFactory.getLog(TestPathBasedCacheRequests.class);
public class TestCacheDirectives {
static final Log LOG = LogFactory.getLog(TestCacheDirectives.class);
private static final UserGroupInformation unprivilegedUser =
UserGroupInformation.createRemoteUser("unprivilegedUser");
@@ -100,6 +105,7 @@ public class TestPathBasedCacheRequests {
proto = cluster.getNameNodeRpc();
prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
LogManager.getLogger(CacheReplicationMonitor.class).setLevel(Level.TRACE);
}
@After
@@ -307,24 +313,25 @@ public class TestPathBasedCacheRequests {
}
private static void validateListAll(
RemoteIterator<PathBasedCacheDirective> iter,
RemoteIterator<CacheDirectiveEntry> iter,
Long... ids) throws Exception {
for (Long id: ids) {
assertTrue("Unexpectedly few elements", iter.hasNext());
assertEquals("Unexpected directive ID", id, iter.next().getId());
assertEquals("Unexpected directive ID", id,
iter.next().getInfo().getId());
}
assertFalse("Unexpectedly many list elements", iter.hasNext());
}
private static long addAsUnprivileged(
final PathBasedCacheDirective directive) throws Exception {
final CacheDirectiveInfo directive) throws Exception {
return unprivilegedUser
.doAs(new PrivilegedExceptionAction<Long>() {
@Override
public Long run() throws IOException {
DistributedFileSystem myDfs =
(DistributedFileSystem) FileSystem.get(conf);
return myDfs.addPathBasedCacheDirective(directive);
return myDfs.addCacheDirective(directive);
}
});
}
@@ -340,15 +347,15 @@ public class TestPathBasedCacheRequests {
proto.addCachePool(new CachePoolInfo("pool4").
setMode(new FsPermission((short)0)));
PathBasedCacheDirective alpha = new PathBasedCacheDirective.Builder().
CacheDirectiveInfo alpha = new CacheDirectiveInfo.Builder().
setPath(new Path("/alpha")).
setPool("pool1").
build();
PathBasedCacheDirective beta = new PathBasedCacheDirective.Builder().
CacheDirectiveInfo beta = new CacheDirectiveInfo.Builder().
setPath(new Path("/beta")).
setPool("pool2").
build();
PathBasedCacheDirective delta = new PathBasedCacheDirective.Builder().
CacheDirectiveInfo delta = new CacheDirectiveInfo.Builder().
setPath(new Path("/delta")).
setPool("pool1").
build();
@@ -356,12 +363,12 @@
long alphaId = addAsUnprivileged(alpha);
long alphaId2 = addAsUnprivileged(alpha);
assertFalse("Expected to get unique directives when re-adding an "
+ "existing PathBasedCacheDirective",
+ "existing CacheDirectiveInfo",
alphaId == alphaId2);
long betaId = addAsUnprivileged(beta);
try {
addAsUnprivileged(new PathBasedCacheDirective.Builder().
addAsUnprivileged(new CacheDirectiveInfo.Builder().
setPath(new Path("/unicorn")).
setPool("no_such_pool").
build());
@@ -371,7 +378,7 @@
}
try {
addAsUnprivileged(new PathBasedCacheDirective.Builder().
addAsUnprivileged(new CacheDirectiveInfo.Builder().
setPath(new Path("/blackhole")).
setPool("pool4").
build());
@@ -383,7 +390,7 @@
}
try {
addAsUnprivileged(new PathBasedCacheDirective.Builder().
addAsUnprivileged(new CacheDirectiveInfo.Builder().
setPath(new Path("/illegal:path/")).
setPool("pool1").
build());
@@ -394,12 +401,12 @@
}
try {
addAsUnprivileged(new PathBasedCacheDirective.Builder().
addAsUnprivileged(new CacheDirectiveInfo.Builder().
setPath(new Path("/emptypoolname")).
setReplication((short)1).
setPool("").
build());
fail("expected an error when adding a PathBasedCache " +
fail("expected an error when adding a cache " +
"directive with an empty pool name.");
} catch (InvalidRequestException e) {
GenericTestUtils.assertExceptionContains("Invalid empty pool name", e);
@@ -410,75 +417,75 @@
// We expect the following to succeed, because DistributedFileSystem
// qualifies the path.
long relativeId = addAsUnprivileged(
new PathBasedCacheDirective.Builder().
new CacheDirectiveInfo.Builder().
setPath(new Path("relative")).
setPool("pool1").
build());
RemoteIterator<PathBasedCacheDirective> iter;
iter = dfs.listPathBasedCacheDirectives(null);
RemoteIterator<CacheDirectiveEntry> iter;
iter = dfs.listCacheDirectives(null);
validateListAll(iter, alphaId, alphaId2, betaId, deltaId, relativeId );
iter = dfs.listPathBasedCacheDirectives(
new PathBasedCacheDirective.Builder().setPool("pool3").build());
iter = dfs.listCacheDirectives(
new CacheDirectiveInfo.Builder().setPool("pool3").build());
assertFalse(iter.hasNext());
iter = dfs.listPathBasedCacheDirectives(
new PathBasedCacheDirective.Builder().setPool("pool1").build());
iter = dfs.listCacheDirectives(
new CacheDirectiveInfo.Builder().setPool("pool1").build());
validateListAll(iter, alphaId, alphaId2, deltaId, relativeId );
iter = dfs.listPathBasedCacheDirectives(
new PathBasedCacheDirective.Builder().setPool("pool2").build());
iter = dfs.listCacheDirectives(
new CacheDirectiveInfo.Builder().setPool("pool2").build());
validateListAll(iter, betaId);
dfs.removePathBasedCacheDirective(betaId);
iter = dfs.listPathBasedCacheDirectives(
new PathBasedCacheDirective.Builder().setPool("pool2").build());
dfs.removeCacheDirective(betaId);
iter = dfs.listCacheDirectives(
new CacheDirectiveInfo.Builder().setPool("pool2").build());
assertFalse(iter.hasNext());
try {
dfs.removePathBasedCacheDirective(betaId);
dfs.removeCacheDirective(betaId);
fail("expected an error when removing a non-existent ID");
} catch (InvalidRequestException e) {
GenericTestUtils.assertExceptionContains("No directive with ID", e);
}
try {
proto.removePathBasedCacheDirective(-42l);
proto.removeCacheDirective(-42l);
fail("expected an error when removing a negative ID");
} catch (InvalidRequestException e) {
GenericTestUtils.assertExceptionContains(
"Invalid negative ID", e);
}
try {
proto.removePathBasedCacheDirective(43l);
proto.removeCacheDirective(43l);
fail("expected an error when removing a non-existent ID");
} catch (InvalidRequestException e) {
GenericTestUtils.assertExceptionContains("No directive with ID", e);
}
dfs.removePathBasedCacheDirective(alphaId);
dfs.removePathBasedCacheDirective(alphaId2);
dfs.removePathBasedCacheDirective(deltaId);
dfs.removeCacheDirective(alphaId);
dfs.removeCacheDirective(alphaId2);
dfs.removeCacheDirective(deltaId);
dfs.modifyPathBasedCacheDirective(new PathBasedCacheDirective.Builder().
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().
setId(relativeId).
setReplication((short)555).
build());
iter = dfs.listPathBasedCacheDirectives(null);
iter = dfs.listCacheDirectives(null);
assertTrue(iter.hasNext());
PathBasedCacheDirective modified = iter.next();
CacheDirectiveInfo modified = iter.next().getInfo();
assertEquals(relativeId, modified.getId().longValue());
assertEquals((short)555, modified.getReplication().shortValue());
dfs.removePathBasedCacheDirective(relativeId);
iter = dfs.listPathBasedCacheDirectives(null);
dfs.removeCacheDirective(relativeId);
iter = dfs.listCacheDirectives(null);
assertFalse(iter.hasNext());
// Verify that cache directives with path "." work correctly
PathBasedCacheDirective directive =
new PathBasedCacheDirective.Builder().setPath(new Path("."))
CacheDirectiveInfo directive =
new CacheDirectiveInfo.Builder().setPath(new Path("."))
.setPool("pool1").build();
long id = dfs.addPathBasedCacheDirective(directive);
dfs.modifyPathBasedCacheDirective(new PathBasedCacheDirective.Builder(
long id = dfs.addCacheDirective(directive);
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(
directive).setId(id).setReplication((short)2).build());
dfs.removePathBasedCacheDirective(id);
dfs.removeCacheDirective(id);
}
@Test(timeout=60000)
@@ -514,15 +521,15 @@ public class TestPathBasedCacheRequests {
String entryPrefix = "/party-";
long prevId = -1;
for (int i=0; i<numEntries; i++) {
prevId = dfs.addPathBasedCacheDirective(
new PathBasedCacheDirective.Builder().
prevId = dfs.addCacheDirective(
new CacheDirectiveInfo.Builder().
setPath(new Path(entryPrefix + i)).setPool(pool).build());
}
RemoteIterator<PathBasedCacheDirective> dit
= dfs.listPathBasedCacheDirectives(null);
RemoteIterator<CacheDirectiveEntry> dit
= dfs.listCacheDirectives(null);
for (int i=0; i<numEntries; i++) {
assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
PathBasedCacheDirective cd = dit.next();
CacheDirectiveInfo cd = dit.next().getInfo();
assertEquals(i+1, cd.getId().longValue());
assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
assertEquals(pool, cd.getPool());
@@ -543,18 +550,18 @@
assertEquals(weight, (int)info.getWeight());
assertFalse("Unexpected # of cache pools found", pit.hasNext());
dit = dfs.listPathBasedCacheDirectives(null);
dit = dfs.listCacheDirectives(null);
for (int i=0; i<numEntries; i++) {
assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
PathBasedCacheDirective cd = dit.next();
CacheDirectiveInfo cd = dit.next().getInfo();
assertEquals(i+1, cd.getId().longValue());
assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
assertEquals(pool, cd.getPool());
}
assertFalse("Unexpected # of cache directives found", dit.hasNext());
long nextId = dfs.addPathBasedCacheDirective(
new PathBasedCacheDirective.Builder().
long nextId = dfs.addCacheDirective(
new CacheDirectiveInfo.Builder().
setPath(new Path("/foobar")).setPool(pool).build());
assertEquals(prevId + 1, nextId);
}
@@ -686,22 +693,22 @@
// Cache and check each path in sequence
int expected = 0;
for (int i=0; i<numFiles; i++) {
PathBasedCacheDirective directive =
new PathBasedCacheDirective.Builder().
CacheDirectiveInfo directive =
new CacheDirectiveInfo.Builder().
setPath(new Path(paths.get(i))).
setPool(pool).
build();
nnRpc.addPathBasedCacheDirective(directive);
nnRpc.addCacheDirective(directive);
expected += numBlocksPerFile;
waitForCachedBlocks(namenode, expected, expected,
"testWaitForCachedReplicas:1");
}
// Uncache and check each path in sequence
RemoteIterator<PathBasedCacheDirective> entries =
nnRpc.listPathBasedCacheDirectives(0, null);
RemoteIterator<CacheDirectiveEntry> entries =
nnRpc.listCacheDirectives(0, null);
for (int i=0; i<numFiles; i++) {
PathBasedCacheDirective directive = entries.next();
nnRpc.removePathBasedCacheDirective(directive.getId());
CacheDirectiveEntry entry = entries.next();
nnRpc.removeCacheDirective(entry.getInfo().getId());
expected -= numBlocksPerFile;
waitForCachedBlocks(namenode, expected, expected,
"testWaitForCachedReplicas:2");
@@ -712,7 +719,7 @@
}
@Test(timeout=120000)
public void testAddingPathBasedCacheDirectivesWhenCachingIsDisabled()
public void testAddingCacheDirectiveInfosWhenCachingIsDisabled()
throws Exception {
HdfsConfiguration conf = createCachingConf();
conf.setBoolean(DFS_NAMENODE_CACHING_ENABLED_KEY, false);
@@ -738,22 +745,22 @@
}
// Check the initial statistics at the namenode
waitForCachedBlocks(namenode, 0, 0,
"testAddingPathBasedCacheDirectivesWhenCachingIsDisabled:0");
"testAddingCacheDirectiveInfosWhenCachingIsDisabled:0");
// Cache and check each path in sequence
int expected = 0;
for (int i=0; i<numFiles; i++) {
PathBasedCacheDirective directive =
new PathBasedCacheDirective.Builder().
CacheDirectiveInfo directive =
new CacheDirectiveInfo.Builder().
setPath(new Path(paths.get(i))).
setPool(pool).
build();
dfs.addPathBasedCacheDirective(directive);
dfs.addCacheDirective(directive);
waitForCachedBlocks(namenode, expected, 0,
"testAddingPathBasedCacheDirectivesWhenCachingIsDisabled:1");
"testAddingCacheDirectiveInfosWhenCachingIsDisabled:1");
}
Thread.sleep(20000);
waitForCachedBlocks(namenode, expected, 0,
"testAddingPathBasedCacheDirectivesWhenCachingIsDisabled:2");
"testAddingCacheDirectiveInfosWhenCachingIsDisabled:2");
} finally {
cluster.shutdown();
}
@@ -788,18 +795,76 @@
waitForCachedBlocks(namenode, 0, 0,
"testWaitForCachedReplicasInDirectory:0");
// cache entire directory
long id = dfs.addPathBasedCacheDirective(
new PathBasedCacheDirective.Builder().
long id = dfs.addCacheDirective(
new CacheDirectiveInfo.Builder().
setPath(new Path("/foo")).
setReplication((short)2).
setPool(pool).
build());
waitForCachedBlocks(namenode, 4, 8,
"testWaitForCachedReplicasInDirectory:1");
// remove and watch numCached go to 0
dfs.removePathBasedCacheDirective(id);
waitForCachedBlocks(namenode, 0, 0,
// Verify that listDirectives gives the stats we want.
RemoteIterator<CacheDirectiveEntry> iter =
dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().
setPath(new Path("/foo")).
build());
CacheDirectiveEntry entry = iter.next();
CacheDirectiveStats stats = entry.getStats();
Assert.assertEquals(Long.valueOf(2),
stats.getFilesAffected());
Assert.assertEquals(Long.valueOf(
2 * numBlocksPerFile * BLOCK_SIZE * 2),
stats.getBytesNeeded());
Assert.assertEquals(Long.valueOf(
2 * numBlocksPerFile * BLOCK_SIZE * 2),
stats.getBytesCached());
long id2 = dfs.addCacheDirective(
new CacheDirectiveInfo.Builder().
setPath(new Path("/foo/bar")).
setReplication((short)4).
setPool(pool).
build());
// wait for an additional 2 cached replicas to come up
waitForCachedBlocks(namenode, 4, 10,
"testWaitForCachedReplicasInDirectory:2");
// the directory directive's stats are unchanged
iter = dfs.listCacheDirectives(
new CacheDirectiveInfo.Builder().
setPath(new Path("/foo")).
build());
entry = iter.next();
stats = entry.getStats();
Assert.assertEquals(Long.valueOf(2),
stats.getFilesAffected());
Assert.assertEquals(Long.valueOf(
2 * numBlocksPerFile * BLOCK_SIZE * 2),
stats.getBytesNeeded());
Assert.assertEquals(Long.valueOf(
2 * numBlocksPerFile * BLOCK_SIZE * 2),
stats.getBytesCached());
// verify /foo/bar's stats
iter = dfs.listCacheDirectives(
new CacheDirectiveInfo.Builder().
setPath(new Path("/foo/bar")).
build());
entry = iter.next();
stats = entry.getStats();
Assert.assertEquals(Long.valueOf(1),
stats.getFilesAffected());
Assert.assertEquals(Long.valueOf(
4 * numBlocksPerFile * BLOCK_SIZE),
stats.getBytesNeeded());
// only 3 because the file only has 3 replicas, not 4 as requested.
Assert.assertEquals(Long.valueOf(
3 * numBlocksPerFile * BLOCK_SIZE),
stats.getBytesCached());
// remove and watch numCached go to 0
dfs.removeCacheDirective(id);
dfs.removeCacheDirective(id2);
waitForCachedBlocks(namenode, 0, 0,
"testWaitForCachedReplicasInDirectory:3");
} finally {
cluster.shutdown();
}
@@ -839,8 +904,8 @@
waitForCachedBlocks(namenode, 0, 0, "testReplicationFactor:0");
checkNumCachedReplicas(dfs, paths, 0, 0);
// cache directory
long id = dfs.addPathBasedCacheDirective(
new PathBasedCacheDirective.Builder().
long id = dfs.addCacheDirective(
new CacheDirectiveInfo.Builder().
setPath(new Path("/foo")).
setReplication((short)1).
setPool(pool).
@@ -849,8 +914,8 @@
checkNumCachedReplicas(dfs, paths, 4, 4);
// step up the replication factor
for (int i=2; i<=3; i++) {
dfs.modifyPathBasedCacheDirective(
new PathBasedCacheDirective.Builder().
dfs.modifyCacheDirective(
new CacheDirectiveInfo.Builder().
setId(id).
setReplication((short)i).
build());
@@ -859,8 +924,8 @@
}
// step it down
for (int i=2; i>=1; i--) {
dfs.modifyPathBasedCacheDirective(
new PathBasedCacheDirective.Builder().
dfs.modifyCacheDirective(
new CacheDirectiveInfo.Builder().
setId(id).
setReplication((short)i).
build());
@@ -868,7 +933,7 @@
checkNumCachedReplicas(dfs, paths, 4, 4*i);
}
// remove and watch numCached go to 0
dfs.removePathBasedCacheDirective(id);
dfs.removeCacheDirective(id);
waitForCachedBlocks(namenode, 0, 0, "testReplicationFactor:4");
checkNumCachedReplicas(dfs, paths, 0, 0);
} finally {


@@ -61,7 +61,8 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
@@ -737,14 +738,14 @@ public class TestRetryCacheWithHA {
}
}
/** addPathBasedCacheDirective */
class AddPathBasedCacheDirectiveOp extends AtMostOnceOp {
private PathBasedCacheDirective directive;
/** addCacheDirective */
class AddCacheDirectiveInfoOp extends AtMostOnceOp {
private CacheDirectiveInfo directive;
private Long result;
AddPathBasedCacheDirectiveOp(DFSClient client,
PathBasedCacheDirective directive) {
super("addPathBasedCacheDirective", client);
AddCacheDirectiveInfoOp(DFSClient client,
CacheDirectiveInfo directive) {
super("addCacheDirective", client);
this.directive = directive;
}
@@ -755,15 +756,15 @@
@Override
void invoke() throws Exception {
result = client.addPathBasedCacheDirective(directive);
result = client.addCacheDirective(directive);
}
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
for (int i = 0; i < CHECKTIMES; i++) {
RemoteIterator<PathBasedCacheDirective> iter =
dfs.listPathBasedCacheDirectives(
new PathBasedCacheDirective.Builder().
RemoteIterator<CacheDirectiveEntry> iter =
dfs.listCacheDirectives(
new CacheDirectiveInfo.Builder().
setPool(directive.getPool()).
setPath(directive.getPath()).
build());
@@ -781,15 +782,15 @@
}
}
/** modifyPathBasedCacheDirective */
class ModifyPathBasedCacheDirectiveOp extends AtMostOnceOp {
private final PathBasedCacheDirective directive;
/** modifyCacheDirective */
class ModifyCacheDirectiveInfoOp extends AtMostOnceOp {
private final CacheDirectiveInfo directive;
private final short newReplication;
private long id;
ModifyPathBasedCacheDirectiveOp(DFSClient client,
PathBasedCacheDirective directive, short newReplication) {
super("modifyPathBasedCacheDirective", client);
ModifyCacheDirectiveInfoOp(DFSClient client,
CacheDirectiveInfo directive, short newReplication) {
super("modifyCacheDirective", client);
this.directive = directive;
this.newReplication = newReplication;
}
@@ -797,13 +798,13 @@
@Override
void prepare() throws Exception {
dfs.addCachePool(new CachePoolInfo(directive.getPool()));
id = client.addPathBasedCacheDirective(directive);
id = client.addCacheDirective(directive);
}
@Override
void invoke() throws Exception {
client.modifyPathBasedCacheDirective(
new PathBasedCacheDirective.Builder().
client.modifyCacheDirective(
new CacheDirectiveInfo.Builder().
setId(id).
setReplication(newReplication).
build());
@@ -812,14 +813,14 @@
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
for (int i = 0; i < CHECKTIMES; i++) {
RemoteIterator<PathBasedCacheDirective> iter =
dfs.listPathBasedCacheDirectives(
new PathBasedCacheDirective.Builder().
RemoteIterator<CacheDirectiveEntry> iter =
dfs.listCacheDirectives(
new CacheDirectiveInfo.Builder().
setPool(directive.getPool()).
setPath(directive.getPath()).
build());
while (iter.hasNext()) {
PathBasedCacheDirective result = iter.next();
CacheDirectiveInfo result = iter.next().getInfo();
if ((result.getId() == id) &&
(result.getReplication().shortValue() == newReplication)) {
return true;
@@ -836,15 +837,15 @@
}
}
/** removePathBasedCacheDirective */
class RemovePathBasedCacheDirectiveOp extends AtMostOnceOp {
private PathBasedCacheDirective directive;
/** removeCacheDirective */
class RemoveCacheDirectiveInfoOp extends AtMostOnceOp {
private CacheDirectiveInfo directive;
private long id;
RemovePathBasedCacheDirectiveOp(DFSClient client, String pool,
RemoveCacheDirectiveInfoOp(DFSClient client, String pool,
String path) {
super("removePathBasedCacheDirective", client);
this.directive = new PathBasedCacheDirective.Builder().
super("removeCacheDirective", client);
this.directive = new CacheDirectiveInfo.Builder().
setPool(pool).
setPath(new Path(path)).
build();
@@ -853,20 +854,20 @@
@Override
void prepare() throws Exception {
dfs.addCachePool(new CachePoolInfo(directive.getPool()));
id = dfs.addPathBasedCacheDirective(directive);
id = dfs.addCacheDirective(directive);
}
@Override
void invoke() throws Exception {
client.removePathBasedCacheDirective(id);
client.removeCacheDirective(id);
}
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
for (int i = 0; i < CHECKTIMES; i++) {
RemoteIterator<PathBasedCacheDirective> iter =
dfs.listPathBasedCacheDirectives(
new PathBasedCacheDirective.Builder().
RemoteIterator<CacheDirectiveEntry> iter =
dfs.listCacheDirectives(
new CacheDirectiveInfo.Builder().
setPool(directive.getPool()).
setPath(directive.getPath()).
build());
@@ -1072,10 +1073,10 @@
}
@Test (timeout=60000)
public void testAddPathBasedCacheDirective() throws Exception {
public void testAddCacheDirectiveInfo() throws Exception {
DFSClient client = genClientWithDummyHandler();
AtMostOnceOp op = new AddPathBasedCacheDirectiveOp(client,
new PathBasedCacheDirective.Builder().
AtMostOnceOp op = new AddCacheDirectiveInfoOp(client,
new CacheDirectiveInfo.Builder().
setPool("pool").
setPath(new Path("/path")).
build());
@@ -1083,10 +1084,10 @@
}
@Test (timeout=60000)
public void testModifyPathBasedCacheDirective() throws Exception {
public void testModifyCacheDirectiveInfo() throws Exception {
DFSClient client = genClientWithDummyHandler();
AtMostOnceOp op = new ModifyPathBasedCacheDirectiveOp(client,
new PathBasedCacheDirective.Builder().
AtMostOnceOp op = new ModifyCacheDirectiveInfoOp(client,
new CacheDirectiveInfo.Builder().
setPool("pool").
setPath(new Path("/path")).
setReplication((short)1).build(),
@@ -1095,9 +1096,9 @@
}
@Test (timeout=60000)
public void testRemovePathBasedCacheDescriptor() throws Exception {
public void testRemoveCacheDescriptor() throws Exception {
DFSClient client = genClientWithDummyHandler();
AtMostOnceOp op = new RemovePathBasedCacheDirectiveOp(client, "pool",
AtMostOnceOp op = new RemoveCacheDirectiveInfoOp(client, "pool",
"/path");
testClientRetryWithFailover(op);
}


@@ -65,6 +65,7 @@ public class TestHttpsFileSystem {
cluster.getFileSystem().create(new Path("/test")).close();
InetSocketAddress addr = cluster.getNameNode().getHttpsAddress();
nnAddr = addr.getHostName() + ":" + addr.getPort();
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, nnAddr);
}
@AfterClass
@@ -80,4 +81,15 @@
Assert.assertTrue(fs.exists(new Path("/test")));
fs.close();
}
@Test
public void testSWebHdfsFileSystem() throws Exception {
FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, "swebhdfs");
final Path f = new Path("/testswebhdfs");
FSDataOutputStream os = fs.create(f);
os.write(23);
os.close();
Assert.assertTrue(fs.exists(f));
fs.close();
}
}


@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestDFSClientRetries;
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@@ -101,7 +102,7 @@ public class TestWebHDFS {
try {
cluster.waitActive();
final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
final Path dir = new Path("/test/largeFile");
Assert.assertTrue(fs.mkdirs(dir));
@@ -229,9 +230,9 @@
new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
try {
cluster.waitActive();
WebHdfsTestUtil.getWebHdfsFileSystem(conf).setPermission(
new Path("/"),
new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME)
.setPermission(new Path("/"),
new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
// trick the NN into not believing it's not the superuser so we can
// tell if the correct user is used by listStatus
@@ -243,8 +244,9 @@
.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws IOException, URISyntaxException {
FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
Path d = new Path("/my-dir");
FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
WebHdfsFileSystem.SCHEME);
Path d = new Path("/my-dir");
Assert.assertTrue(fs.mkdirs(d));
for (int i=0; i < listLimit*3; i++) {
Path p = new Path(d, "file-"+i);
@@ -258,4 +260,16 @@
cluster.shutdown();
}
}
/**
* WebHdfs should be enabled by default after HDFS-5532
*
* @throws Exception
*/
@Test
public void testWebHdfsEnabledByDefault() throws Exception {
Configuration conf = new HdfsConfiguration();
Assert.assertTrue(conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
false));
}
}


@@ -82,7 +82,7 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
final UserGroupInformation current = UserGroupInformation.getCurrentUser();
ugi = UserGroupInformation.createUserForTesting(
current.getShortUserName() + "x", new String[]{"user"});
fs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf);
fs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME);
defaultWorkingDirectory = fs.getWorkingDirectory().toUri().getPath();
}


@@ -18,35 +18,32 @@
package org.apache.hadoop.hdfs.web;
import static org.junit.Assert.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketAddress;
import java.net.SocketTimeoutException;
import java.nio.channels.SocketChannel;
import java.util.ArrayList;
import java.util.List;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* This test suite checks that WebHdfsFileSystem sets connection timeouts and
@@ -77,7 +74,7 @@ public class TestWebHdfsTimeouts {
serverSocket = new ServerSocket(0, CONNECTION_BACKLOG);
nnHttpAddress = new InetSocketAddress("localhost", serverSocket.getLocalPort());
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "localhost:" + serverSocket.getLocalPort());
fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
fs.connectionFactory = connectionFactory;
clients = new ArrayList<SocketChannel>();
serverThread = null;


@@ -46,20 +46,36 @@ public class WebHdfsTestUtil {
return conf;
}
public static WebHdfsFileSystem getWebHdfsFileSystem(final Configuration conf
) throws IOException, URISyntaxException {
final String uri = WebHdfsFileSystem.SCHEME + "://"
+ conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
public static WebHdfsFileSystem getWebHdfsFileSystem(
final Configuration conf, String scheme) throws IOException,
URISyntaxException {
final String uri;
if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
uri = WebHdfsFileSystem.SCHEME + "://"
+ conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
} else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
uri = SWebHdfsFileSystem.SCHEME + "://"
+ conf.get(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
} else {
throw new IllegalArgumentException("unknown scheme: " + scheme);
}
return (WebHdfsFileSystem)FileSystem.get(new URI(uri), conf);
}
public static WebHdfsFileSystem getWebHdfsFileSystemAs(
final UserGroupInformation ugi, final Configuration conf
final UserGroupInformation ugi, final Configuration conf
) throws IOException, InterruptedException {
return getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME);
}
public static WebHdfsFileSystem getWebHdfsFileSystemAs(
final UserGroupInformation ugi, final Configuration conf, final String scheme
) throws IOException, InterruptedException {
return ugi.doAs(new PrivilegedExceptionAction<WebHdfsFileSystem>() {
@Override
public WebHdfsFileSystem run() throws Exception {
return getWebHdfsFileSystem(conf);
return getWebHdfsFileSystem(conf, scheme);
}
});
}
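
A quick usage sketch for the scheme-aware helper above, assuming the existing createConf() utility; the HTTPS address is illustrative only, as tests normally take it from a running MiniDFSCluster:

Configuration conf = WebHdfsTestUtil.createConf();
// Illustrative address; not part of this patch.
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:50470");
WebHdfsFileSystem swebhdfs =
    WebHdfsTestUtil.getWebHdfsFileSystem(conf, SWebHdfsFileSystem.SCHEME);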

View File

@ -90,7 +90,7 @@
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output>poolparty bob bobgroup rwxrwxrwx 51</expected-output>
<expected-output>poolparty bob bobgroup rwxrwxrwx 51</expected-output>
</comparator>
</comparators>
</test>
@ -129,11 +129,11 @@
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>bar alice alicegroup rwxr-xr-x 100 </expected-output>
<expected-output>bar alice alicegroup rwxr-xr-x 100</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>foo bob bob rw-rw-r-- 100 </expected-output>
<expected-output>foo bob bob rw-rw-r-- 100</expected-output>
</comparator>
</comparators>
</test>
@ -156,7 +156,7 @@
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>foo bob bob rw-rw-r-- 100 </expected-output>
<expected-output>foo bob bob rw-rw-r-- 100</expected-output>
</comparator>
</comparators>
</test>
@ -180,15 +180,15 @@
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>1 pool1 1 /foo</expected-output>
<expected-output> 1 pool1 1 /foo</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>2 pool1 1 /bar</expected-output>
<expected-output> 2 pool1 1 /bar</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>3 pool1 2 /baz</expected-output>
<expected-output> 3 pool1 2 /baz</expected-output>
</comparator>
</comparators>
</test>
@ -234,11 +234,11 @@
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>8 pool2 1 /baz</expected-output>
<expected-output> 8 pool2 1 /baz</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>9 pool2 1 /buz</expected-output>
<expected-output> 9 pool2 1 /buz</expected-output>
</comparator>
</comparators>
</test>
@ -265,11 +265,11 @@
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>10 pool1 1 /foo</expected-output>
<expected-output> 10 pool1 1 /foo</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>12 pool2 1 /foo</expected-output>
<expected-output> 12 pool2 1 /foo</expected-output>
</comparator>
</comparators>
</test>
@ -296,7 +296,7 @@
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>16 pool2 1 /foo</expected-output>
<expected-output> 16 pool2 1 /foo</expected-output>
</comparator>
</comparators>
</test>
@ -320,7 +320,7 @@
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>19 pool1 1 /bar</expected-output>
<expected-output> 19 pool1 1 /bar</expected-output>
</comparator>
</comparators>
</test>
@ -349,11 +349,11 @@
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>22 pool1 1 /bar</expected-output>
<expected-output> 22 pool1 1 /bar</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>24 pool2 1 /bar</expected-output>
<expected-output> 24 pool2 1 /bar</expected-output>
</comparator>
</comparators>
</test>
@ -379,7 +379,7 @@
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>25 pool1 1 /bar3</expected-output>
<expected-output> 25 pool1 1 /bar3</expected-output>
</comparator>
</comparators>
</test>

View File

@ -223,6 +223,9 @@ Release 2.3.0 - UNRELEASED
MAPREDUCE-5625. TestFixedLengthInputFormat fails in jdk7 environment
(Mariappan Asokan via jeagles)
MAPREDUCE-5631. TestJobEndNotifier.testNotifyRetries fails with "Should
have taken more than 5 seconds" in jdk7 (Jonathan Eagles via jlowe)
Release 2.2.1 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@ -38,6 +38,7 @@ import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
@ -160,8 +161,13 @@ public class TestJobEndNotifier extends JobEndNotifier {
//Check retries happen as intended
@Test
public void testNotifyRetries() throws InterruptedException {
Configuration conf = new Configuration();
JobConf conf = new JobConf();
conf.set(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS, "0");
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS, "1");
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_URL, "http://nonexistent");
conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL, "5000");
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL, "5000");
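// With a single notification attempt and a 5000 ms retry interval, the
// elapsed-time assertions below have a deterministic lower bound.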
JobReport jobReport = mock(JobReport.class);
long startTime = System.currentTimeMillis();
@ -170,7 +176,7 @@ public class TestJobEndNotifier extends JobEndNotifier {
this.notify(jobReport);
long endTime = System.currentTimeMillis();
Assert.assertEquals("Only 1 try was expected but was : "
+ this.notificationCount, this.notificationCount, 1);
+ this.notificationCount, 1, this.notificationCount);
Assert.assertTrue("Should have taken more than 5 seconds it took "
+ (endTime - startTime), endTime - startTime > 5000);
@ -185,7 +191,7 @@ public class TestJobEndNotifier extends JobEndNotifier {
this.notify(jobReport);
endTime = System.currentTimeMillis();
Assert.assertEquals("Only 3 retries were expected but was : "
+ this.notificationCount, this.notificationCount, 3);
+ this.notificationCount, 3, this.notificationCount);
Assert.assertTrue("Should have taken more than 9 seconds it took "
+ (endTime - startTime), endTime - startTime > 9000);
@ -198,14 +204,14 @@ public class TestJobEndNotifier extends JobEndNotifier {
MRApp app = spy(new MRAppWithCustomContainerAllocator(
2, 2, true, this.getClass().getName(), true, 2, true));
doNothing().when(app).sysexit();
Configuration conf = new Configuration();
JobConf conf = new JobConf();
conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
JobImpl job = (JobImpl)app.submit(conf);
app.waitForInternalState(job, JobStateInternal.SUCCEEDED);
// Unregistration succeeds: successfullyUnregistered is set
app.shutDownJob();
Assert.assertEquals(true, app.isLastAMRetry());
Assert.assertTrue(app.isLastAMRetry());
Assert.assertEquals(1, JobEndServlet.calledTimes);
Assert.assertEquals("jobid=" + job.getID() + "&status=SUCCEEDED",
JobEndServlet.requestUri.getQuery());
@ -221,7 +227,7 @@ public class TestJobEndNotifier extends JobEndNotifier {
MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false,
this.getClass().getName(), true, 1, false));
doNothing().when(app).sysexit();
Configuration conf = new Configuration();
JobConf conf = new JobConf();
conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
JobImpl job = (JobImpl)app.submit(conf);
@ -234,10 +240,10 @@ public class TestJobEndNotifier extends JobEndNotifier {
app.shutDownJob();
// Not the last AM attempt. So the user should see that the job is still running.
app.waitForState(job, JobState.RUNNING);
Assert.assertEquals(false, app.isLastAMRetry());
Assert.assertFalse(app.isLastAMRetry());
Assert.assertEquals(0, JobEndServlet.calledTimes);
Assert.assertEquals(null, JobEndServlet.requestUri);
Assert.assertEquals(null, JobEndServlet.foundJobState);
Assert.assertNull(JobEndServlet.requestUri);
Assert.assertNull(JobEndServlet.foundJobState);
server.stop();
}
@ -248,7 +254,7 @@ public class TestJobEndNotifier extends JobEndNotifier {
MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false,
this.getClass().getName(), true, 2, false));
doNothing().when(app).sysexit();
Configuration conf = new Configuration();
JobConf conf = new JobConf();
conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
JobImpl job = (JobImpl)app.submit(conf);
@ -259,7 +265,7 @@ public class TestJobEndNotifier extends JobEndNotifier {
// Now shutdown. User should see FAILED state.
// Unregistration fails: isLastAMRetry is recalculated, this is
app.shutDownJob();
Assert.assertEquals(true, app.isLastAMRetry());
Assert.assertTrue(app.isLastAMRetry());
Assert.assertEquals(1, JobEndServlet.calledTimes);
Assert.assertEquals("jobid=" + job.getID() + "&status=FAILED",
JobEndServlet.requestUri.getQuery());
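
A note on the assertEquals changes in this test: JUnit's signature is Assert.assertEquals(String message, long expected, long actual), so the swapped arguments compiled and passed, but a failure would have reported the two values reversed. A minimal illustration:

int actual = 5;  // e.g. notificationCount when a retry is missed
Assert.assertEquals("retry count", 3, actual);
// fails with: retry count expected:<3> but was:<5>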

View File

@ -589,7 +589,7 @@
<dependency>
<groupId>commons-lang</groupId>
<artifactId>commons-lang</artifactId>
<version>2.5</version>
<version>2.6</version>
</dependency>
<dependency>
<groupId>commons-collections</groupId>

View File

@ -114,6 +114,9 @@ Release 2.3.0 - UNRELEASED
YARN-584. In scheduler web UIs, queues unexpand on refresh. (Harshit
Daga via Sandy Ryza)
YARN-1303. Fixed DistributedShell to not fail with multiple commands separated
by a semi-colon as shell-command. (Xuan Gong via vinodkv)
OPTIMIZATIONS
BUG FIXES
@ -160,6 +163,13 @@ Release 2.3.0 - UNRELEASED
process same allocate request twice resulting in additional containers
getting allocated. (Omkar Vinit Joshi via bikas)
YARN-1425. TestRMRestart fails because MockRM.waitForState(AttemptId) uses
current attempt instead of the attempt passed as argument (Omkar Vinit
Joshi via bikas)
YARN-1053. Diagnostic message from ContainerExitEvent is ignored in
ContainerImpl (Omkar Vinit Joshi via bikas)
Release 2.2.1 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@ -19,8 +19,10 @@
package org.apache.hadoop.yarn.applications.distributedshell;
import java.io.BufferedReader;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.StringReader;
import java.net.URI;
import java.net.URISyntaxException;
@ -218,6 +220,8 @@ public class ApplicationMaster {
// Hardcoded path to shell script in launch container's local env
private final String ExecShellStringPath = "ExecShellScript.sh";
private final String shellCommandPath = "shellCommands";
private volatile boolean done;
private volatile boolean success;
@ -300,8 +304,6 @@ public class ApplicationMaster {
Options opts = new Options();
opts.addOption("app_attempt_id", true,
"App Attempt ID. Not to be used unless for testing purposes");
opts.addOption("shell_command", true,
"Shell command to be executed by the Application Master");
opts.addOption("shell_script", true,
"Location of the shell script to be executed");
opts.addOption("shell_args", true, "Command line args for the shell script");
@ -372,11 +374,20 @@ public class ApplicationMaster {
+ appAttemptID.getApplicationId().getClusterTimestamp()
+ ", attemptId=" + appAttemptID.getAttemptId());
if (!cliParser.hasOption("shell_command")) {
File shellCommandFile = new File(shellCommandPath);
if (!shellCommandFile.exists()) {
throw new IllegalArgumentException(
"No shell command specified to be executed by application master");
}
shellCommand = cliParser.getOptionValue("shell_command");
FileInputStream fs = null;
DataInputStream ds = null;
try {
fs = new FileInputStream(shellCommandFile);
ds = new DataInputStream(fs);
shellCommand = ds.readUTF();
} finally {
org.apache.commons.io.IOUtils.closeQuietly(ds);
org.apache.commons.io.IOUtils.closeQuietly(fs);
}
if (cliParser.hasOption("shell_args")) {
shellArgs = cliParser.getOptionValue("shell_args");

View File

@ -32,14 +32,17 @@ import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
@ -162,6 +165,7 @@ public class Client {
// Command line options
private Options opts;
private final String shellCommandPath = "shellCommands";
/**
* @param args Command line arguments
*/
@ -483,6 +487,27 @@ public class Client {
hdfsShellScriptTimestamp = shellFileStatus.getModificationTime();
}
if (!shellCommand.isEmpty()) {
String shellCommandSuffix =
appName + "/" + appId.getId() + "/" + shellCommandPath;
Path shellCommandDst =
new Path(fs.getHomeDirectory(), shellCommandSuffix);
FSDataOutputStream ostream = null;
try {
ostream = FileSystem
.create(fs, shellCommandDst, new FsPermission((short) 0710));
ostream.writeUTF(shellCommand);
} finally {
IOUtils.closeQuietly(ostream);
}
FileStatus scFileStatus = fs.getFileStatus(shellCommandDst);
LocalResource scRsrc =
LocalResource.newInstance(
ConverterUtils.getYarnUrlFromURI(shellCommandDst.toUri()),
LocalResourceType.FILE, LocalResourceVisibility.APPLICATION,
scFileStatus.getLen(), scFileStatus.getModificationTime());
localResources.put(shellCommandPath, scRsrc);
}
// Set local resource info into app master container launch context
amContainer.setLocalResources(localResources);
@ -541,9 +566,7 @@ public class Client {
vargs.add("--container_vcores " + String.valueOf(containerVirtualCores));
vargs.add("--num_containers " + String.valueOf(numContainers));
vargs.add("--priority " + String.valueOf(shellCmdPriority));
if (!shellCommand.isEmpty()) {
vargs.add("--shell_command " + shellCommand + "");
}
if (!shellArgs.isEmpty()) {
vargs.add("--shell_args " + shellArgs + "");
}
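
For context, the upload above is the write half of the mechanism whose read half appears in ApplicationMaster earlier in this change: the shell command now travels in a small UTF-encoded HDFS file instead of a CLI flag, so semicolon-separated commands survive intact (YARN-1303). A self-contained sketch of the round trip, with an illustrative path and command:

import java.io.DataInputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class ShellCommandRoundTrip {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("shellCommands");  // illustrative location
    FSDataOutputStream out =
        FileSystem.create(fs, p, new FsPermission((short) 0710));
    try {
      out.writeUTF("echo output_ignored;echo output_expected");
    } finally {
      out.close();
    }
    // FSDataInputStream extends DataInputStream, so readUTF() pairs with
    // the writeUTF() above.
    DataInputStream in = fs.open(p);
    try {
      System.out.println(in.readUTF());
    } finally {
      in.close();
    }
  }
}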

View File

@ -18,12 +18,15 @@
package org.apache.hadoop.yarn.applications.distributedshell;
import java.io.BufferedReader;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
@ -170,6 +173,39 @@ public class TestDistributedShell {
}
@Test(timeout=90000)
public void testDSShellWithCommands() throws Exception {
String[] args = {
"--jar",
APPMASTER_JAR,
"--num_containers",
"2",
"--shell_command",
"\"echo output_ignored;echo output_expected\"",
"--master_memory",
"512",
"--master_vcores",
"2",
"--container_memory",
"128",
"--container_vcores",
"1"
};
LOG.info("Initializing DS Client");
final Client client =
new Client(new Configuration(yarnCluster.getConfig()));
boolean initSuccess = client.init(args);
Assert.assertTrue(initSuccess);
LOG.info("Running DS Client");
boolean result = client.run();
LOG.info("Client run completed. Result=" + result);
List<String> expectedContent = new ArrayList<String>();
expectedContent.add("output_expected");
verifyContainerLog(2, expectedContent, false, "");
}
@Test(timeout=90000)
public void testDSShellWithInvalidArgs() throws Exception {
Client client = new Client(new Configuration(yarnCluster.getConfig()));
@ -332,5 +368,64 @@ public class TestDistributedShell {
LOG.info("Running DS Client");
Assert.assertTrue(client.run());
}
private int verifyContainerLog(int containerNum,
List<String> expectedContent, boolean count, String expectedWord) {
File logFolder =
new File(yarnCluster.getNodeManager(0).getConfig()
.get(YarnConfiguration.NM_LOG_DIRS,
YarnConfiguration.DEFAULT_NM_LOG_DIRS));
File[] listOfFiles = logFolder.listFiles();
int currentContainerLogFileIndex = -1;
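// Each application log folder holds one sub-directory per container, i.e.
// the AM container plus the containerNum task containers started by the test.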
for (int i = listOfFiles.length - 1; i >= 0; i--) {
if (listOfFiles[i].listFiles().length == containerNum + 1) {
currentContainerLogFileIndex = i;
break;
}
}
Assert.assertTrue(currentContainerLogFileIndex != -1);
File[] containerFiles =
listOfFiles[currentContainerLogFileIndex].listFiles();
int numOfWords = 0;
for (int i = 0; i < containerFiles.length; i++) {
for (File output : containerFiles[i].listFiles()) {
if (output.getName().trim().contains("stdout")) {
BufferedReader br = null;
try {
String sCurrentLine;
br = new BufferedReader(new FileReader(output));
int numOfline = 0;
while ((sCurrentLine = br.readLine()) != null) {
if (count) {
if (sCurrentLine.contains(expectedWord)) {
numOfWords++;
}
} else if (output.getName().trim().equals("stdout")){
Assert.assertEquals("The current is" + sCurrentLine,
expectedContent.get(numOfline), sCurrentLine.trim());
numOfline++;
}
}
} catch (IOException e) {
e.printStackTrace();
} finally {
try {
if (br != null)
br.close();
} catch (IOException ex) {
ex.printStackTrace();
}
}
}
}
}
return numOfWords;
}
}

View File

@ -18,7 +18,6 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.container;
import java.io.IOException;
import java.net.URISyntaxException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
@ -687,6 +686,10 @@ public class ContainerImpl implements Container {
public void transition(ContainerImpl container, ContainerEvent event) {
ContainerExitEvent exitEvent = (ContainerExitEvent) event;
container.exitCode = exitEvent.getExitCode();
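// Surface the exit event's diagnostics in the container's own diagnostics
// so they appear in the container status (YARN-1053).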
if (exitEvent.getDiagnosticInfo() != null) {
container.diagnostics.append(exitEvent.getDiagnosticInfo())
.append('\n');
}
// TODO: Add containerWorkDir to the deletion service.
// TODO: Add containerOutputDir to the deletion service.
@ -806,6 +809,10 @@ public class ContainerImpl implements Container {
public void transition(ContainerImpl container, ContainerEvent event) {
ContainerExitEvent exitEvent = (ContainerExitEvent) event;
container.exitCode = exitEvent.getExitCode();
if (exitEvent.getDiagnosticInfo() != null) {
container.diagnostics.append(exitEvent.getDiagnosticInfo())
.append('\n');
}
// The process/process-grp is killed. Decrement reference counts and
// cleanup resources

View File

@ -55,6 +55,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
@ -844,9 +845,13 @@ public class TestContainer {
}
public void containerFailed(int exitCode) {
String diagnosticMsg = "Container completed with exit code " + exitCode;
c.handle(new ContainerExitEvent(cId,
ContainerEventType.CONTAINER_EXITED_WITH_FAILURE, exitCode,
"Container completed with exit code " + exitCode));
diagnosticMsg));
ContainerStatus containerStatus = c.cloneAndGetContainerStatus();
assert containerStatus.getDiagnostics().contains(diagnosticMsg);
assert containerStatus.getExitStatus() == exitCode;
drainDispatcherEvents();
}
@ -857,9 +862,13 @@ public class TestContainer {
public void containerKilledOnRequest() {
int exitCode = ExitCode.FORCE_KILLED.getExitCode();
String diagnosticMsg = "Container completed with exit code " + exitCode;
c.handle(new ContainerExitEvent(cId,
ContainerEventType.CONTAINER_KILLED_ON_REQUEST, exitCode,
"Container completed with exit code " + exitCode));
diagnosticMsg));
ContainerStatus containerStatus = c.cloneAndGetContainerStatus();
assert containerStatus.getDiagnostics().contains(diagnosticMsg);
assert containerStatus.getExitStatus() == exitCode;
drainDispatcherEvents();
}

View File

@ -107,7 +107,7 @@ public class MockRM extends ResourceManager {
throws Exception {
RMApp app = getRMContext().getRMApps().get(attemptId.getApplicationId());
Assert.assertNotNull("app shouldn't be null", app);
RMAppAttempt attempt = app.getCurrentAppAttempt();
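// Wait on the attempt that was passed in, not the current one (YARN-1425).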
RMAppAttempt attempt = app.getRMAppAttempt(attemptId);
int timeoutSecs = 0;
while (!finalState.equals(attempt.getAppAttemptState()) && timeoutSecs++ < 40) {
System.out.println("AppAttempt : " + attemptId

View File

@ -487,6 +487,8 @@ public class TestRMRestart {
Assert.assertEquals(2, rmApp.getAppAttempts().size());
// am1 attempt should be in FAILED state whereas am2 attempt should be in
// LAUNCHED state
rm2.waitForState(am1.getApplicationAttemptId(), RMAppAttemptState.FAILED);
rm2.waitForState(am2.getApplicationAttemptId(), RMAppAttemptState.LAUNCHED);
Assert.assertEquals(RMAppAttemptState.FAILED,
rmApp.getAppAttempts().get(am1.getApplicationAttemptId())
.getAppAttemptState());
@ -524,14 +526,17 @@ public class TestRMRestart {
Assert.assertEquals(3, rmApp.getAppAttempts().size());
// am1 and am2 attempts should be in FAILED state whereas am3 should be
// in LAUNCHED state
rm3.waitForState(am1.getApplicationAttemptId(), RMAppAttemptState.FAILED);
rm3.waitForState(am2.getApplicationAttemptId(), RMAppAttemptState.FAILED);
ApplicationAttemptId latestAppAttemptId =
rmApp.getCurrentAppAttempt().getAppAttemptId();
rm3.waitForState(latestAppAttemptId, RMAppAttemptState.LAUNCHED);
Assert.assertEquals(RMAppAttemptState.FAILED,
rmApp.getAppAttempts().get(am1.getApplicationAttemptId())
.getAppAttemptState());
Assert.assertEquals(RMAppAttemptState.FAILED,
rmApp.getAppAttempts().get(am2.getApplicationAttemptId())
.getAppAttemptState());
ApplicationAttemptId latestAppAttemptId =
rmApp.getCurrentAppAttempt().getAppAttemptId();
Assert.assertEquals(RMAppAttemptState.LAUNCHED, rmApp.getAppAttempts()
.get(latestAppAttemptId).getAppAttemptState());
@ -562,6 +567,7 @@ public class TestRMRestart {
rm4.waitForState(rmApp.getApplicationId(), RMAppState.ACCEPTED);
Assert.assertEquals(4, rmApp.getAppAttempts().size());
Assert.assertEquals(RMAppState.ACCEPTED, rmApp.getState());
rm4.waitForState(latestAppAttemptId, RMAppAttemptState.SCHEDULED);
Assert.assertEquals(RMAppAttemptState.SCHEDULED, rmApp.getAppAttempts()
.get(latestAppAttemptId).getAppAttemptState());
@ -571,6 +577,8 @@ public class TestRMRestart {
rm4.waitForState(app2.getApplicationId(), RMAppState.ACCEPTED);
Assert.assertEquals(RMAppState.ACCEPTED, app2.getState());
Assert.assertEquals(1, app2.getAppAttempts().size());
rm4.waitForState(app2.getCurrentAppAttempt().getAppAttemptId(),
RMAppAttemptState.SCHEDULED);
Assert.assertEquals(RMAppAttemptState.SCHEDULED, app2
.getCurrentAppAttempt().getAppAttemptState());