Merge trunk into HA branch.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1293742 13f79535-47bb-0310-9956-ffa450edef68
commit ae9014ef6a
@@ -3,6 +3,7 @@ Hadoop Change Log
 Trunk (unreleased changes)
 
   INCOMPATIBLE CHANGES
 
     HADOOP-7920. Remove Avro Rpc. (suresh)
 
   NEW FEATURES
@@ -160,8 +161,22 @@ Trunk (unreleased changes)
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
 
+Release 0.23.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 0.23.2 - UNRELEASED
 
+  INCOMPATIBLE CHANGES
+
   NEW FEATURES
 
   IMPROVEMENTS
@@ -1,10 +1,13 @@
 Hadoop HDFS Change Log
 
 Trunk (unreleased changes)
 
   INCOMPATIBLE CHANGES
 
     HDFS-2676. Remove Avro RPC. (suresh)
 
   NEW FEATURES
 
     HDFS-395. DFS Scalability: Incremental block reports. (Tomasz Nykiel
     via hairong)
@@ -132,8 +135,17 @@ Trunk (unreleased changes)
     HDFS-2878. Fix TestBlockRecovery and move it back into main test directory.
     (todd)
 
-    HDFS-2655. BlockReaderLocal#skip performs unnecessary IO. (Brandon Li
-    via jitendra)
+    HDFS-2655. BlockReaderLocal#skip performs unnecessary IO.
+    (Brandon Li via jitendra)
 
+    HDFS-3003. Remove getHostPortString() from NameNode, replace it with
+    NetUtils.getHostPortString(). (Brandon Li via atm)
+
+    HDFS-3009. Remove duplicate code in DFSClient#isLocalAddress by using
+    NetUtils. (Hari Mankude via suresh)
+
+    HDFS-3002. TestNameNodeMetrics need not wait for metrics update.
+    (suresh)
+
   OPTIMIZATIONS
 
     HDFS-2477. Optimize computing the diff between a block report and the
@@ -212,6 +224,20 @@ Trunk (unreleased changes)
     HDFS-2968. Protocol translator for BlockRecoveryCommand broken when
     multiple blocks need recovery. (todd)
 
+Release 0.23.3 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+    HDFS-2978. The NameNode should expose name dir statuses via JMX. (atm)
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -274,6 +300,11 @@ Release 0.23.2 - UNRELEASED
     dfs.client.block.write.replace-datanode-on-failure.enable should be true.
     (szetszwo)
 
+    HDFS-3008. Negative caching of local addrs doesn't work. (eli)
+
+    HDFS-3006. In WebHDFS, when the return body is empty, set the Content-Type
+    to application/octet-stream instead of application/json. (szetszwo)
+
 Release 0.23.1 - 2012-02-17
 
   INCOMPATIBLE CHANGES
@@ -576,26 +576,19 @@ public class DFSClient implements java.io.Closeable {
   private static boolean isLocalAddress(InetSocketAddress targetAddr) {
     InetAddress addr = targetAddr.getAddress();
     Boolean cached = localAddrMap.get(addr.getHostAddress());
-    if (cached != null && cached) {
+    if (cached != null) {
       if (LOG.isTraceEnabled()) {
-        LOG.trace("Address " + targetAddr + " is local");
+        LOG.trace("Address " + targetAddr +
+            (cached ? " is local" : " is not local"));
       }
-      return true;
+      return cached;
     }
 
-    // Check if the address is any local or loop back
-    boolean local = addr.isAnyLocalAddress() || addr.isLoopbackAddress();
-
-    // Check if the address is defined on any interface
-    if (!local) {
-      try {
-        local = NetworkInterface.getByInetAddress(addr) != null;
-      } catch (SocketException e) {
-        local = false;
-      }
-    }
+    boolean local = NetUtils.isLocalAddress(addr);
+
     if (LOG.isTraceEnabled()) {
-      LOG.trace("Address " + targetAddr + " is local");
+      LOG.trace("Address " + targetAddr +
+          (local ? " is local" : " is not local"));
     }
     localAddrMap.put(addr.getHostAddress(), local);
     return local;
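
Note on the DFSClient hunk above (HDFS-3008): with the old "cached != null && cached" test, a cached false never short-circuits, so non-local addresses are re-resolved on every call; trusting any non-null entry and returning it makes negative results stick as well. Below is a minimal standalone sketch of that caching pattern, illustrative only -- the class, method names, and locality check are invented and are not the HDFS code itself.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Illustrative only: a tiny cache with the same shape as the code above.
public class LocalityCache {
  private final Map<String, Boolean> cache = new ConcurrentHashMap<String, Boolean>();

  // Buggy variant: a cached "false" fails the null-and-true test,
  // so negative answers are recomputed on every call.
  public boolean isLocalBuggy(String host) {
    Boolean cached = cache.get(host);
    if (cached != null && cached) {
      return true;              // only positive hits short-circuit
    }
    boolean local = compute(host);
    cache.put(host, local);
    return local;
  }

  // Fixed variant, mirroring the patch: any non-null entry is trusted,
  // so "not local" is cached as well.
  public boolean isLocal(String host) {
    Boolean cached = cache.get(host);
    if (cached != null) {
      return cached;            // covers both true and false
    }
    boolean local = compute(host);
    cache.put(host, local);
    return local;
  }

  private boolean compute(String host) {
    // Stand-in for the real address check (NetUtils.isLocalAddress in HDFS).
    return "localhost".equals(host) || "127.0.0.1".equals(host);
  }

  public static void main(String[] args) {
    LocalityCache c = new LocalityCache();
    System.out.println(c.isLocal("example.com")); // false, computed once
    System.out.println(c.isLocal("example.com")); // false, served from the cache
  }
}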
@@ -519,7 +519,7 @@ public class DFSUtil {
     // Use default address as fall back
     String defaultAddress;
     try {
-      defaultAddress = NameNode.getHostPortString(NameNode.getAddress(conf));
+      defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
     } catch (IllegalArgumentException e) {
       defaultAddress = null;
     }
@@ -117,7 +117,7 @@ public class DatanodeWebHdfsMethods {
   @PUT
   @Path("/")
   @Consumes({"*/*"})
-  @Produces({MediaType.APPLICATION_JSON})
+  @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
   public Response putRoot(
       final InputStream in,
       @Context final UserGroupInformation ugi,
@@ -147,7 +147,7 @@ public class DatanodeWebHdfsMethods {
   @PUT
   @Path("{" + UriFsPathParam.NAME + ":.*}")
   @Consumes({"*/*"})
-  @Produces({MediaType.APPLICATION_JSON})
+  @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
   public Response put(
       final InputStream in,
       @Context final UserGroupInformation ugi,
@@ -209,7 +209,7 @@ public class DatanodeWebHdfsMethods {
       final InetSocketAddress nnHttpAddr = NameNode.getHttpAddress(conf);
       final URI uri = new URI(WebHdfsFileSystem.SCHEME, null,
           nnHttpAddr.getHostName(), nnHttpAddr.getPort(), fullpath, null, null);
-      return Response.created(uri).type(MediaType.APPLICATION_JSON).build();
+      return Response.created(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     default:
       throw new UnsupportedOperationException(op + " is not supported");
@@ -222,7 +222,7 @@ public class DatanodeWebHdfsMethods {
   @POST
   @Path("/")
   @Consumes({"*/*"})
-  @Produces({MediaType.APPLICATION_JSON})
+  @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
   public Response postRoot(
       final InputStream in,
       @Context final UserGroupInformation ugi,
@@ -243,7 +243,7 @@ public class DatanodeWebHdfsMethods {
   @POST
   @Path("{" + UriFsPathParam.NAME + ":.*}")
   @Consumes({"*/*"})
-  @Produces({MediaType.APPLICATION_JSON})
+  @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
   public Response post(
       final InputStream in,
       @Context final UserGroupInformation ugi,
@@ -287,7 +287,7 @@ public class DatanodeWebHdfsMethods {
         IOUtils.cleanup(LOG, out);
         IOUtils.cleanup(LOG, dfsclient);
       }
-      return Response.ok().type(MediaType.APPLICATION_JSON).build();
+      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     default:
       throw new UnsupportedOperationException(op + " is not supported");
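
Note on the DatanodeWebHdfsMethods hunks above (HDFS-3006): the responses changed here carry no JSON body, so advertising application/json invites clients to parse an empty payload; typing them as application/octet-stream avoids that. A minimal JAX-RS sketch of the same pattern follows -- illustrative only, the resource path and class name are invented and this is not the WebHDFS code.

import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;

// Illustrative only: an empty 200 response typed as octet-stream,
// so clients do not try to parse a zero-length body as JSON.
@Path("/example")
public class EmptyBodyResource {

  @PUT
  @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
  public Response put() {
    // No entity is set, so a Content-Type of application/json would be misleading.
    return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
  }
}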
@@ -108,13 +108,13 @@ public class BackupNode extends NameNode {
   @Override // NameNode
   protected void setRpcServerAddress(Configuration conf,
       InetSocketAddress addr) {
-    conf.set(BN_ADDRESS_NAME_KEY, getHostPortString(addr));
+    conf.set(BN_ADDRESS_NAME_KEY, NetUtils.getHostPortString(addr));
   }
 
   @Override // Namenode
   protected void setRpcServiceServerAddress(Configuration conf,
       InetSocketAddress addr) {
-    conf.set(BN_SERVICE_RPC_ADDRESS_KEY, getHostPortString(addr));
+    conf.set(BN_SERVICE_RPC_ADDRESS_KEY, NetUtils.getHostPortString(addr));
   }
 
   @Override // NameNode
@@ -126,7 +126,7 @@ public class BackupNode extends NameNode {
 
   @Override // NameNode
   protected void setHttpServerAddress(Configuration conf){
-    conf.set(BN_HTTP_ADDRESS_NAME_KEY, getHostPortString(getHttpAddress()));
+    conf.set(BN_HTTP_ADDRESS_NAME_KEY, NetUtils.getHostPortString(getHttpAddress()));
   }
 
   @Override // NameNode
@@ -287,8 +287,8 @@ public class BackupNode extends NameNode {
     InetSocketAddress nnAddress = NameNode.getServiceAddress(conf, true);
     this.namenode = new NamenodeProtocolTranslatorPB(nnAddress, conf,
         UserGroupInformation.getCurrentUser());
-    this.nnRpcAddress = getHostPortString(nnAddress);
-    this.nnHttpAddress = getHostPortString(super.getHttpServerAddress(conf));
+    this.nnRpcAddress = NetUtils.getHostPortString(nnAddress);
+    this.nnHttpAddress = NetUtils.getHostPortString(super.getHttpServerAddress(conf));
     // get version and id info from the name-node
     NamespaceInfo nsInfo = null;
     while(!isStopRequested()) {
@@ -153,6 +153,8 @@ import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.common.Util;
@@ -5125,6 +5127,30 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   public String getBlockPoolId() {
     return blockPoolId;
   }
+
+  @Override // NameNodeMXBean
+  public String getNameDirStatuses() {
+    Map<String, Map<File, StorageDirType>> statusMap =
+      new HashMap<String, Map<File, StorageDirType>>();
+
+    Map<File, StorageDirType> activeDirs = new HashMap<File, StorageDirType>();
+    for (Iterator<StorageDirectory> it
+        = getFSImage().getStorage().dirIterator(); it.hasNext();) {
+      StorageDirectory st = it.next();
+      activeDirs.put(st.getRoot(), st.getStorageDirType());
+    }
+    statusMap.put("active", activeDirs);
+
+    List<Storage.StorageDirectory> removedStorageDirs
+        = getFSImage().getStorage().getRemovedStorageDirs();
+    Map<File, StorageDirType> failedDirs = new HashMap<File, StorageDirType>();
+    for (StorageDirectory st : removedStorageDirs) {
+      failedDirs.put(st.getRoot(), st.getStorageDirType());
+    }
+    statusMap.put("failed", failedDirs);
+
+    return JSON.toString(statusMap);
+  }
+
   /** @return the block manager. */
   public BlockManager getBlockManager() {
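
Note on the FSNamesystem hunk above (HDFS-2978): getNameDirStatuses() serializes a map with "active" and "failed" keys to JSON, so storage-directory health becomes visible over JMX. A rough sketch of reading that attribute from the platform MBean server follows; it is illustrative only, and the ObjectName used is an assumption that may need adjusting for a given deployment.

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

// Illustrative only: read the new attribute from a NameNode's MBean server.
public class NameDirStatusReader {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    // Assumed registration name; adjust to the NameNode's actual MXBean name.
    ObjectName mxbeanName =
        new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    String json = (String) mbs.getAttribute(mxbeanName, "NameDirStatuses");
    // Expected shape per the method above: {"active":{...},"failed":{...}}
    System.out.println(json);
  }
}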
@@ -71,7 +71,7 @@ public class FileChecksumServlets {
       String tokenString = ugi.getTokens().iterator().next().encodeToUrlString();
       dtParam = JspHelper.getDelegationTokenUrlParam(tokenString);
     }
-    String addr = NameNode.getHostPortString(nn.getNameNodeAddress());
+    String addr = NetUtils.getHostPortString(nn.getNameNodeAddress());
     String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
 
     return new URL(scheme, hostname, port,
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ServletUtil;
 
@@ -72,7 +73,7 @@ public class FileDataServlet extends DfsServlet {
     // Add namenode address to the url params
     NameNode nn = NameNodeHttpServer.getNameNodeFromContext(
         getServletContext());
-    String addr = NameNode.getHostPortString(nn.getNameNodeAddress());
+    String addr = NetUtils.getHostPortString(nn.getNameNodeAddress());
     String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
 
     return new URL(scheme, hostname, port,
@@ -294,13 +294,6 @@ public class NameNode {
         + namenode.getHostName()+portString);
   }
 
-  /**
-   * Compose a "host:port" string from the address.
-   */
-  public static String getHostPortString(InetSocketAddress addr) {
-    return addr.getHostName() + ":" + addr.getPort();
-  }
-
   //
   // Common NameNode methods implementation for the active name-node role.
   //
@@ -329,7 +322,7 @@ public class NameNode {
    */
   protected void setRpcServiceServerAddress(Configuration conf,
       InetSocketAddress serviceRPCAddress) {
-    setServiceAddress(conf, getHostPortString(serviceRPCAddress));
+    setServiceAddress(conf, NetUtils.getHostPortString(serviceRPCAddress));
   }
 
   protected void setRpcServerAddress(Configuration conf,
@@ -349,7 +342,7 @@ public class NameNode {
 
   protected void setHttpServerAddress(Configuration conf) {
     conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY,
-        getHostPortString(getHttpAddress()));
+        NetUtils.getHostPortString(getHttpAddress()));
   }
 
   protected void loadNamesystem(Configuration conf) throws IOException {
@@ -362,8 +355,8 @@ public class NameNode {
 
   NamenodeRegistration setRegistration() {
     nodeRegistration = new NamenodeRegistration(
-        getHostPortString(rpcServer.getRpcAddress()),
-        getHostPortString(getHttpAddress()),
+        NetUtils.getHostPortString(rpcServer.getRpcAddress()),
+        NetUtils.getHostPortString(getHttpAddress()),
         getFSImage().getStorage(), getRole());
     return nodeRegistration;
   }
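
Note on the NameNode hunks above (HDFS-3003): the host:port helper removed here is consumed from NetUtils by all call sites instead. The standalone sketch below mirrors the removed method body to show the expected formatting; it is illustrative only and is not the NetUtils implementation itself.

import java.net.InetSocketAddress;

// Illustrative only: the "host:port" formatting call sites now obtain from
// NetUtils.getHostPortString(); the body mirrors the method removed above.
public class HostPortDemo {
  static String hostPortString(InetSocketAddress addr) {
    return addr.getHostName() + ":" + addr.getPort();
  }

  public static void main(String[] args) {
    InetSocketAddress addr =
        InetSocketAddress.createUnresolved("nn.example.com", 8020);
    System.out.println(hostPortString(addr)); // nn.example.com:8020
  }
}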
@@ -166,4 +166,12 @@ public interface NameNodeMXBean {
    * @return the block pool id
    */
   public String getBlockPoolId();
+
+  /**
+   * Get status information about the directories storing image and edits logs
+   * of the NN.
+   *
+   * @return the name dir status information, as a JSON string.
+   */
+  public String getNameDirStatuses();
 }
@@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -409,7 +410,7 @@ class NamenodeJspHelper {
       nodeToRedirect = nn.getHttpAddress().getHostName();
       redirectPort = nn.getHttpAddress().getPort();
     }
-    String addr = NameNode.getHostPortString(nn.getNameNodeAddress());
+    String addr = NetUtils.getHostPortString(nn.getNameNodeAddress());
     String fqdn = InetAddress.getByName(nodeToRedirect).getCanonicalHostName();
     redirectLocation = "http://" + fqdn + ":" + redirectPort
         + "/browseDirectory.jsp?namenodeInfoPort="
@@ -215,7 +215,7 @@ public class NamenodeWebHdfsMethods {
   @PUT
   @Path("/")
   @Consumes({"*/*"})
-  @Produces({MediaType.APPLICATION_JSON})
+  @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
   public Response putRoot(
       @Context final UserGroupInformation ugi,
       @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
@@ -263,7 +263,7 @@ public class NamenodeWebHdfsMethods {
   @PUT
   @Path("{" + UriFsPathParam.NAME + ":.*}")
   @Consumes({"*/*"})
-  @Produces({MediaType.APPLICATION_JSON})
+  @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
   public Response put(
       @Context final UserGroupInformation ugi,
       @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
@@ -324,7 +324,7 @@ public class NamenodeWebHdfsMethods {
       final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
           fullpath, op.getValue(), -1L,
           permission, overwrite, bufferSize, replication, blockSize);
-      return Response.temporaryRedirect(uri).build();
+      return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case MKDIRS:
     {
@@ -336,7 +336,7 @@ public class NamenodeWebHdfsMethods {
     {
       np.createSymlink(destination.getValue(), fullpath,
           PermissionParam.getDefaultFsPermission(), createParent.getValue());
-      return Response.ok().type(MediaType.APPLICATION_JSON).build();
+      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case RENAME:
     {
@@ -348,7 +348,7 @@ public class NamenodeWebHdfsMethods {
       } else {
         np.rename2(fullpath, destination.getValue(),
             s.toArray(new Options.Rename[s.size()]));
-        return Response.ok().type(MediaType.APPLICATION_JSON).build();
+        return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
       }
     }
     case SETREPLICATION:
@@ -364,17 +364,17 @@ public class NamenodeWebHdfsMethods {
       }
 
       np.setOwner(fullpath, owner.getValue(), group.getValue());
-      return Response.ok().type(MediaType.APPLICATION_JSON).build();
+      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case SETPERMISSION:
     {
       np.setPermission(fullpath, permission.getFsPermission());
-      return Response.ok().type(MediaType.APPLICATION_JSON).build();
+      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case SETTIMES:
     {
       np.setTimes(fullpath, modificationTime.getValue(), accessTime.getValue());
-      return Response.ok().type(MediaType.APPLICATION_JSON).build();
+      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case RENEWDELEGATIONTOKEN:
     {
@@ -389,7 +389,7 @@ public class NamenodeWebHdfsMethods {
       final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
       token.decodeFromUrlString(delegationTokenArgument.getValue());
       np.cancelDelegationToken(token);
-      return Response.ok().type(MediaType.APPLICATION_JSON).build();
+      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     default:
       throw new UnsupportedOperationException(op + " is not supported");
@@ -406,7 +406,7 @@ public class NamenodeWebHdfsMethods {
   @POST
   @Path("/")
   @Consumes({"*/*"})
-  @Produces({MediaType.APPLICATION_JSON})
+  @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
   public Response postRoot(
       @Context final UserGroupInformation ugi,
       @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
@@ -427,7 +427,7 @@ public class NamenodeWebHdfsMethods {
   @POST
   @Path("{" + UriFsPathParam.NAME + ":.*}")
   @Consumes({"*/*"})
-  @Produces({MediaType.APPLICATION_JSON})
+  @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
   public Response post(
       @Context final UserGroupInformation ugi,
       @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
@@ -459,7 +459,7 @@ public class NamenodeWebHdfsMethods {
     {
       final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
           fullpath, op.getValue(), -1L, bufferSize);
-      return Response.temporaryRedirect(uri).build();
+      return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     default:
       throw new UnsupportedOperationException(op + " is not supported");
@@ -542,7 +542,7 @@ public class NamenodeWebHdfsMethods {
     {
       final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
           fullpath, op.getValue(), offset.getValue(), offset, length, bufferSize);
-      return Response.temporaryRedirect(uri).build();
+      return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case GET_BLOCK_LOCATIONS:
     {
@@ -578,7 +578,7 @@ public class NamenodeWebHdfsMethods {
     {
       final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
           fullpath, op.getValue(), -1L);
-      return Response.temporaryRedirect(uri).build();
+      return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case GETDELEGATIONTOKEN:
     {
@@ -756,10 +756,10 @@ public class MiniDFSCluster {
     // After the NN has started, set back the bound ports into
     // the conf
     conf.set(DFSUtil.addKeySuffixes(
-        DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId, nnId), NameNode
+        DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId, nnId), NetUtils
         .getHostPortString(nn.getNameNodeAddress()));
     conf.set(DFSUtil.addKeySuffixes(
-        DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId, nnId), NameNode
+        DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId, nnId), NetUtils
         .getHostPortString(nn.getHttpAddress()));
     DFSUtil.setGenericConf(conf, nameserviceId, nnId,
         DFS_NAMENODE_HTTP_ADDRESS_KEY);
@@ -779,7 +779,7 @@ public class MiniDFSCluster {
    */
   public URI getURI(int nnIndex) {
     InetSocketAddress addr = nameNodes[nnIndex].nameNode.getNameNodeAddress();
-    String hostPort = NameNode.getHostPortString(addr);
+    String hostPort = NetUtils.getHostPortString(addr);
     URI uri = null;
     try {
       uri = new URI("hdfs://" + hostPort);
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.ServletUtil;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -134,7 +135,7 @@ public class TestDatanodeJsp {
     Mockito.doReturn("100").when(reqMock).getParameter("chunkSizeToView");
     Mockito.doReturn("1").when(reqMock).getParameter("startOffset");
     Mockito.doReturn("1024").when(reqMock).getParameter("blockSize");
-    Mockito.doReturn(NameNode.getHostPortString(NameNode.getAddress(CONF)))
+    Mockito.doReturn(NetUtils.getHostPortString(NameNode.getAddress(CONF)))
         .when(reqMock).getParameter("nnaddr");
     Mockito.doReturn(testFile.toString()).when(reqMock).getPathInfo();
   }
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.Before;
@@ -333,7 +334,7 @@ public class TestBackupNode {
       InetSocketAddress add = backup.getNameNodeAddress();
       // Write to BN
       FileSystem bnFS = FileSystem.get(new Path("hdfs://"
-          + NameNode.getHostPortString(add)).toUri(), conf);
+          + NetUtils.getHostPortString(add)).toUri(), conf);
       boolean canWrite = true;
       try {
         TestCheckpoint.writeFile(bnFS, file3, replication);
@@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.util.StringUtils;
@@ -1433,7 +1434,7 @@ public class TestCheckpoint extends TestCase {
         .format(true).build();
 
     NamenodeProtocols nn = cluster.getNameNodeRpc();
-    String fsName = NameNode.getHostPortString(
+    String fsName = NetUtils.getHostPortString(
         cluster.getNameNode().getHttpAddress());
 
     // Make a finalized log on the server side.
@@ -17,23 +17,33 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.junit.Assert.*;
+
+import java.io.File;
 import java.lang.management.ManagementFactory;
+import java.net.URI;
+import java.util.Collection;
+import java.util.Map;
 
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.util.VersionInfo;
 
 import org.junit.Test;
+import org.mortbay.util.ajax.JSON;
 
 import junit.framework.Assert;
 
 /**
  * Class for testing {@link NameNodeMXBean} implementation
  */
 public class TestNameNodeMXBean {
+  @SuppressWarnings({ "unchecked", "deprecation" })
   @Test
   public void testNameNodeMXBeanInfo() throws Exception {
     Configuration conf = new Configuration();
@@ -88,8 +98,46 @@ public class TestNameNodeMXBean {
       String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
           "DeadNodes"));
       Assert.assertEquals(fsn.getDeadNodes(), deadnodeinfo);
+      // get attribute NameDirStatuses
+      String nameDirStatuses = (String) (mbs.getAttribute(mxbeanName,
+          "NameDirStatuses"));
+      Assert.assertEquals(fsn.getNameDirStatuses(), nameDirStatuses);
+      Map<String, Map<String, String>> statusMap =
+        (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
+      Collection<URI> nameDirUris = cluster.getNameDirs(0);
+      for (URI nameDirUri : nameDirUris) {
+        File nameDir = new File(nameDirUri);
+        System.out.println("Checking for the presence of " + nameDir +
+            " in active name dirs.");
+        assertTrue(statusMap.get("active").containsKey(nameDir.getAbsolutePath()));
+      }
+      assertEquals(2, statusMap.get("active").size());
+      assertEquals(0, statusMap.get("failed").size());
+
+      // This will cause the first dir to fail.
+      File failedNameDir = new File(nameDirUris.toArray(new URI[0])[0]);
+      assertEquals(0, FileUtil.chmod(failedNameDir.getAbsolutePath(), "000"));
+      cluster.getNameNodeRpc().rollEditLog();
+
+      nameDirStatuses = (String) (mbs.getAttribute(mxbeanName,
+          "NameDirStatuses"));
+      statusMap = (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
+      for (URI nameDirUri : nameDirUris) {
+        File nameDir = new File(nameDirUri);
+        String expectedStatus =
+            nameDir.equals(failedNameDir) ? "failed" : "active";
+        System.out.println("Checking for the presence of " + nameDir +
+            " in " + expectedStatus + " name dirs.");
+        assertTrue(statusMap.get(expectedStatus).containsKey(
+            nameDir.getAbsolutePath()));
+      }
+      assertEquals(1, statusMap.get("active").size());
+      assertEquals(1, statusMap.get("failed").size());
     } finally {
       if (cluster != null) {
+        for (URI dir : cluster.getNameDirs(0)) {
+          FileUtil.chmod(new File(dir).toString(), "700");
+        }
         cluster.shutdown();
       }
     }
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.DFSInputStream;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.net.NetUtils;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mortbay.jetty.InclusiveByteRange;
@@ -263,7 +264,7 @@ public class TestStreamFile {
 
     Mockito.doReturn(CONF).when(mockServletContext).getAttribute(
         JspHelper.CURRENT_CONF);
-    Mockito.doReturn(NameNode.getHostPortString(NameNode.getAddress(CONF)))
+    Mockito.doReturn(NetUtils.getHostPortString(NameNode.getAddress(CONF)))
         .when(mockHttpServletRequest).getParameter("nnaddr");
     Mockito.doReturn(testFile.toString()).when(mockHttpServletRequest)
         .getPathInfo();
@@ -27,6 +27,7 @@ import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -54,7 +55,7 @@ public class TestTransferFsImage {
         new File("/xxxxx-does-not-exist/blah"));
 
     try {
-      String fsName = NameNode.getHostPortString(
+      String fsName = NetUtils.getHostPortString(
           cluster.getNameNode().getHttpAddress());
       String id = "getimage=1&txid=0";
 
@@ -86,7 +87,7 @@ public class TestTransferFsImage {
     );
 
     try {
-      String fsName = NameNode.getHostPortString(
+      String fsName = NetUtils.getHostPortString(
           cluster.getNameNode().getHttpAddress());
       String id = "getimage=1&txid=0";
 
@@ -103,12 +103,6 @@ public class TestNameNodeMetrics {
     DFSTestUtil.createFile(fs, file, fileLen, replicas, rand.nextLong());
   }
 
-  private void updateMetrics() throws Exception {
-    // Wait for metrics update (corresponds to dfs.namenode.replication.interval
-    // for some block related metrics to get updated)
-    Thread.sleep(1000);
-  }
-
   private void readFile(FileSystem fileSys,Path name) throws IOException {
     //Just read file so that getNumBlockLocations are incremented
     DataInputStream stm = fileSys.open(name);
@@ -125,7 +119,6 @@ public class TestNameNodeMetrics {
     createFile(file, 3200, (short)3);
     final long blockCount = 32;
     int blockCapacity = namesystem.getBlockCapacity();
-    updateMetrics();
     assertGauge("BlockCapacity", blockCapacity, getMetrics(NS_METRICS));
 
     MetricsRecordBuilder rb = getMetrics(NN_METRICS);
@@ -140,7 +133,6 @@ public class TestNameNodeMetrics {
     while (threshold < blockCount) {
       blockCapacity <<= 1;
     }
-    updateMetrics();
     long filesTotal = file.depth() + 1; // Add 1 for root
     rb = getMetrics(NS_METRICS);
     assertGauge("FilesTotal", filesTotal, rb);
@@ -150,7 +142,6 @@ public class TestNameNodeMetrics {
     filesTotal--; // reduce the filecount for deleted file
 
     waitForDeletion();
-    updateMetrics();
     rb = getMetrics(NS_METRICS);
     assertGauge("FilesTotal", filesTotal, rb);
     assertGauge("BlocksTotal", 0L, rb);
@@ -179,7 +170,7 @@ public class TestNameNodeMetrics {
     } finally {
       cluster.getNamesystem().writeUnlock();
     }
-    updateMetrics();
+    Thread.sleep(1000); // Wait for block to be marked corrupt
     MetricsRecordBuilder rb = getMetrics(NS_METRICS);
     assertGauge("CorruptBlocks", 1L, rb);
     assertGauge("PendingReplicationBlocks", 1L, rb);
@@ -201,7 +192,6 @@ public class TestNameNodeMetrics {
     createFile(file, 100, (short)2);
     long totalBlocks = 1;
     NameNodeAdapter.setReplication(namesystem, file.toString(), (short)1);
-    updateMetrics();
     MetricsRecordBuilder rb = getMetrics(NS_METRICS);
     assertGauge("ExcessBlocks", totalBlocks, rb);
     fs.delete(file, true);
@@ -224,7 +214,7 @@ public class TestNameNodeMetrics {
     } finally {
       cluster.getNamesystem().writeUnlock();
     }
-    updateMetrics();
+    Thread.sleep(1000); // Wait for block to be marked corrupt
     MetricsRecordBuilder rb = getMetrics(NS_METRICS);
     assertGauge("UnderReplicatedBlocks", 1L, rb);
     assertGauge("MissingBlocks", 1L, rb);
@@ -246,7 +236,6 @@ public class TestNameNodeMetrics {
     Path target = getTestPath("target");
     createFile(target, 100, (short)1);
     fs.rename(src, target, Rename.OVERWRITE);
-    updateMetrics();
     MetricsRecordBuilder rb = getMetrics(NN_METRICS);
     assertCounter("FilesRenamed", 1L, rb);
     assertCounter("FilesDeleted", 1L, rb);
@@ -274,7 +263,6 @@ public class TestNameNodeMetrics {
 
     //Perform create file operation
     createFile(file1_Path,100,(short)2);
-    updateMetrics();
 
     //Create file does not change numGetBlockLocations metric
     //expect numGetBlockLocations = 0 for previous and current interval
@@ -283,14 +271,12 @@ public class TestNameNodeMetrics {
     // Open and read file operation increments GetBlockLocations
     // Perform read file operation on earlier created file
     readFile(fs, file1_Path);
-    updateMetrics();
     // Verify read file operation has incremented numGetBlockLocations by 1
     assertCounter("GetBlockLocations", 1L, getMetrics(NN_METRICS));
 
     // opening and reading file twice will increment numGetBlockLocations by 2
     readFile(fs, file1_Path);
     readFile(fs, file1_Path);
-    updateMetrics();
     assertCounter("GetBlockLocations", 3L, getMetrics(NN_METRICS));
   }
 
@@ -308,7 +294,6 @@ public class TestNameNodeMetrics {
     assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
 
     fs.mkdirs(new Path(TEST_ROOT_DIR_PATH, "/tmp"));
-    updateMetrics();
 
     assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
     assertGauge("LastWrittenTransactionId", 2L, getMetrics(NS_METRICS));
@@ -316,7 +301,6 @@ public class TestNameNodeMetrics {
     assertGauge("TransactionsSinceLastLogRoll", 2L, getMetrics(NS_METRICS));
 
     cluster.getNameNodeRpc().rollEditLog();
-    updateMetrics();
 
     assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
     assertGauge("LastWrittenTransactionId", 4L, getMetrics(NS_METRICS));
@@ -326,7 +310,6 @@ public class TestNameNodeMetrics {
     cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER);
     cluster.getNameNodeRpc().saveNamespace();
     cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
-    updateMetrics();
 
     long newLastCkptTime = MetricsAsserts.getLongGauge("LastCheckpointTime",
         getMetrics(NS_METRICS));
@@ -35,10 +35,10 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.tools.GetConf;
 import org.apache.hadoop.hdfs.tools.GetConf.Command;
 import org.apache.hadoop.hdfs.tools.GetConf.CommandHandler;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.Test;
 
@@ -88,7 +88,7 @@ public class TestGetConf {
   private String[] toStringArray(List<ConfiguredNNAddress> list) {
     String[] ret = new String[list.size()];
     for (int i = 0; i < list.size(); i++) {
-      ret[i] = NameNode.getHostPortString(list.get(i).getAddress());
+      ret[i] = NetUtils.getHostPortString(list.get(i).getAddress());
     }
     return ret;
   }
@@ -27,6 +27,7 @@ import java.net.URL;
 import java.util.Map;
 
 import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.MediaType;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -314,6 +315,8 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
       conn.setRequestMethod(op.getType().toString());
       conn.connect();
       assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode());
+      assertEquals(0, conn.getContentLength());
+      assertEquals(MediaType.APPLICATION_OCTET_STREAM, conn.getContentType());
       assertEquals((short)0755, webhdfs.getFileStatus(dir).getPermission().toShort());
       conn.disconnect();
     }
@@ -14,6 +14,7 @@ Trunk (unreleased changes)
     (Plamen Jeliazkov via shv)
 
   IMPROVEMENTS
 
     MAPREDUCE-3787. [Gridmix] Optimize job monitoring and STRESS mode for
     faster job submission. (amarrk)
 
@@ -92,10 +93,23 @@ Trunk (unreleased changes)
     MAPREDUCE-3818. Fixed broken compilation in TestSubmitJob after the patch
     for HDFS-2895. (Suresh Srinivas via vinodkv)
 
-Release 0.23.2 - UNRELEASED
+Release 0.23.3 - UNRELEASED
 
+  INCOMPATIBLE CHANGES
+
   NEW FEATURES
 
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+Release 0.23.2 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
   IMPROVEMENTS
 
     MAPREDUCE-3849. Change TokenCache's reading of the binary token file
@@ -107,9 +121,20 @@ Release 0.23.2 - UNRELEASED
     MAPREDUCE-3877 Add a test to formalise the current state transitions
     of the yarn lifecycle. (stevel)
 
+    MAPREDUCE-3866. Fixed the bin/yarn script to not print the command line
+    unnecessarily. (vinodkv)
+
+    MAPREDUCE-3730. Modified RM to allow restarted NMs to be able to join the
+    cluster without waiting for expiry. (Jason Lowe via vinodkv)
+
+    MAPREDUCE-2793. Corrected AppIDs, JobIDs, TaskAttemptIDs to be of correct
+    format on the web pages. (Bikas Saha via vinodkv)
+
   OPTIMIZATIONS
 
   BUG FIXES
 
+    MAPREDUCE-3918 proc_historyserver no longer in command line arguments for
+    HistoryServer (Jon Eagles via bobby)
+
     MAPREDUCE-3862. Nodemanager can appear to hang on shutdown due to lingering
     DeletionService threads (Jason Lowe via bobby)
 
@@ -142,6 +167,10 @@ Release 0.23.2 - UNRELEASED
     MAPREDUCE-3738. MM can hang during shutdown if AppLogAggregatorImpl thread
     dies unexpectedly (Jason Lowe via sseth)
 
+    MAPREDUCE-3904 Job history produced with mapreduce.cluster.acls.enabled
+    false can not be viewed with mapreduce.cluster.acls.enabled true
+    (Jonathon Eagles via tgraves)
+
 Release 0.23.1 - 2012-02-17
 
@@ -136,4 +136,4 @@ fi
 HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"

 export CLASSPATH
-exec "$JAVA" $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@"
+exec "$JAVA" -Dproc_$COMMAND $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@"

@@ -438,6 +438,9 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
   public boolean checkAccess(UserGroupInformation callerUGI,
       JobACL jobOperation) {
     AccessControlList jobACL = jobACLs.get(jobOperation);
+    if (jobACL == null) {
+      return true;
+    }
     return aclsManager.checkAccess(callerUGI, jobOperation, username, jobACL);
   }

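Illustrative note (not part of the commit): the null check added to JobImpl#checkAccess, and mirrored in CompletedJob further down, makes an operation with no configured ACL open to every caller. A minimal sketch of that rule, assuming the standard AccessControlList API; the real code delegates to JobACLsManager rather than querying the ACL directly.

    import java.util.Map;
    import org.apache.hadoop.mapreduce.JobACL;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.authorize.AccessControlList;

    final class AclCheckSketch {
      // Hypothetical condensed form of the guarded check added above.
      static boolean checkAccess(Map<JobACL, AccessControlList> jobACLs,
          UserGroupInformation callerUGI, JobACL operation) {
        AccessControlList acl = jobACLs.get(operation);
        if (acl == null) {
          return true; // no ACL configured for this operation: allow the caller
        }
        return acl.isUserAllowed(callerUGI);
      }
    }
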
@@ -99,6 +99,14 @@ public class AMWebServices {
     try {
       jobId = MRApps.toJobID(jid);
     } catch (YarnException e) {
+      // TODO: after MAPREDUCE-2793 YarnException is probably not expected here
+      // anymore but keeping it for now just in case other stuff starts failing.
+      // Also, the webservice should ideally return BadRequest (HTTP:400) when
+      // the id is malformed instead of NotFound (HTTP:404). The webserver on
+      // top of which AMWebServices is built seems to automatically do that for
+      // unhandled exceptions
+      throw new NotFoundException(e.getMessage());
+    } catch (IllegalArgumentException e) {
       throw new NotFoundException(e.getMessage());
     }
     if (jobId == null) {
@@ -121,10 +129,18 @@ public class AMWebServices {
     try {
       taskID = MRApps.toTaskID(tid);
     } catch (YarnException e) {
+      // TODO: after MAPREDUCE-2793 YarnException is probably not expected here
+      // anymore but keeping it for now just in case other stuff starts failing.
+      // Also, the webservice should ideally return BadRequest (HTTP:400) when
+      // the id is malformed instead of NotFound (HTTP:404). The webserver on
+      // top of which AMWebServices is built seems to automatically do that for
+      // unhandled exceptions
       throw new NotFoundException(e.getMessage());
     } catch (NumberFormatException ne) {
       throw new NotFoundException(ne.getMessage());
-    }
+    } catch (IllegalArgumentException e) {
+      throw new NotFoundException(e.getMessage());
+    }
     if (taskID == null) {
       throw new NotFoundException("taskid " + tid + " not found or invalid");
     }
@@ -146,9 +162,17 @@ public class AMWebServices {
     try {
       attemptId = MRApps.toTaskAttemptID(attId);
     } catch (YarnException e) {
+      // TODO: after MAPREDUCE-2793 YarnException is probably not expected here
+      // anymore but keeping it for now just in case other stuff starts failing.
+      // Also, the webservice should ideally return BadRequest (HTTP:400) when
+      // the id is malformed instead of NotFound (HTTP:404). The webserver on
+      // top of which AMWebServices is built seems to automatically do that for
+      // unhandled exceptions
       throw new NotFoundException(e.getMessage());
     } catch (NumberFormatException ne) {
       throw new NotFoundException(ne.getMessage());
+    } catch (IllegalArgumentException e) {
+      throw new NotFoundException(e.getMessage());
     }
     if (attemptId == null) {
       throw new NotFoundException("task attempt id " + attId

@@ -106,6 +106,20 @@ public class MockJobs extends MockApps {
     return newAppName();
   }

+  /**
+   * Create numJobs in a map with jobs having appId==jobId
+   */
+  public static Map<JobId, Job> newJobs(int numJobs, int numTasksPerJob,
+      int numAttemptsPerTask) {
+    Map<JobId, Job> map = Maps.newHashMap();
+    for (int j = 0; j < numJobs; ++j) {
+      ApplicationId appID = MockJobs.newAppID(j);
+      Job job = newJob(appID, j, numTasksPerJob, numAttemptsPerTask);
+      map.put(job.getID(), job);
+    }
+    return map;
+  }
+
   public static Map<JobId, Job> newJobs(ApplicationId appID, int numJobsPerApp,
       int numTasksPerJob, int numAttemptsPerTask) {
     Map<JobId, Job> map = Maps.newHashMap();

@@ -191,5 +191,16 @@ public class TestJobImpl {
         null, null, null, true, null, 0, null);
     Assert.assertTrue(job4.checkAccess(ugi1, JobACL.VIEW_JOB));
     Assert.assertTrue(job4.checkAccess(ugi2, JobACL.VIEW_JOB));
+
+    // Setup configuration access without security enabled
+    Configuration conf5 = new Configuration();
+    conf5.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
+    conf5.set(MRJobConfig.JOB_ACL_VIEW_JOB, "");
+
+    // Verify access
+    JobImpl job5 = new JobImpl(jobId, null, conf5, null, null, null, null, null,
+        null, null, null, true, null, 0, null);
+    Assert.assertTrue(job5.checkAccess(ugi1, null));
+    Assert.assertTrue(job5.checkAccess(ugi2, null));
   }
 }

@@ -396,36 +396,36 @@ public class TestAMWebServicesAttempts extends JerseyTest {
   public void testTaskAttemptIdBogus() throws JSONException, Exception {

     testTaskAttemptIdErrorGeneric("bogusid",
-        "java.lang.Exception: Error parsing attempt ID: bogusid");
+        "java.lang.Exception: TaskAttemptId string : bogusid is not properly formed");
   }

   @Test
   public void testTaskAttemptIdNonExist() throws JSONException, Exception {

     testTaskAttemptIdErrorGeneric(
-        "attempt_12345_0_0_r_1_0",
-        "java.lang.Exception: Error getting info on task attempt id attempt_12345_0_0_r_1_0");
+        "attempt_0_12345_m_000000_0",
+        "java.lang.Exception: Error getting info on task attempt id attempt_0_12345_m_000000_0");
   }

   @Test
   public void testTaskAttemptIdInvalid() throws JSONException, Exception {

-    testTaskAttemptIdErrorGeneric("attempt_12345_0_0_d_1_0",
-        "java.lang.Exception: Unknown task symbol: d");
+    testTaskAttemptIdErrorGeneric("attempt_0_12345_d_000000_0",
+        "java.lang.Exception: Bad TaskType identifier. TaskAttemptId string : attempt_0_12345_d_000000_0 is not properly formed.");
   }

   @Test
   public void testTaskAttemptIdInvalid2() throws JSONException, Exception {

-    testTaskAttemptIdErrorGeneric("attempt_12345_0_r_1_0",
-        "java.lang.Exception: For input string: \"r\"");
+    testTaskAttemptIdErrorGeneric("attempt_12345_m_000000_0",
+        "java.lang.Exception: TaskAttemptId string : attempt_12345_m_000000_0 is not properly formed");
   }

   @Test
   public void testTaskAttemptIdInvalid3() throws JSONException, Exception {

-    testTaskAttemptIdErrorGeneric("attempt_12345_0_0_r_1",
-        "java.lang.Exception: Error parsing attempt ID: attempt_12345_0_0_r_1");
+    testTaskAttemptIdErrorGeneric("attempt_0_12345_m_000000",
+        "java.lang.Exception: TaskAttemptId string : attempt_0_12345_m_000000 is not properly formed");
   }

   private void testTaskAttemptIdErrorGeneric(String attid, String error)

@@ -320,7 +320,7 @@ public class TestAMWebServicesJobs extends JerseyTest {

     try {
       r.path("ws").path("v1").path("mapreduce").path("jobs")
-          .path("job_1234_1_2").get(JSONObject.class);
+          .path("job_0_1234").get(JSONObject.class);
       fail("should have thrown exception on invalid uri");
     } catch (UniformInterfaceException ue) {
       ClientResponse response = ue.getResponse();
@@ -333,7 +333,7 @@ public class TestAMWebServicesJobs extends JerseyTest {
       String type = exception.getString("exception");
       String classname = exception.getString("javaClassName");
       WebServicesTestUtils.checkStringMatch("exception message",
-          "java.lang.Exception: job, job_1234_1_2, is not found", message);
+          "java.lang.Exception: job, job_0_1234, is not found", message);
       WebServicesTestUtils.checkStringMatch("exception type",
           "NotFoundException", type);
       WebServicesTestUtils.checkStringMatch("exception classname",
@@ -351,7 +351,7 @@ public class TestAMWebServicesJobs extends JerseyTest {
       fail("should have thrown exception on invalid uri");
     } catch (UniformInterfaceException ue) {
       ClientResponse response = ue.getResponse();
-      assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
+      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
       assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
       JSONObject msg = response.getEntity(JSONObject.class);
       JSONObject exception = msg.getJSONObject("RemoteException");
@@ -374,7 +374,7 @@ public class TestAMWebServicesJobs extends JerseyTest {
       fail("should have thrown exception on invalid uri");
     } catch (UniformInterfaceException ue) {
       ClientResponse response = ue.getResponse();
-      assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
+      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
       assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
       JSONObject msg = response.getEntity(JSONObject.class);
       JSONObject exception = msg.getJSONObject("RemoteException");
@@ -397,7 +397,7 @@ public class TestAMWebServicesJobs extends JerseyTest {
       fail("should have thrown exception on invalid uri");
     } catch (UniformInterfaceException ue) {
       ClientResponse response = ue.getResponse();
-      assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
+      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
       assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
       String msg = response.getEntity(String.class);
       System.out.println(msg);
@@ -418,11 +418,12 @@ public class TestAMWebServicesJobs extends JerseyTest {

   private void verifyJobIdInvalid(String message, String type, String classname) {
     WebServicesTestUtils.checkStringMatch("exception message",
-        "For input string: \"foo\"", message);
+        "java.lang.Exception: JobId string : job_foo is not properly formed",
+        message);
     WebServicesTestUtils.checkStringMatch("exception type",
-        "NumberFormatException", type);
+        "NotFoundException", type);
     WebServicesTestUtils.checkStringMatch("exception classname",
-        "java.lang.NumberFormatException", classname);
+        "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
   }

   @Test
@@ -443,8 +444,11 @@ public class TestAMWebServicesJobs extends JerseyTest {
     String message = exception.getString("message");
     String type = exception.getString("exception");
     String classname = exception.getString("javaClassName");
-    WebServicesTestUtils.checkStringMatch("exception message",
-        "java.lang.Exception: Error parsing job ID: bogusfoo", message);
+    WebServicesTestUtils
+        .checkStringMatch(
+            "exception message",
+            "java.lang.Exception: JobId string : bogusfoo is not properly formed",
+            message);
     WebServicesTestUtils.checkStringMatch("exception type",
         "NotFoundException", type);
     WebServicesTestUtils.checkStringMatch("exception classname",

@@ -424,7 +424,8 @@ public class TestAMWebServicesTasks extends JerseyTest {
       String type = exception.getString("exception");
       String classname = exception.getString("javaClassName");
       WebServicesTestUtils.checkStringMatch("exception message",
-          "java.lang.Exception: Error parsing task ID: bogustaskid", message);
+          "java.lang.Exception: TaskId string : "
+              + "bogustaskid is not properly formed", message);
       WebServicesTestUtils.checkStringMatch("exception type",
           "NotFoundException", type);
       WebServicesTestUtils.checkStringMatch("exception classname",
@@ -439,7 +440,7 @@ public class TestAMWebServicesTasks extends JerseyTest {
     Map<JobId, Job> jobsMap = appContext.getAllJobs();
     for (JobId id : jobsMap.keySet()) {
       String jobId = MRApps.toString(id);
-      String tid = "task_1234_0_0_m_0";
+      String tid = "task_0_0000_m_000000";
       try {
         r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
             .path("tasks").path(tid).get(JSONObject.class);
@@ -455,7 +456,7 @@ public class TestAMWebServicesTasks extends JerseyTest {
       String type = exception.getString("exception");
       String classname = exception.getString("javaClassName");
       WebServicesTestUtils.checkStringMatch("exception message",
-          "java.lang.Exception: task not found with id task_1234_0_0_m_0",
+          "java.lang.Exception: task not found with id task_0_0000_m_000000",
           message);
       WebServicesTestUtils.checkStringMatch("exception type",
           "NotFoundException", type);
@@ -471,7 +472,7 @@ public class TestAMWebServicesTasks extends JerseyTest {
     Map<JobId, Job> jobsMap = appContext.getAllJobs();
     for (JobId id : jobsMap.keySet()) {
       String jobId = MRApps.toString(id);
-      String tid = "task_1234_0_0_d_0";
+      String tid = "task_0_0000_d_000000";
       try {
         r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
             .path("tasks").path(tid).get(JSONObject.class);
@@ -487,7 +488,8 @@ public class TestAMWebServicesTasks extends JerseyTest {
       String type = exception.getString("exception");
       String classname = exception.getString("javaClassName");
       WebServicesTestUtils.checkStringMatch("exception message",
-          "java.lang.Exception: Unknown task symbol: d", message);
+          "java.lang.Exception: Bad TaskType identifier. TaskId string : "
+              + "task_0_0000_d_000000 is not properly formed.", message);
       WebServicesTestUtils.checkStringMatch("exception type",
           "NotFoundException", type);
       WebServicesTestUtils.checkStringMatch("exception classname",
@@ -502,7 +504,7 @@ public class TestAMWebServicesTasks extends JerseyTest {
     Map<JobId, Job> jobsMap = appContext.getAllJobs();
     for (JobId id : jobsMap.keySet()) {
       String jobId = MRApps.toString(id);
-      String tid = "task_1234_0_m_0";
+      String tid = "task_0_m_000000";
       try {
         r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
             .path("tasks").path(tid).get(JSONObject.class);
@@ -518,7 +520,8 @@ public class TestAMWebServicesTasks extends JerseyTest {
       String type = exception.getString("exception");
       String classname = exception.getString("javaClassName");
       WebServicesTestUtils.checkStringMatch("exception message",
-          "java.lang.Exception: For input string: \"m\"", message);
+          "java.lang.Exception: TaskId string : "
+              + "task_0_m_000000 is not properly formed", message);
       WebServicesTestUtils.checkStringMatch("exception type",
           "NotFoundException", type);
       WebServicesTestUtils.checkStringMatch("exception classname",
@@ -533,7 +536,7 @@ public class TestAMWebServicesTasks extends JerseyTest {
     Map<JobId, Job> jobsMap = appContext.getAllJobs();
     for (JobId id : jobsMap.keySet()) {
       String jobId = MRApps.toString(id);
-      String tid = "task_1234_0_0_m";
+      String tid = "task_0_0000_m";
       try {
         r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
             .path("tasks").path(tid).get(JSONObject.class);
@@ -549,8 +552,8 @@ public class TestAMWebServicesTasks extends JerseyTest {
       String type = exception.getString("exception");
       String classname = exception.getString("javaClassName");
       WebServicesTestUtils.checkStringMatch("exception message",
-          "java.lang.Exception: Error parsing task ID: task_1234_0_0_m",
-          message);
+          "java.lang.Exception: TaskId string : "
+              + "task_0_0000_m is not properly formed", message);
       WebServicesTestUtils.checkStringMatch("exception type",
           "NotFoundException", type);
       WebServicesTestUtils.checkStringMatch("exception classname",

@@ -506,11 +506,9 @@ public class JobHistoryUtils {
       sb.append(address.getHostName());
     }
     sb.append(":").append(address.getPort());
-    sb.append("/jobhistory/job/"); // TODO This will change when the history server
-                                   // understands apps.
-    // TOOD Use JobId toString once UI stops using _id_id
-    sb.append("job_").append(appId.getClusterTimestamp());
-    sb.append("_").append(appId.getId()).append("_").append(appId.getId());
+    sb.append("/jobhistory/job/");
+    JobID jobId = TypeConverter.fromYarn(appId);
+    sb.append(jobId.toString());
     return sb.toString();
   }
 }

@@ -18,9 +18,6 @@

 package org.apache.hadoop.mapreduce.v2.util;

-import static org.apache.hadoop.yarn.util.StringHelper._join;
-import static org.apache.hadoop.yarn.util.StringHelper._split;
-
 import java.io.BufferedReader;
 import java.io.File;
 import java.io.IOException;
@@ -30,7 +27,6 @@ import java.net.URI;
 import java.net.URL;
 import java.util.Arrays;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;

@@ -39,7 +35,11 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.TaskID;
+import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
@@ -50,12 +50,10 @@ import org.apache.hadoop.yarn.ContainerLogAppender;
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.LocalResource;
 import org.apache.hadoop.yarn.api.records.LocalResourceType;
 import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.util.Apps;
 import org.apache.hadoop.yarn.util.BuilderUtils;

@@ -65,64 +63,28 @@ import org.apache.hadoop.yarn.util.BuilderUtils;
 @Private
 @Unstable
 public class MRApps extends Apps {
-  public static final String JOB = "job";
-  public static final String TASK = "task";
-  public static final String ATTEMPT = "attempt";

   public static String toString(JobId jid) {
-    return _join(JOB, jid.getAppId().getClusterTimestamp(), jid.getAppId().getId(), jid.getId());
+    return jid.toString();
   }

   public static JobId toJobID(String jid) {
-    Iterator<String> it = _split(jid).iterator();
-    return toJobID(JOB, jid, it);
-  }
-
-  // mostly useful for parsing task/attempt id like strings
-  public static JobId toJobID(String prefix, String s, Iterator<String> it) {
-    ApplicationId appId = toAppID(prefix, s, it);
-    shouldHaveNext(prefix, s, it);
-    JobId jobId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class);
-    jobId.setAppId(appId);
-    jobId.setId(Integer.parseInt(it.next()));
-    return jobId;
+    return TypeConverter.toYarn(JobID.forName(jid));
   }

   public static String toString(TaskId tid) {
-    return _join("task", tid.getJobId().getAppId().getClusterTimestamp(), tid.getJobId().getAppId().getId(),
-        tid.getJobId().getId(), taskSymbol(tid.getTaskType()), tid.getId());
+    return tid.toString();
   }

   public static TaskId toTaskID(String tid) {
-    Iterator<String> it = _split(tid).iterator();
-    return toTaskID(TASK, tid, it);
-  }
-
-  public static TaskId toTaskID(String prefix, String s, Iterator<String> it) {
-    JobId jid = toJobID(prefix, s, it);
-    shouldHaveNext(prefix, s, it);
-    TaskId tid = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskId.class);
-    tid.setJobId(jid);
-    tid.setTaskType(taskType(it.next()));
-    shouldHaveNext(prefix, s, it);
-    tid.setId(Integer.parseInt(it.next()));
-    return tid;
+    return TypeConverter.toYarn(TaskID.forName(tid));
   }

   public static String toString(TaskAttemptId taid) {
-    return _join("attempt", taid.getTaskId().getJobId().getAppId().getClusterTimestamp(),
-        taid.getTaskId().getJobId().getAppId().getId(), taid.getTaskId().getJobId().getId(),
-        taskSymbol(taid.getTaskId().getTaskType()), taid.getTaskId().getId(), taid.getId());
+    return taid.toString();
   }

   public static TaskAttemptId toTaskAttemptID(String taid) {
-    Iterator<String> it = _split(taid).iterator();
-    TaskId tid = toTaskID(ATTEMPT, taid, it);
-    shouldHaveNext(ATTEMPT, taid, it);
-    TaskAttemptId taId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskAttemptId.class);
-    taId.setTaskId(tid);
-    taId.setId(Integer.parseInt(it.next()));
-    return taId;
+    return TypeConverter.toYarn(TaskAttemptID.forName(taid));
   }

   public static String taskSymbol(TaskType type) {

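Illustrative note (not part of the commit): after this change MRApps simply delegates ID formatting and parsing to the stable org.apache.hadoop.mapreduce ID classes, so the web-facing strings are the familiar job_/task_/attempt_ forms. A minimal sketch of the round trip, using a made-up cluster timestamp; the expected values follow the tests below.

    import org.apache.hadoop.mapreduce.JobID;
    import org.apache.hadoop.mapreduce.TypeConverter;
    import org.apache.hadoop.mapreduce.v2.api.records.JobId;

    public class JobIdRoundTrip {
      public static void main(String[] args) {
        // "job_<clusterTimestamp>_<sequence>" is the canonical JobID string form.
        JobId jid = TypeConverter.toYarn(JobID.forName("job_1330216715040_0001"));
        System.out.println(jid.getAppId().getClusterTimestamp()); // expected 1330216715040
        System.out.println(jid.getId());                          // expected 1
        // A malformed string now fails inside forName() with IllegalArgumentException,
        // which the web services above translate into a NotFoundException.
      }
    }
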
@@ -43,18 +43,18 @@ public class TestMRApps {
   @Test public void testJobIDtoString() {
     JobId jid = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class);
     jid.setAppId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class));
-    assertEquals("job_0_0_0", MRApps.toString(jid));
+    assertEquals("job_0_0000", MRApps.toString(jid));
   }

   @Test public void testToJobID() {
-    JobId jid = MRApps.toJobID("job_1_1_1");
+    JobId jid = MRApps.toJobID("job_1_1");
     assertEquals(1, jid.getAppId().getClusterTimestamp());
     assertEquals(1, jid.getAppId().getId());
-    assertEquals(1, jid.getId());
+    assertEquals(1, jid.getId()); // tests against some proto.id and not a job.id field
   }

-  @Test(expected=YarnException.class) public void testJobIDShort() {
-    MRApps.toJobID("job_0_0");
+  @Test(expected=IllegalArgumentException.class) public void testJobIDShort() {
+    MRApps.toJobID("job_0_0_0");
   }

   //TODO_get.set
@@ -68,29 +68,29 @@ public class TestMRApps {
     type = TaskType.REDUCE;
     System.err.println(type);
     System.err.println(tid.getTaskType());
-    assertEquals("task_0_0_0_m_0", MRApps.toString(tid));
+    assertEquals("task_0_0000_m_000000", MRApps.toString(tid));
     tid.setTaskType(TaskType.REDUCE);
-    assertEquals("task_0_0_0_r_0", MRApps.toString(tid));
+    assertEquals("task_0_0000_r_000000", MRApps.toString(tid));
   }

   @Test public void testToTaskID() {
-    TaskId tid = MRApps.toTaskID("task_1_2_3_r_4");
+    TaskId tid = MRApps.toTaskID("task_1_2_r_3");
     assertEquals(1, tid.getJobId().getAppId().getClusterTimestamp());
     assertEquals(2, tid.getJobId().getAppId().getId());
-    assertEquals(3, tid.getJobId().getId());
+    assertEquals(2, tid.getJobId().getId());
     assertEquals(TaskType.REDUCE, tid.getTaskType());
-    assertEquals(4, tid.getId());
+    assertEquals(3, tid.getId());

-    tid = MRApps.toTaskID("task_1_2_3_m_4");
+    tid = MRApps.toTaskID("task_1_2_m_3");
     assertEquals(TaskType.MAP, tid.getTaskType());
   }

-  @Test(expected=YarnException.class) public void testTaskIDShort() {
-    MRApps.toTaskID("task_0_0_0_m");
+  @Test(expected=IllegalArgumentException.class) public void testTaskIDShort() {
+    MRApps.toTaskID("task_0_0000_m");
   }

-  @Test(expected=YarnException.class) public void testTaskIDBadType() {
-    MRApps.toTaskID("task_0_0_0_x_0");
+  @Test(expected=IllegalArgumentException.class) public void testTaskIDBadType() {
+    MRApps.toTaskID("task_0_0000_x_000000");
   }

   //TODO_get.set
@@ -100,19 +100,19 @@ public class TestMRApps {
     taid.getTaskId().setTaskType(TaskType.MAP);
     taid.getTaskId().setJobId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class));
     taid.getTaskId().getJobId().setAppId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class));
-    assertEquals("attempt_0_0_0_m_0_0", MRApps.toString(taid));
+    assertEquals("attempt_0_0000_m_000000_0", MRApps.toString(taid));
   }

   @Test public void testToTaskAttemptID() {
-    TaskAttemptId taid = MRApps.toTaskAttemptID("attempt_0_1_2_m_3_4");
+    TaskAttemptId taid = MRApps.toTaskAttemptID("attempt_0_1_m_2_3");
     assertEquals(0, taid.getTaskId().getJobId().getAppId().getClusterTimestamp());
     assertEquals(1, taid.getTaskId().getJobId().getAppId().getId());
-    assertEquals(2, taid.getTaskId().getJobId().getId());
-    assertEquals(3, taid.getTaskId().getId());
-    assertEquals(4, taid.getId());
+    assertEquals(1, taid.getTaskId().getJobId().getId());
+    assertEquals(2, taid.getTaskId().getId());
+    assertEquals(3, taid.getId());
   }

-  @Test(expected=YarnException.class) public void testTaskAttemptIDShort() {
+  @Test(expected=IllegalArgumentException.class) public void testTaskAttemptIDShort() {
     MRApps.toTaskAttemptID("attempt_0_0_0_m_0");
   }

@@ -159,6 +159,7 @@ public class TaskAttemptID extends org.apache.hadoop.mapred.ID {
       ) throws IllegalArgumentException {
     if(str == null)
       return null;
+    String exceptionMsg = null;
     try {
       String[] parts = str.split(Character.toString(SEPARATOR));
       if(parts.length == 6) {
@@ -171,14 +172,19 @@ public class TaskAttemptID extends org.apache.hadoop.mapred.ID {
               Integer.parseInt(parts[2]),
               t, Integer.parseInt(parts[4]),
               Integer.parseInt(parts[5]));
-          } else throw new Exception();
+          } else
+            exceptionMsg = "Bad TaskType identifier. TaskAttemptId string : "
+                + str + " is not properly formed.";
         }
       }
     } catch (Exception ex) {
       //fall below
     }
-    throw new IllegalArgumentException("TaskAttemptId string : " + str
-        + " is not properly formed");
+    if (exceptionMsg == null) {
+      exceptionMsg = "TaskAttemptId string : " + str
+          + " is not properly formed";
+    }
+    throw new IllegalArgumentException(exceptionMsg);
   }

 }

@@ -184,6 +184,7 @@ public class TaskID extends org.apache.hadoop.mapred.ID {
     throws IllegalArgumentException {
     if(str == null)
       return null;
+    String exceptionMsg = null;
     try {
       String[] parts = str.split("_");
       if(parts.length == 5) {
@@ -196,13 +197,17 @@ public class TaskID extends org.apache.hadoop.mapred.ID {
               Integer.parseInt(parts[2]),
               t,
               Integer.parseInt(parts[4]));
-          } else throw new Exception();
+          } else
+            exceptionMsg = "Bad TaskType identifier. TaskId string : " + str
+                + " is not properly formed.";
         }
       }
     }catch (Exception ex) {//fall below
     }
-    throw new IllegalArgumentException("TaskId string : " + str
-        + " is not properly formed");
+    if (exceptionMsg == null) {
+      exceptionMsg = "TaskId string : " + str + " is not properly formed";
+    }
+    throw new IllegalArgumentException(exceptionMsg);
   }
   /**
    * Gets the character representing the {@link TaskType}

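Illustrative note (not part of the commit): the reworked forName() methods still throw IllegalArgumentException, but the message now says whether the TaskType letter or the overall ID shape was the problem, which is what the web-service tests elsewhere in this commit assert. A small sketch exercising both messages:

    import org.apache.hadoop.mapreduce.TaskID;

    public class TaskIdParseDemo {
      public static void main(String[] args) {
        for (String s : new String[] { "task_0_0000_d_000000", "task_0_0000_m" }) {
          try {
            TaskID.forName(s);
          } catch (IllegalArgumentException e) {
            // "task_0_0000_d_000000" -> "Bad TaskType identifier. TaskId string : ... is not properly formed."
            // "task_0_0000_m"        -> "TaskId string : ... is not properly formed"
            System.out.println(e.getMessage());
          }
        }
      }
    }
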
@@ -330,6 +330,9 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job
   boolean checkAccess(UserGroupInformation callerUGI, JobACL jobOperation) {
     Map<JobACL, AccessControlList> jobACLs = jobInfo.getJobACLs();
     AccessControlList jobACL = jobACLs.get(jobOperation);
+    if (jobACL == null) {
+      return true;
+    }
     return aclsMgr.checkAccess(callerUGI, jobOperation,
         jobInfo.getUsername(), jobACL);
   }

@@ -408,36 +408,40 @@ public class TestHsWebServicesAttempts extends JerseyTest {
   public void testTaskAttemptIdBogus() throws JSONException, Exception {

     testTaskAttemptIdErrorGeneric("bogusid",
-        "java.lang.Exception: Error parsing attempt ID: bogusid");
+        "java.lang.Exception: TaskAttemptId string : "
+            + "bogusid is not properly formed");
   }

   @Test
   public void testTaskAttemptIdNonExist() throws JSONException, Exception {

     testTaskAttemptIdErrorGeneric(
-        "attempt_12345_0_0_r_1_0",
-        "java.lang.Exception: Error getting info on task attempt id attempt_12345_0_0_r_1_0");
+        "attempt_0_1234_m_000000_0",
+        "java.lang.Exception: Error getting info on task attempt id attempt_0_1234_m_000000_0");
   }

   @Test
   public void testTaskAttemptIdInvalid() throws JSONException, Exception {

-    testTaskAttemptIdErrorGeneric("attempt_12345_0_0_d_1_0",
-        "java.lang.Exception: Unknown task symbol: d");
+    testTaskAttemptIdErrorGeneric("attempt_0_1234_d_000000_0",
+        "java.lang.Exception: Bad TaskType identifier. TaskAttemptId string : "
+            + "attempt_0_1234_d_000000_0 is not properly formed.");
   }

   @Test
   public void testTaskAttemptIdInvalid2() throws JSONException, Exception {

-    testTaskAttemptIdErrorGeneric("attempt_12345_0_r_1_0",
-        "java.lang.Exception: For input string: \"r\"");
+    testTaskAttemptIdErrorGeneric("attempt_1234_m_000000_0",
+        "java.lang.Exception: TaskAttemptId string : "
+            + "attempt_1234_m_000000_0 is not properly formed");
   }

   @Test
   public void testTaskAttemptIdInvalid3() throws JSONException, Exception {

-    testTaskAttemptIdErrorGeneric("attempt_12345_0_0_r_1",
-        "java.lang.Exception: Error parsing attempt ID: attempt_12345_0_0_r_1");
+    testTaskAttemptIdErrorGeneric("attempt_0_1234_m_000000",
+        "java.lang.Exception: TaskAttemptId string : "
+            + "attempt_0_1234_m_000000 is not properly formed");
   }

   private void testTaskAttemptIdErrorGeneric(String attid, String error)

@@ -367,7 +367,7 @@ public class TestHsWebServicesJobs extends JerseyTest {

     try {
       r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
-          .path("job_1234_1_2").get(JSONObject.class);
+          .path("job_0_1234").get(JSONObject.class);
       fail("should have thrown exception on invalid uri");
     } catch (UniformInterfaceException ue) {
       ClientResponse response = ue.getResponse();
@@ -380,7 +380,7 @@ public class TestHsWebServicesJobs extends JerseyTest {
       String type = exception.getString("exception");
       String classname = exception.getString("javaClassName");
       WebServicesTestUtils.checkStringMatch("exception message",
-          "java.lang.Exception: job, job_1234_1_2, is not found", message);
+          "java.lang.Exception: job, job_0_1234, is not found", message);
       WebServicesTestUtils.checkStringMatch("exception type",
           "NotFoundException", type);
       WebServicesTestUtils.checkStringMatch("exception classname",
@@ -399,7 +399,7 @@ public class TestHsWebServicesJobs extends JerseyTest {
       fail("should have thrown exception on invalid uri");
     } catch (UniformInterfaceException ue) {
       ClientResponse response = ue.getResponse();
-      assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
+      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
       assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
       JSONObject msg = response.getEntity(JSONObject.class);
       JSONObject exception = msg.getJSONObject("RemoteException");
@@ -423,7 +423,7 @@ public class TestHsWebServicesJobs extends JerseyTest {
       fail("should have thrown exception on invalid uri");
     } catch (UniformInterfaceException ue) {
       ClientResponse response = ue.getResponse();
-      assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
+      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
       assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
       JSONObject msg = response.getEntity(JSONObject.class);
       JSONObject exception = msg.getJSONObject("RemoteException");
@@ -447,7 +447,7 @@ public class TestHsWebServicesJobs extends JerseyTest {
       fail("should have thrown exception on invalid uri");
     } catch (UniformInterfaceException ue) {
       ClientResponse response = ue.getResponse();
-      assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
+      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
       assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
       String msg = response.getEntity(String.class);
       System.out.println(msg);
@@ -468,11 +468,12 @@ public class TestHsWebServicesJobs extends JerseyTest {

   private void verifyJobIdInvalid(String message, String type, String classname) {
     WebServicesTestUtils.checkStringMatch("exception message",
-        "For input string: \"foo\"", message);
+        "java.lang.Exception: JobId string : job_foo is not properly formed",
+        message);
     WebServicesTestUtils.checkStringMatch("exception type",
-        "NumberFormatException", type);
+        "NotFoundException", type);
     WebServicesTestUtils.checkStringMatch("exception classname",
-        "java.lang.NumberFormatException", classname);
+        "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
   }

   @Test
@@ -494,7 +495,8 @@ public class TestHsWebServicesJobs extends JerseyTest {
     String type = exception.getString("exception");
     String classname = exception.getString("javaClassName");
     WebServicesTestUtils.checkStringMatch("exception message",
-        "java.lang.Exception: Error parsing job ID: bogusfoo", message);
+        "java.lang.Exception: JobId string : "
+            + "bogusfoo is not properly formed", message);
     WebServicesTestUtils.checkStringMatch("exception type",
         "NotFoundException", type);
     WebServicesTestUtils.checkStringMatch("exception classname",

@@ -72,30 +72,26 @@ public class TestHsWebServicesJobsQuery extends JerseyTest {
   private static HsWebApp webApp;

   static class TestAppContext implements AppContext {
-    final ApplicationAttemptId appAttemptID;
-    final ApplicationId appID;
     final String user = MockJobs.newUserName();
     final Map<JobId, Job> jobs;
     final long startTime = System.currentTimeMillis();

-    TestAppContext(int appid, int numJobs, int numTasks, int numAttempts) {
-      appID = MockJobs.newAppID(appid);
-      appAttemptID = MockJobs.newAppAttemptID(appID, 0);
-      jobs = MockJobs.newJobs(appID, numJobs, numTasks, numAttempts);
+    TestAppContext(int numJobs, int numTasks, int numAttempts) {
+      jobs = MockJobs.newJobs(numJobs, numTasks, numAttempts);
     }

     TestAppContext() {
-      this(0, 3, 2, 1);
+      this(3, 2, 1);
     }

     @Override
     public ApplicationAttemptId getApplicationAttemptId() {
-      return appAttemptID;
+      return null;
     }

     @Override
     public ApplicationId getApplicationID() {
-      return appID;
+      return null;
     }

     @Override

@ -435,7 +435,8 @@ public class TestHsWebServicesTasks extends JerseyTest {
|
||||||
String type = exception.getString("exception");
|
String type = exception.getString("exception");
|
||||||
String classname = exception.getString("javaClassName");
|
String classname = exception.getString("javaClassName");
|
||||||
WebServicesTestUtils.checkStringMatch("exception message",
|
WebServicesTestUtils.checkStringMatch("exception message",
|
||||||
"java.lang.Exception: Error parsing task ID: bogustaskid", message);
|
"java.lang.Exception: TaskId string : "
|
||||||
|
+ "bogustaskid is not properly formed", message);
|
||||||
WebServicesTestUtils.checkStringMatch("exception type",
|
WebServicesTestUtils.checkStringMatch("exception type",
|
||||||
"NotFoundException", type);
|
"NotFoundException", type);
|
||||||
WebServicesTestUtils.checkStringMatch("exception classname",
|
WebServicesTestUtils.checkStringMatch("exception classname",
|
||||||
|
@@ -450,7 +451,7 @@ public class TestHsWebServicesTasks extends JerseyTest {
     Map<JobId, Job> jobsMap = appContext.getAllJobs();
     for (JobId id : jobsMap.keySet()) {
       String jobId = MRApps.toString(id);
-      String tid = "task_1234_0_0_m_0";
+      String tid = "task_0_0000_m_000000";
       try {
         r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
             .path(jobId).path("tasks").path(tid).get(JSONObject.class);
@@ -466,7 +467,7 @@ public class TestHsWebServicesTasks extends JerseyTest {
       String type = exception.getString("exception");
       String classname = exception.getString("javaClassName");
       WebServicesTestUtils.checkStringMatch("exception message",
-          "java.lang.Exception: task not found with id task_1234_0_0_m_0",
+          "java.lang.Exception: task not found with id task_0_0000_m_000000",
           message);
       WebServicesTestUtils.checkStringMatch("exception type",
           "NotFoundException", type);
@@ -482,7 +483,7 @@ public class TestHsWebServicesTasks extends JerseyTest {
     Map<JobId, Job> jobsMap = appContext.getAllJobs();
     for (JobId id : jobsMap.keySet()) {
       String jobId = MRApps.toString(id);
-      String tid = "task_1234_0_0_d_0";
+      String tid = "task_0_0000_d_000000";
       try {
         r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
             .path(jobId).path("tasks").path(tid).get(JSONObject.class);
@@ -498,7 +499,8 @@ public class TestHsWebServicesTasks extends JerseyTest {
       String type = exception.getString("exception");
       String classname = exception.getString("javaClassName");
       WebServicesTestUtils.checkStringMatch("exception message",
-          "java.lang.Exception: Unknown task symbol: d", message);
+          "java.lang.Exception: Bad TaskType identifier. TaskId string : "
+              + "task_0_0000_d_000000 is not properly formed.", message);
       WebServicesTestUtils.checkStringMatch("exception type",
           "NotFoundException", type);
       WebServicesTestUtils.checkStringMatch("exception classname",
@@ -513,7 +515,7 @@ public class TestHsWebServicesTasks extends JerseyTest {
     Map<JobId, Job> jobsMap = appContext.getAllJobs();
     for (JobId id : jobsMap.keySet()) {
       String jobId = MRApps.toString(id);
-      String tid = "task_1234_0_m_0";
+      String tid = "task_0000_m_000000";
       try {
         r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
             .path(jobId).path("tasks").path(tid).get(JSONObject.class);
@@ -529,7 +531,8 @@ public class TestHsWebServicesTasks extends JerseyTest {
       String type = exception.getString("exception");
       String classname = exception.getString("javaClassName");
       WebServicesTestUtils.checkStringMatch("exception message",
-          "java.lang.Exception: For input string: \"m\"", message);
+          "java.lang.Exception: TaskId string : "
+              + "task_0000_m_000000 is not properly formed", message);
       WebServicesTestUtils.checkStringMatch("exception type",
           "NotFoundException", type);
       WebServicesTestUtils.checkStringMatch("exception classname",
@@ -544,7 +547,7 @@ public class TestHsWebServicesTasks extends JerseyTest {
     Map<JobId, Job> jobsMap = appContext.getAllJobs();
     for (JobId id : jobsMap.keySet()) {
       String jobId = MRApps.toString(id);
-      String tid = "task_1234_0_0_m";
+      String tid = "task_0_0000_m";
       try {
         r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
             .path(jobId).path("tasks").path(tid).get(JSONObject.class);
@@ -560,8 +563,8 @@ public class TestHsWebServicesTasks extends JerseyTest {
       String type = exception.getString("exception");
       String classname = exception.getString("javaClassName");
       WebServicesTestUtils.checkStringMatch("exception message",
-          "java.lang.Exception: Error parsing task ID: task_1234_0_0_m",
-          message);
+          "java.lang.Exception: TaskId string : "
+              + "task_0_0000_m is not properly formed", message);
       WebServicesTestUtils.checkStringMatch("exception type",
           "NotFoundException", type);
       WebServicesTestUtils.checkStringMatch("exception classname",
@@ -221,6 +221,5 @@ if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
   YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
 fi
 
-  echo "$JAVA" -Dproc_$COMMAND $JAVA_HEAP_MAX $YARN_OPTS -classpath "$CLASSPATH" $CLASS "$@"
   exec "$JAVA" -Dproc_$COMMAND $JAVA_HEAP_MAX $YARN_OPTS -classpath "$CLASSPATH" $CLASS "$@"
 fi
@@ -51,6 +51,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeReconnectEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeStatusEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider;
 import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
@@ -177,17 +178,17 @@ public class ResourceTrackerService extends AbstractService implements
     RMNode rmNode = new RMNodeImpl(nodeId, rmContext, host, cmPort, httpPort,
         resolve(host), capability);
 
-    if (this.rmContext.getRMNodes().putIfAbsent(nodeId, rmNode) != null) {
-      LOG.info("Duplicate registration from the node at: " + host
-          + ", Sending SHUTDOWN Signal to the NodeManager");
-      regResponse.setNodeAction(NodeAction.SHUTDOWN);
-      response.setRegistrationResponse(regResponse);
-      return response;
+    RMNode oldNode = this.rmContext.getRMNodes().putIfAbsent(nodeId, rmNode);
+    if (oldNode == null) {
+      this.rmContext.getDispatcher().getEventHandler().handle(
+          new RMNodeEvent(nodeId, RMNodeEventType.STARTED));
+    } else {
+      LOG.info("Reconnect from the node at: " + host);
+      this.nmLivelinessMonitor.unregister(nodeId);
+      this.rmContext.getDispatcher().getEventHandler().handle(
+          new RMNodeReconnectEvent(nodeId, rmNode));
     }
 
-    this.rmContext.getDispatcher().getEventHandler().handle(
-        new RMNodeEvent(nodeId, RMNodeEventType.STARTED));
-
     this.nmLivelinessMonitor.register(nodeId);
 
     LOG.info("NodeManager from node " + host + "(cmPort: " + cmPort
@@ -28,6 +28,7 @@ public enum RMNodeEventType {
   // ResourceTrackerService
   STATUS_UPDATE,
   REBOOTING,
+  RECONNECTED,
 
   // Source: Application
   CLEANUP_APP,
@@ -110,9 +110,11 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
                                            RMNodeEventType,
                                            RMNodeEvent>(RMNodeState.NEW)
 
-     //Transitions from RUNNING state
+     //Transitions from NEW state
      .addTransition(RMNodeState.NEW, RMNodeState.RUNNING,
          RMNodeEventType.STARTED, new AddNodeTransition())
 
+     //Transitions from RUNNING state
      .addTransition(RMNodeState.RUNNING,
          EnumSet.of(RMNodeState.RUNNING, RMNodeState.UNHEALTHY),
          RMNodeEventType.STATUS_UPDATE, new StatusUpdateWhenHealthyTransition())
@@ -129,11 +131,15 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
          RMNodeEventType.CLEANUP_APP, new CleanUpAppTransition())
      .addTransition(RMNodeState.RUNNING, RMNodeState.RUNNING,
          RMNodeEventType.CLEANUP_CONTAINER, new CleanUpContainerTransition())
+     .addTransition(RMNodeState.RUNNING, RMNodeState.RUNNING,
+         RMNodeEventType.RECONNECTED, new ReconnectNodeTransition())
 
      //Transitions from UNHEALTHY state
      .addTransition(RMNodeState.UNHEALTHY,
          EnumSet.of(RMNodeState.UNHEALTHY, RMNodeState.RUNNING),
          RMNodeEventType.STATUS_UPDATE, new StatusUpdateWhenUnHealthyTransition())
+     .addTransition(RMNodeState.UNHEALTHY, RMNodeState.UNHEALTHY,
+         RMNodeEventType.RECONNECTED, new ReconnectNodeTransition())
 
      // create the topology tables
      .installTopology();
@@ -372,6 +378,39 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
     }
   }
 
+  public static class ReconnectNodeTransition implements
+      SingleArcTransition<RMNodeImpl, RMNodeEvent> {
+
+    @Override
+    public void transition(RMNodeImpl rmNode, RMNodeEvent event) {
+      // Kill containers since node is rejoining.
+      rmNode.context.getDispatcher().getEventHandler().handle(
+          new NodeRemovedSchedulerEvent(rmNode));
+
+      RMNode newNode = ((RMNodeReconnectEvent)event).getReconnectedNode();
+      if (rmNode.getTotalCapability().equals(newNode.getTotalCapability())
+          && rmNode.getHttpPort() == newNode.getHttpPort()) {
+        // Reset heartbeat ID since node just restarted.
+        rmNode.getLastHeartBeatResponse().setResponseId(0);
+        rmNode.context.getDispatcher().getEventHandler().handle(
+            new NodeAddedSchedulerEvent(rmNode));
+      } else {
+        // Reconnected node differs, so replace old node and start new node
+        switch (rmNode.getState()) {
+        case RUNNING:
+          ClusterMetrics.getMetrics().decrNumActiveNodes();
+          break;
+        case UNHEALTHY:
+          ClusterMetrics.getMetrics().decrNumUnhealthyNMs();
+          break;
+        }
+        rmNode.context.getRMNodes().put(newNode.getNodeID(), newNode);
+        rmNode.context.getDispatcher().getEventHandler().handle(
+            new RMNodeEvent(newNode.getNodeID(), RMNodeEventType.STARTED));
+      }
+    }
+  }
+
   public static class CleanUpAppTransition
     implements SingleArcTransition<RMNodeImpl, RMNodeEvent> {
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.rmnode;
+
+import org.apache.hadoop.yarn.api.records.NodeId;
+
+public class RMNodeReconnectEvent extends RMNodeEvent {
+  private RMNode reconnectedNode;
+
+  public RMNodeReconnectEvent(NodeId nodeId, RMNode newNode) {
+    super(nodeId, RMNodeEventType.RECONNECTED);
+    reconnectedNode = newNode;
+  }
+
+  public RMNode getReconnectedNode() {
+    return reconnectedNode;
+  }
+}
@@ -666,7 +666,10 @@ implements ResourceScheduler, CapacitySchedulerContext {
 
   private synchronized void removeNode(RMNode nodeInfo) {
     SchedulerNode node = this.nodes.get(nodeInfo.getNodeID());
-    Resources.subtractFrom(clusterResource, nodeInfo.getTotalCapability());
+    if (node == null) {
+      return;
+    }
+    Resources.subtractFrom(clusterResource, node.getRMNode().getTotalCapability());
     root.updateClusterResource(clusterResource);
     --numNodeManagers;
 
@@ -731,6 +731,9 @@ public class FifoScheduler implements ResourceScheduler {
 
   private synchronized void removeNode(RMNode nodeInfo) {
     SchedulerNode node = getNode(nodeInfo.getNodeID());
+    if (node == null) {
+      return;
+    }
     // Kill running containers
     for(RMContainer container : node.getRunningContainers()) {
       containerCompleted(container,
@@ -744,7 +747,7 @@ public class FifoScheduler implements ResourceScheduler {
     this.nodes.remove(nodeInfo.getNodeID());
 
     // Update cluster metrics
-    Resources.subtractFrom(clusterResource, nodeInfo.getTotalCapability());
+    Resources.subtractFrom(clusterResource, node.getRMNode().getTotalCapability());
   }
 
   @Override
@@ -19,23 +19,18 @@
 package org.apache.hadoop.yarn.server.resourcemanager;
 
 import java.util.List;
-import java.util.Map;
 
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
-import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeResponse;
 
 import com.google.common.collect.Lists;
 
@@ -195,8 +190,12 @@ public class MockNodes {
   };
 
   private static RMNode buildRMNode(int rack, final Resource perNode, RMNodeState state, String httpAddr) {
+    return buildRMNode(rack, perNode, state, httpAddr, NODE_ID++);
+  }
+
+  private static RMNode buildRMNode(int rack, final Resource perNode, RMNodeState state, String httpAddr, int hostnum) {
     final String rackName = "rack"+ rack;
-    final int nid = NODE_ID++;
+    final int nid = hostnum;
     final String hostName = "host"+ nid;
     final int port = 123;
     final NodeId nodeID = newNodeID(hostName, port);
@@ -219,4 +218,8 @@ public class MockNodes {
   public static RMNode newNodeInfo(int rack, final Resource perNode) {
     return buildRMNode(rack, perNode, RMNodeState.RUNNING, "localhost:0");
   }
+
+  public static RMNode newNodeInfo(int rack, final Resource perNode, int hostnum) {
+    return buildRMNode(rack, perNode, null, "localhost:0", hostnum);
+  }
 }
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager;
 
+import java.util.ArrayList;
 import java.util.List;
 
 import junit.framework.Assert;
@@ -27,10 +28,17 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.yarn.api.records.AMResponse;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
@@ -167,10 +175,37 @@ public class TestFifoScheduler {
     testMinimumAllocation(conf);
   }
 
+  @Test
+  public void testReconnectedNode() throws Exception {
+    CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
+    conf.setQueues("default", new String[] {"default"});
+    conf.setCapacity("default", 100);
+    FifoScheduler fs = new FifoScheduler();
+    fs.reinitialize(conf, null, null);
+
+    RMNode n1 = MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1);
+    RMNode n2 = MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 2);
+
+    fs.handle(new NodeAddedSchedulerEvent(n1));
+    fs.handle(new NodeAddedSchedulerEvent(n2));
+    List<ContainerStatus> emptyList = new ArrayList<ContainerStatus>();
+    fs.handle(new NodeUpdateSchedulerEvent(n1, emptyList, emptyList));
+    Assert.assertEquals(6 * GB, fs.getRootQueueMetrics().getAvailableMB());
+
+    // reconnect n1 with downgraded memory
+    n1 = MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 1);
+    fs.handle(new NodeRemovedSchedulerEvent(n1));
+    fs.handle(new NodeAddedSchedulerEvent(n1));
+    fs.handle(new NodeUpdateSchedulerEvent(n1, emptyList, emptyList));
+
+    Assert.assertEquals(4 * GB, fs.getRootQueueMetrics().getAvailableMB());
+  }
+
   public static void main(String[] args) throws Exception {
     TestFifoScheduler t = new TestFifoScheduler();
     t.test();
     t.testDefaultMinimumAllocation();
     t.testNonDefaultMinimumAllocation();
+    t.testReconnectedNode();
   }
 }
@@ -31,12 +31,17 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.DrainDispatcher;
+import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
 import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.records.NodeAction;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
 import org.apache.hadoop.yarn.util.Records;
 import org.junit.After;
 import org.junit.Test;
@@ -189,7 +194,7 @@ public class TestResourceTrackerService {
     conf.set("yarn.resourcemanager.nodes.exclude-path", hostFile
         .getAbsolutePath());
 
-    MockRM rm = new MockRM(conf);
+    rm = new MockRM(conf);
     rm.start();
 
     MockNM nm1 = rm.registerNode("host1:1234", 5120);
@@ -223,6 +228,61 @@ public class TestResourceTrackerService {
         ClusterMetrics.getMetrics().getUnhealthyNMs());
   }
 
+  @Test
+  public void testReconnectNode() throws Exception {
+    final DrainDispatcher dispatcher = new DrainDispatcher();
+    MockRM rm = new MockRM() {
+      @Override
+      protected EventHandler<SchedulerEvent> createSchedulerEventDispatcher() {
+        return new SchedulerEventDispatcher(this.scheduler) {
+          @Override
+          public void handle(SchedulerEvent event) {
+            scheduler.handle(event);
+          }
+        };
+      }
+
+      @Override
+      protected Dispatcher createDispatcher() {
+        return dispatcher;
+      }
+    };
+    rm.start();
+
+    MockNM nm1 = rm.registerNode("host1:1234", 5120);
+    MockNM nm2 = rm.registerNode("host2:5678", 5120);
+    nm1.nodeHeartbeat(true);
+    nm2.nodeHeartbeat(false);
+    checkUnealthyNMCount(rm, nm2, true, 1);
+    final int expectedNMs = ClusterMetrics.getMetrics().getNumActiveNMs();
+    QueueMetrics metrics = rm.getResourceScheduler().getRootQueueMetrics();
+    Assert.assertEquals(5120 + 5120, metrics.getAvailableMB());
+
+    // reconnect of healthy node
+    nm1 = rm.registerNode("host1:1234", 5120);
+    HeartbeatResponse response = nm1.nodeHeartbeat(true);
+    Assert.assertTrue(NodeAction.NORMAL.equals(response.getNodeAction()));
+    dispatcher.await();
+    Assert.assertEquals(expectedNMs, ClusterMetrics.getMetrics().getNumActiveNMs());
+    checkUnealthyNMCount(rm, nm2, true, 1);
+
+    // reconnect of unhealthy node
+    nm2 = rm.registerNode("host2:5678", 5120);
+    response = nm2.nodeHeartbeat(false);
+    Assert.assertTrue(NodeAction.NORMAL.equals(response.getNodeAction()));
+    dispatcher.await();
+    Assert.assertEquals(expectedNMs, ClusterMetrics.getMetrics().getNumActiveNMs());
+    checkUnealthyNMCount(rm, nm2, true, 1);
+
+    // reconnect of node with changed capability
+    nm1 = rm.registerNode("host2:5678", 10240);
+    dispatcher.await();
+    response = nm2.nodeHeartbeat(true);
+    dispatcher.await();
+    Assert.assertTrue(NodeAction.NORMAL.equals(response.getNodeAction()));
+    Assert.assertEquals(5120 + 10240, metrics.getAvailableMB());
+  }
+
   private void writeToHostsFile(String... hosts) throws IOException {
     if (!hostFile.exists()) {
       TEMP_DIR.mkdirs();
@@ -34,6 +34,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.server.resourcemanager.Application;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.Task;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store;
@@ -41,12 +42,15 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
 public class TestCapacityScheduler {
   private static final Log LOG = LogFactory.getLog(TestCapacityScheduler.class);
+  private final int GB = 1024;
 
   private static final String A = CapacitySchedulerConfiguration.ROOT + ".a";
   private static final String B = CapacitySchedulerConfiguration.ROOT + ".b";
@@ -97,8 +101,6 @@ public class TestCapacityScheduler {
 
     LOG.info("--- START: testCapacityScheduler ---");
 
-    final int GB = 1024;
-
     // Register node1
     String host_0 = "host_0";
     org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 =
@@ -340,4 +342,27 @@ public class TestCapacityScheduler {
     cs.reinitialize(conf, null, null);
   }
 
+  @Test
+  public void testReconnectedNode() throws Exception {
+    CapacitySchedulerConfiguration csConf =
+        new CapacitySchedulerConfiguration();
+    setupQueueConfiguration(csConf);
+    CapacityScheduler cs = new CapacityScheduler();
+    cs.reinitialize(csConf, null, null);
+
+    RMNode n1 = MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1);
+    RMNode n2 = MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 2);
+
+    cs.handle(new NodeAddedSchedulerEvent(n1));
+    cs.handle(new NodeAddedSchedulerEvent(n2));
+
+    Assert.assertEquals(6 * GB, cs.getClusterResources().getMemory());
+
+    // reconnect n1 with downgraded memory
+    n1 = MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 1);
+    cs.handle(new NodeRemovedSchedulerEvent(n1));
+    cs.handle(new NodeAddedSchedulerEvent(n1));
+
+    Assert.assertEquals(4 * GB, cs.getClusterResources().getMemory());
+  }
 }