From 62c7e2edfc0cd5baeda71d7c3737398302aea795 Mon Sep 17 00:00:00 2001
From: Aaron Myers
Date: Fri, 24 Feb 2012 17:03:17 +0000
Subject: [PATCH 01/13] HDFS-3003. Remove getHostPortString() from NameNode,
 replace it with NetUtils.getHostPortString(). Contributed by Brandon Li.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1293338 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt        |  5 ++++-
 .../main/java/org/apache/hadoop/hdfs/DFSUtil.java  |  2 +-
 .../hadoop/hdfs/server/namenode/BackupNode.java    | 10 +++++-----
 .../server/namenode/FileChecksumServlets.java      |  2 +-
 .../hdfs/server/namenode/FileDataServlet.java      |  3 ++-
 .../hadoop/hdfs/server/namenode/NameNode.java      | 15 ++++-----------
 .../hdfs/server/namenode/NamenodeJspHelper.java    |  3 ++-
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java     |  6 +++---
 .../hdfs/server/datanode/TestDatanodeJsp.java      |  3 ++-
 .../hdfs/server/namenode/TestBackupNode.java       |  3 ++-
 .../hdfs/server/namenode/TestCheckpoint.java       |  3 ++-
 .../hdfs/server/namenode/TestStreamFile.java       |  3 ++-
 .../hdfs/server/namenode/TestTransferFsImage.java  |  5 +++--
 .../org/apache/hadoop/hdfs/tools/TestGetConf.java  |  4 ++--
 14 files changed, 35 insertions(+), 32 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bc22d0a1120..713a861973e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -133,7 +133,10 @@ Trunk (unreleased changes)
     (todd)
 
     HDFS-2655. BlockReaderLocal#skip performs unnecessary IO. (Brandon Li
-    via jitendra)
+    via jitendra)
+
+    HDFS-3003. Remove getHostPortString() from NameNode, replace it with
+    NetUtils.getHostPortString(). (Brandon Li via atm)
 
   OPTIMIZATIONS
 
     HDFS-2477. Optimize computing the diff between a block report and the
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 7ec7ca39b44..e44e2481405 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -425,7 +425,7 @@ public class DFSUtil {
     // Use default address as fall back
     String defaultAddress;
     try {
-      defaultAddress = NameNode.getHostPortString(NameNode.getAddress(conf));
+      defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
     } catch (IllegalArgumentException e) {
       defaultAddress = null;
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
index 84d1c9f8a64..9cc0b68c1e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
@@ -107,13 +107,13 @@ public class BackupNode extends NameNode {
   @Override // NameNode
   protected void setRpcServerAddress(Configuration conf,
       InetSocketAddress addr) {
-    conf.set(BN_ADDRESS_NAME_KEY, getHostPortString(addr));
+    conf.set(BN_ADDRESS_NAME_KEY, NetUtils.getHostPortString(addr));
   }
 
   @Override // Namenode
   protected void setRpcServiceServerAddress(Configuration conf,
       InetSocketAddress addr) {
-    conf.set(BN_SERVICE_RPC_ADDRESS_KEY, getHostPortString(addr));
+    conf.set(BN_SERVICE_RPC_ADDRESS_KEY, NetUtils.getHostPortString(addr));
   }
 
   @Override // NameNode
@@ -125,7 +125,7 @@ public class BackupNode extends NameNode {
 
   @Override // NameNode
   protected void setHttpServerAddress(Configuration conf){
-    conf.set(BN_HTTP_ADDRESS_NAME_KEY, getHostPortString(getHttpAddress()));
+    conf.set(BN_HTTP_ADDRESS_NAME_KEY, NetUtils.getHostPortString(getHttpAddress()));
   }
 
   @Override // NameNode
@@ -307,8 +307,8 @@ public class BackupNode extends NameNode {
     InetSocketAddress nnAddress = NameNode.getServiceAddress(conf, true);
     this.namenode = new NamenodeProtocolTranslatorPB(nnAddress, conf,
         UserGroupInformation.getCurrentUser());
-    this.nnRpcAddress = getHostPortString(nnAddress);
-    this.nnHttpAddress = getHostPortString(super.getHttpServerAddress(conf));
+    this.nnRpcAddress = NetUtils.getHostPortString(nnAddress);
+    this.nnHttpAddress = NetUtils.getHostPortString(super.getHttpServerAddress(conf));
     // get version and id info from the name-node
     NamespaceInfo nsInfo = null;
     while(!isStopRequested()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
index 7bd1f271834..321d0398c5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
@@ -71,7 +71,7 @@ public class FileChecksumServlets {
       String tokenString = ugi.getTokens().iterator().next().encodeToUrlString();
       dtParam = JspHelper.getDelegationTokenUrlParam(tokenString);
     }
-    String addr = NameNode.getHostPortString(nn.getNameNodeAddress());
+    String addr = NetUtils.getHostPortString(nn.getNameNodeAddress());
     String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
 
     return new URL(scheme, hostname, port,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
index 543033b3412..1604ec128bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ServletUtil;
 
@@ -72,7 +73,7 @@ public class FileDataServlet extends DfsServlet {
       // Add namenode address to the url params
       NameNode nn = NameNodeHttpServer.getNameNodeFromContext(
           getServletContext());
-      String addr = NameNode.getHostPortString(nn.getNameNodeAddress());
+      String addr = NetUtils.getHostPortString(nn.getNameNodeAddress());
       String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
 
       return new URL(scheme, hostname, port,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 7a51f2ed301..a0f4d4b763d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -236,13 +236,6 @@ public class NameNode {
         + namenode.getHostName()+portString);
   }
 
-  /**
-   * Compose a "host:port" string from the address.
-   */
-  public static String getHostPortString(InetSocketAddress addr) {
-    return addr.getHostName() + ":" + addr.getPort();
-  }
-
   //
   // Common NameNode methods implementation for the active name-node role.
// @@ -273,7 +266,7 @@ public class NameNode { */ protected void setRpcServiceServerAddress(Configuration conf, InetSocketAddress serviceRPCAddress) { - setServiceAddress(conf, getHostPortString(serviceRPCAddress)); + setServiceAddress(conf, NetUtils.getHostPortString(serviceRPCAddress)); } protected void setRpcServerAddress(Configuration conf, @@ -293,7 +286,7 @@ public class NameNode { protected void setHttpServerAddress(Configuration conf) { conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, - getHostPortString(getHttpAddress())); + NetUtils.getHostPortString(getHttpAddress())); } protected void loadNamesystem(Configuration conf) throws IOException { @@ -306,8 +299,8 @@ public class NameNode { NamenodeRegistration setRegistration() { nodeRegistration = new NamenodeRegistration( - getHostPortString(rpcServer.getRpcAddress()), - getHostPortString(getHttpAddress()), + NetUtils.getHostPortString(rpcServer.getRpcAddress()), + NetUtils.getHostPortString(getHttpAddress()), getFSImage().getStorage(), getRole()); return nodeRegistration; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java index 496423d4a67..64b27239889 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java @@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.io.Text; +import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NodeBase; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -401,7 +402,7 @@ class NamenodeJspHelper { nodeToRedirect = nn.getHttpAddress().getHostName(); redirectPort = nn.getHttpAddress().getPort(); } - String addr = NameNode.getHostPortString(nn.getNameNodeAddress()); + String addr = NetUtils.getHostPortString(nn.getNameNodeAddress()); String fqdn = InetAddress.getByName(nodeToRedirect).getCanonicalHostName(); redirectLocation = "http://" + fqdn + ":" + redirectPort + "/browseDirectory.jsp?namenodeInfoPort=" diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index a701f081825..c2052723062 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -620,10 +620,10 @@ public class MiniDFSCluster { NameNode nn = createNameNode(nnIndex, conf, numDataNodes, manageNameDfsDirs, format, operation, clusterId); conf.set(DFSUtil.getNameServiceIdKey( - DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId), NameNode + DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId), NetUtils .getHostPortString(nn.getNameNodeAddress())); conf.set(DFSUtil.getNameServiceIdKey( - DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId), NameNode + DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId), NetUtils .getHostPortString(nn.getHttpAddress())); DFSUtil.setGenericConf(conf, nameserviceId, DFS_NAMENODE_HTTP_ADDRESS_KEY); @@ -643,7 +643,7 @@ public class MiniDFSCluster { */ public URI 
getURI(int nnIndex) { InetSocketAddress addr = nameNodes[nnIndex].nameNode.getNameNodeAddress(); - String hostPort = NameNode.getHostPortString(addr); + String hostPort = NetUtils.getHostPortString(addr); URI uri = null; try { uri = new URI("hdfs://" + hostPort); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java index d8a7f38ba3f..973f05b61c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.util.ServletUtil; import org.junit.Test; import org.mockito.Mockito; @@ -134,7 +135,7 @@ public class TestDatanodeJsp { Mockito.doReturn("100").when(reqMock).getParameter("chunkSizeToView"); Mockito.doReturn("1").when(reqMock).getParameter("startOffset"); Mockito.doReturn("1024").when(reqMock).getParameter("blockSize"); - Mockito.doReturn(NameNode.getHostPortString(NameNode.getAddress(CONF))) + Mockito.doReturn(NetUtils.getHostPortString(NameNode.getAddress(CONF))) .when(reqMock).getParameter("nnaddr"); Mockito.doReturn(testFile.toString()).when(reqMock).getPathInfo(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java index 350304c0c1a..2d8a115a9a5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; +import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.log4j.Level; import org.junit.Before; @@ -330,7 +331,7 @@ public class TestBackupNode { InetSocketAddress add = backup.getNameNodeAddress(); // Write to BN FileSystem bnFS = FileSystem.get(new Path("hdfs://" - + NameNode.getHostPortString(add)).toUri(), conf); + + NetUtils.getHostPortString(add)).toUri(), conf); boolean canWrite = true; try { TestCheckpoint.writeFile(bnFS, file3, replication); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java index 52da95725ad..19f481212c7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import 
org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import org.apache.hadoop.hdfs.tools.DFSAdmin; +import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; import org.apache.hadoop.util.StringUtils; @@ -1432,7 +1433,7 @@ public class TestCheckpoint extends TestCase { .format(true).build(); NamenodeProtocols nn = cluster.getNameNodeRpc(); - String fsName = NameNode.getHostPortString( + String fsName = NetUtils.getHostPortString( cluster.getNameNode().getHttpAddress()); // Make a finalized log on the server side. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java index 989e0b13571..c5184d9897c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.DFSInputStream; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.common.JspHelper; +import org.apache.hadoop.net.NetUtils; import org.junit.Test; import org.mockito.Mockito; import org.mortbay.jetty.InclusiveByteRange; @@ -263,7 +264,7 @@ public class TestStreamFile { Mockito.doReturn(CONF).when(mockServletContext).getAttribute( JspHelper.CURRENT_CONF); - Mockito.doReturn(NameNode.getHostPortString(NameNode.getAddress(CONF))) + Mockito.doReturn(NetUtils.getHostPortString(NameNode.getAddress(CONF))) .when(mockHttpServletRequest).getParameter("nnaddr"); Mockito.doReturn(testFile.toString()).when(mockHttpServletRequest) .getPathInfo(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java index 1bc340c3081..2ac39151bb1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java @@ -27,6 +27,7 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.util.StringUtils; import org.junit.Test; import org.mockito.Mockito; @@ -54,7 +55,7 @@ public class TestTransferFsImage { new File("/xxxxx-does-not-exist/blah")); try { - String fsName = NameNode.getHostPortString( + String fsName = NetUtils.getHostPortString( cluster.getNameNode().getHttpAddress()); String id = "getimage=1&txid=0"; @@ -86,7 +87,7 @@ public class TestTransferFsImage { ); try { - String fsName = NameNode.getHostPortString( + String fsName = NetUtils.getHostPortString( cluster.getNameNode().getHttpAddress()); String id = "getimage=1&txid=0"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java index d7be23a97aa..7152e128d4d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java @@ -33,10 +33,10 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.*; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.tools.GetConf; import org.apache.hadoop.hdfs.tools.GetConf.Command; import org.apache.hadoop.hdfs.tools.GetConf.CommandHandler; +import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.util.ToolRunner; import org.junit.Test; @@ -86,7 +86,7 @@ public class TestGetConf { private String[] toStringArray(List list) { String[] ret = new String[list.size()]; for (int i = 0; i < list.size(); i++) { - ret[i] = NameNode.getHostPortString(list.get(i)); + ret[i] = NetUtils.getHostPortString(list.get(i)); } return ret; } From 0e79131981297c29f866f325d63ffd42a4435f88 Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Fri, 24 Feb 2012 19:29:41 +0000 Subject: [PATCH 02/13] HDFS-3009. Remove duplicate code in DFSClient#isLocalAddress by using NetUtils. Contributed by Hari Mankude. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1293390 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 7 +++++-- .../main/java/org/apache/hadoop/hdfs/DFSClient.java | 13 ++----------- 2 files changed, 7 insertions(+), 13 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 713a861973e..5d7c0096764 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -132,12 +132,15 @@ Trunk (unreleased changes) HDFS-2878. Fix TestBlockRecovery and move it back into main test directory. (todd) - HDFS-2655. BlockReaderLocal#skip performs unnecessary IO. (Brandon Li - via jitendra) + HDFS-2655. BlockReaderLocal#skip performs unnecessary IO. + (Brandon Li via jitendra) HDFS-3003. Remove getHostPortString() from NameNode, replace it with NetUtils.getHostPortString(). (Brandon Li via atm) + HDFS-3009. Remove duplicate code in DFSClient#isLocalAddress by using + NetUtils. (Hari Mankude via suresh) + OPTIMIZATIONS HDFS-2477. Optimize computing the diff between a block report and the namenode state. (Tomasz Nykiel via hairong) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index be52b48f116..2fae52a8507 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -548,18 +548,9 @@ public class DFSClient implements java.io.Closeable { } return true; } + + boolean local = NetUtils.isLocalAddress(addr); - // Check if the address is any local or loop back - boolean local = addr.isAnyLocalAddress() || addr.isLoopbackAddress(); - - // Check if the address is defined on any interface - if (!local) { - try { - local = NetworkInterface.getByInetAddress(addr) != null; - } catch (SocketException e) { - local = false; - } - } if (LOG.isTraceEnabled()) { LOG.trace("Address " + targetAddr + " is local"); } From 78f22bc525679a286285dd5e70a68f9b5a6f6ed7 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Fri, 24 Feb 2012 21:14:04 +0000 Subject: [PATCH 03/13] HDFS-3008. Negative caching of local addrs doesn't work. 
Contributed by Eli Collins git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1293419 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../main/java/org/apache/hadoop/hdfs/DFSClient.java | 10 ++++++---- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 5d7c0096764..a5398891e33 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -280,6 +280,8 @@ Release 0.23.2 - UNRELEASED dfs.client.block.write.replace-datanode-on-failure.enable should be true. (szetszwo) + HDFS-3008. Negative caching of local addrs doesn't work. (eli) + Release 0.23.1 - 2012-02-17 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 2fae52a8507..491cd7e989a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -542,17 +542,19 @@ public class DFSClient implements java.io.Closeable { private static boolean isLocalAddress(InetSocketAddress targetAddr) { InetAddress addr = targetAddr.getAddress(); Boolean cached = localAddrMap.get(addr.getHostAddress()); - if (cached != null && cached) { + if (cached != null) { if (LOG.isTraceEnabled()) { - LOG.trace("Address " + targetAddr + " is local"); + LOG.trace("Address " + targetAddr + + (cached ? " is local" : " is not local")); } - return true; + return cached; } boolean local = NetUtils.isLocalAddress(addr); if (LOG.isTraceEnabled()) { - LOG.trace("Address " + targetAddr + " is local"); + LOG.trace("Address " + targetAddr + + (local ? " is local" : " is not local")); } localAddrMap.put(addr.getHostAddress(), local); return local; From 969b3809e471fd4fe88fdf85dc0356e6b7aac79e Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Fri, 24 Feb 2012 21:21:20 +0000 Subject: [PATCH 04/13] MAPREDUCE-3866. Fixed the bin/yarn script to not print the command line unnecessarily. (vinodkv) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1293425 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 4 ++++ hadoop-mapreduce-project/hadoop-yarn/bin/yarn | 1 - 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index b37b2e17300..1e1392ac375 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -14,6 +14,7 @@ Trunk (unreleased changes) (Plamen Jeliazkov via shv) IMPROVEMENTS + MAPREDUCE-3787. [Gridmix] Optimize job monitoring and STRESS mode for faster job submission. (amarrk) @@ -107,6 +108,9 @@ Release 0.23.2 - UNRELEASED MAPREDUCE-3877 Add a test to formalise the current state transitions of the yarn lifecycle. (stevel) + MAPREDUCE-3866. Fixed the bin/yarn script to not print the command line + unnecessarily. 
(vinodkv) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-mapreduce-project/hadoop-yarn/bin/yarn b/hadoop-mapreduce-project/hadoop-yarn/bin/yarn index d7dae8b8d86..fe8006101d0 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/bin/yarn +++ b/hadoop-mapreduce-project/hadoop-yarn/bin/yarn @@ -221,6 +221,5 @@ if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH" fi -echo "$JAVA" -Dproc_$COMMAND $JAVA_HEAP_MAX $YARN_OPTS -classpath "$CLASSPATH" $CLASS "$@" exec "$JAVA" -Dproc_$COMMAND $JAVA_HEAP_MAX $YARN_OPTS -classpath "$CLASSPATH" $CLASS "$@" fi From c0572656ced07a885f848c1134edd7b1c291d246 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Fri, 24 Feb 2012 21:39:31 +0000 Subject: [PATCH 05/13] MAPREDUCE-3730. Modified RM to allow restarted NMs to be able to join the cluster without waiting for expiry. Contributed by Jason Lowe. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1293436 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 + .../ResourceTrackerService.java | 19 +++--- .../rmnode/RMNodeEventType.java | 1 + .../resourcemanager/rmnode/RMNodeImpl.java | 41 +++++++++++- .../rmnode/RMNodeReconnectEvent.java | 34 ++++++++++ .../scheduler/capacity/CapacityScheduler.java | 5 +- .../scheduler/fifo/FifoScheduler.java | 5 +- .../server/resourcemanager/MockNodes.java | 15 +++-- .../resourcemanager/TestFifoScheduler.java | 35 +++++++++++ .../TestResourceTrackerService.java | 62 ++++++++++++++++++- .../capacity/TestCapacityScheduler.java | 29 ++++++++- 11 files changed, 228 insertions(+), 21 deletions(-) create mode 100644 hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeReconnectEvent.java diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 1e1392ac375..54b84812066 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -111,6 +111,9 @@ Release 0.23.2 - UNRELEASED MAPREDUCE-3866. Fixed the bin/yarn script to not print the command line unnecessarily. (vinodkv) + MAPREDUCE-3730. Modified RM to allow restarted NMs to be able to join the + cluster without waiting for expiry. 
(Jason Lowe via vinodkv) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java index 75c91aa83f2..d762766efcb 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java @@ -51,6 +51,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeReconnectEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeStatusEvent; import org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider; import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; @@ -177,17 +178,17 @@ public class ResourceTrackerService extends AbstractService implements RMNode rmNode = new RMNodeImpl(nodeId, rmContext, host, cmPort, httpPort, resolve(host), capability); - if (this.rmContext.getRMNodes().putIfAbsent(nodeId, rmNode) != null) { - LOG.info("Duplicate registration from the node at: " + host - + ", Sending SHUTDOWN Signal to the NodeManager"); - regResponse.setNodeAction(NodeAction.SHUTDOWN); - response.setRegistrationResponse(regResponse); - return response; + RMNode oldNode = this.rmContext.getRMNodes().putIfAbsent(nodeId, rmNode); + if (oldNode == null) { + this.rmContext.getDispatcher().getEventHandler().handle( + new RMNodeEvent(nodeId, RMNodeEventType.STARTED)); + } else { + LOG.info("Reconnect from the node at: " + host); + this.nmLivelinessMonitor.unregister(nodeId); + this.rmContext.getDispatcher().getEventHandler().handle( + new RMNodeReconnectEvent(nodeId, rmNode)); } - this.rmContext.getDispatcher().getEventHandler().handle( - new RMNodeEvent(nodeId, RMNodeEventType.STARTED)); - this.nmLivelinessMonitor.register(nodeId); LOG.info("NodeManager from node " + host + "(cmPort: " + cmPort diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java index d5628361013..ef644be7000 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java @@ -28,6 +28,7 @@ public enum RMNodeEventType { // ResourceTrackerService STATUS_UPDATE, REBOOTING, + RECONNECTED, // Source: Application CLEANUP_APP, diff 
--git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java index 9b8892a6dcd..f0384da703b 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java @@ -110,9 +110,11 @@ public class RMNodeImpl implements RMNode, EventHandler { RMNodeEventType, RMNodeEvent>(RMNodeState.NEW) - //Transitions from RUNNING state + //Transitions from NEW state .addTransition(RMNodeState.NEW, RMNodeState.RUNNING, RMNodeEventType.STARTED, new AddNodeTransition()) + + //Transitions from RUNNING state .addTransition(RMNodeState.RUNNING, EnumSet.of(RMNodeState.RUNNING, RMNodeState.UNHEALTHY), RMNodeEventType.STATUS_UPDATE, new StatusUpdateWhenHealthyTransition()) @@ -129,11 +131,15 @@ public class RMNodeImpl implements RMNode, EventHandler { RMNodeEventType.CLEANUP_APP, new CleanUpAppTransition()) .addTransition(RMNodeState.RUNNING, RMNodeState.RUNNING, RMNodeEventType.CLEANUP_CONTAINER, new CleanUpContainerTransition()) + .addTransition(RMNodeState.RUNNING, RMNodeState.RUNNING, + RMNodeEventType.RECONNECTED, new ReconnectNodeTransition()) //Transitions from UNHEALTHY state .addTransition(RMNodeState.UNHEALTHY, EnumSet.of(RMNodeState.UNHEALTHY, RMNodeState.RUNNING), RMNodeEventType.STATUS_UPDATE, new StatusUpdateWhenUnHealthyTransition()) + .addTransition(RMNodeState.UNHEALTHY, RMNodeState.UNHEALTHY, + RMNodeEventType.RECONNECTED, new ReconnectNodeTransition()) // create the topology tables .installTopology(); @@ -372,6 +378,39 @@ public class RMNodeImpl implements RMNode, EventHandler { } } + public static class ReconnectNodeTransition implements + SingleArcTransition { + + @Override + public void transition(RMNodeImpl rmNode, RMNodeEvent event) { + // Kill containers since node is rejoining. + rmNode.context.getDispatcher().getEventHandler().handle( + new NodeRemovedSchedulerEvent(rmNode)); + + RMNode newNode = ((RMNodeReconnectEvent)event).getReconnectedNode(); + if (rmNode.getTotalCapability().equals(newNode.getTotalCapability()) + && rmNode.getHttpPort() == newNode.getHttpPort()) { + // Reset heartbeat ID since node just restarted. 
+ rmNode.getLastHeartBeatResponse().setResponseId(0); + rmNode.context.getDispatcher().getEventHandler().handle( + new NodeAddedSchedulerEvent(rmNode)); + } else { + // Reconnected node differs, so replace old node and start new node + switch (rmNode.getState()) { + case RUNNING: + ClusterMetrics.getMetrics().decrNumActiveNodes(); + break; + case UNHEALTHY: + ClusterMetrics.getMetrics().decrNumUnhealthyNMs(); + break; + } + rmNode.context.getRMNodes().put(newNode.getNodeID(), newNode); + rmNode.context.getDispatcher().getEventHandler().handle( + new RMNodeEvent(newNode.getNodeID(), RMNodeEventType.STARTED)); + } + } + } + public static class CleanUpAppTransition implements SingleArcTransition { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeReconnectEvent.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeReconnectEvent.java new file mode 100644 index 00000000000..b1fa0ad8c0c --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeReconnectEvent.java @@ -0,0 +1,34 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +package org.apache.hadoop.yarn.server.resourcemanager.rmnode; + +import org.apache.hadoop.yarn.api.records.NodeId; + +public class RMNodeReconnectEvent extends RMNodeEvent { + private RMNode reconnectedNode; + + public RMNodeReconnectEvent(NodeId nodeId, RMNode newNode) { + super(nodeId, RMNodeEventType.RECONNECTED); + reconnectedNode = newNode; + } + + public RMNode getReconnectedNode() { + return reconnectedNode; + } +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index aa7d23ed91e..0f90f6c4e2c 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -666,7 +666,10 @@ implements ResourceScheduler, CapacitySchedulerContext { private synchronized void removeNode(RMNode nodeInfo) { SchedulerNode node = this.nodes.get(nodeInfo.getNodeID()); - Resources.subtractFrom(clusterResource, nodeInfo.getTotalCapability()); + if (node == null) { + return; + } + Resources.subtractFrom(clusterResource, node.getRMNode().getTotalCapability()); root.updateClusterResource(clusterResource); --numNodeManagers; diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java index 6fcf1fedd9d..152668318d7 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java @@ -731,6 +731,9 @@ public class FifoScheduler implements ResourceScheduler { private synchronized void removeNode(RMNode nodeInfo) { SchedulerNode node = getNode(nodeInfo.getNodeID()); + if (node == null) { + return; + } // Kill running containers for(RMContainer container : node.getRunningContainers()) { containerCompleted(container, @@ -744,7 +747,7 @@ public class FifoScheduler implements ResourceScheduler { this.nodes.remove(nodeInfo.getNodeID()); // Update cluster metrics - Resources.subtractFrom(clusterResource, nodeInfo.getTotalCapability()); + Resources.subtractFrom(clusterResource, node.getRMNode().getTotalCapability()); } @Override diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java index 05b17a367fe..f30883f1bad 
100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java @@ -19,23 +19,18 @@ package org.apache.hadoop.yarn.server.resourcemanager; import java.util.List; -import java.util.Map; import org.apache.hadoop.net.Node; import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeHealthStatus; import org.apache.hadoop.yarn.api.records.NodeId; -import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeResponse; import com.google.common.collect.Lists; @@ -195,8 +190,12 @@ public class MockNodes { }; private static RMNode buildRMNode(int rack, final Resource perNode, RMNodeState state, String httpAddr) { + return buildRMNode(rack, perNode, state, httpAddr, NODE_ID++); + } + + private static RMNode buildRMNode(int rack, final Resource perNode, RMNodeState state, String httpAddr, int hostnum) { final String rackName = "rack"+ rack; - final int nid = NODE_ID++; + final int nid = hostnum; final String hostName = "host"+ nid; final int port = 123; final NodeId nodeID = newNodeID(hostName, port); @@ -219,4 +218,8 @@ public class MockNodes { public static RMNode newNodeInfo(int rack, final Resource perNode) { return buildRMNode(rack, perNode, RMNodeState.RUNNING, "localhost:0"); } + + public static RMNode newNodeInfo(int rack, final Resource perNode, int hostnum) { + return buildRMNode(rack, perNode, null, "localhost:0", hostnum); + } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java index 2cca6f09ad8..e995e5153aa 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager; +import java.util.ArrayList; import java.util.List; import junit.framework.Assert; @@ -27,10 +28,17 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.yarn.api.records.AMResponse; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.api.records.ContainerStatus; import 
org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler; import org.apache.log4j.Level; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; @@ -167,10 +175,37 @@ public class TestFifoScheduler { testMinimumAllocation(conf); } + @Test + public void testReconnectedNode() throws Exception { + CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration(); + conf.setQueues("default", new String[] {"default"}); + conf.setCapacity("default", 100); + FifoScheduler fs = new FifoScheduler(); + fs.reinitialize(conf, null, null); + + RMNode n1 = MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1); + RMNode n2 = MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 2); + + fs.handle(new NodeAddedSchedulerEvent(n1)); + fs.handle(new NodeAddedSchedulerEvent(n2)); + List emptyList = new ArrayList(); + fs.handle(new NodeUpdateSchedulerEvent(n1, emptyList, emptyList)); + Assert.assertEquals(6 * GB, fs.getRootQueueMetrics().getAvailableMB()); + + // reconnect n1 with downgraded memory + n1 = MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 1); + fs.handle(new NodeRemovedSchedulerEvent(n1)); + fs.handle(new NodeAddedSchedulerEvent(n1)); + fs.handle(new NodeUpdateSchedulerEvent(n1, emptyList, emptyList)); + + Assert.assertEquals(4 * GB, fs.getRootQueueMetrics().getAvailableMB()); + } + public static void main(String[] args) throws Exception { TestFifoScheduler t = new TestFifoScheduler(); t.test(); t.testDefaultMinimumAllocation(); t.testNonDefaultMinimumAllocation(); + t.testReconnectedNode(); } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java index 8b3f4a08e90..7826819a112 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java @@ -31,12 +31,17 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.event.Dispatcher; +import org.apache.hadoop.yarn.event.DrainDispatcher; +import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.factories.RecordFactory; import 
org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse; import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse; import org.apache.hadoop.yarn.server.api.records.NodeAction; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; import org.apache.hadoop.yarn.util.Records; import org.junit.After; import org.junit.Test; @@ -189,7 +194,7 @@ public class TestResourceTrackerService { conf.set("yarn.resourcemanager.nodes.exclude-path", hostFile .getAbsolutePath()); - MockRM rm = new MockRM(conf); + rm = new MockRM(conf); rm.start(); MockNM nm1 = rm.registerNode("host1:1234", 5120); @@ -223,6 +228,61 @@ public class TestResourceTrackerService { ClusterMetrics.getMetrics().getUnhealthyNMs()); } + @Test + public void testReconnectNode() throws Exception { + final DrainDispatcher dispatcher = new DrainDispatcher(); + MockRM rm = new MockRM() { + @Override + protected EventHandler createSchedulerEventDispatcher() { + return new SchedulerEventDispatcher(this.scheduler) { + @Override + public void handle(SchedulerEvent event) { + scheduler.handle(event); + } + }; + } + + @Override + protected Dispatcher createDispatcher() { + return dispatcher; + } + }; + rm.start(); + + MockNM nm1 = rm.registerNode("host1:1234", 5120); + MockNM nm2 = rm.registerNode("host2:5678", 5120); + nm1.nodeHeartbeat(true); + nm2.nodeHeartbeat(false); + checkUnealthyNMCount(rm, nm2, true, 1); + final int expectedNMs = ClusterMetrics.getMetrics().getNumActiveNMs(); + QueueMetrics metrics = rm.getResourceScheduler().getRootQueueMetrics(); + Assert.assertEquals(5120 + 5120, metrics.getAvailableMB()); + + // reconnect of healthy node + nm1 = rm.registerNode("host1:1234", 5120); + HeartbeatResponse response = nm1.nodeHeartbeat(true); + Assert.assertTrue(NodeAction.NORMAL.equals(response.getNodeAction())); + dispatcher.await(); + Assert.assertEquals(expectedNMs, ClusterMetrics.getMetrics().getNumActiveNMs()); + checkUnealthyNMCount(rm, nm2, true, 1); + + // reconnect of unhealthy node + nm2 = rm.registerNode("host2:5678", 5120); + response = nm2.nodeHeartbeat(false); + Assert.assertTrue(NodeAction.NORMAL.equals(response.getNodeAction())); + dispatcher.await(); + Assert.assertEquals(expectedNMs, ClusterMetrics.getMetrics().getNumActiveNMs()); + checkUnealthyNMCount(rm, nm2, true, 1); + + // reconnect of node with changed capability + nm1 = rm.registerNode("host2:5678", 10240); + dispatcher.await(); + response = nm2.nodeHeartbeat(true); + dispatcher.await(); + Assert.assertTrue(NodeAction.NORMAL.equals(response.getNodeAction())); + Assert.assertEquals(5120 + 10240, metrics.getAvailableMB()); + } + private void writeToHostsFile(String... 
hosts) throws IOException { if (!hostFile.exists()) { TEMP_DIR.mkdirs(); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index bcfd09d3c8f..dbe21d1c6ce 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -34,6 +34,7 @@ import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.server.resourcemanager.Application; +import org.apache.hadoop.yarn.server.resourcemanager.MockNodes; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.Task; import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store; @@ -41,12 +42,15 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory; import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent; import org.junit.After; import org.junit.Before; import org.junit.Test; public class TestCapacityScheduler { private static final Log LOG = LogFactory.getLog(TestCapacityScheduler.class); + private final int GB = 1024; private static final String A = CapacitySchedulerConfiguration.ROOT + ".a"; private static final String B = CapacitySchedulerConfiguration.ROOT + ".b"; @@ -97,8 +101,6 @@ public class TestCapacityScheduler { LOG.info("--- START: testCapacityScheduler ---"); - final int GB = 1024; - // Register node1 String host_0 = "host_0"; org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 = @@ -340,4 +342,27 @@ public class TestCapacityScheduler { cs.reinitialize(conf, null, null); } + @Test + public void testReconnectedNode() throws Exception { + CapacitySchedulerConfiguration csConf = + new CapacitySchedulerConfiguration(); + setupQueueConfiguration(csConf); + CapacityScheduler cs = new CapacityScheduler(); + cs.reinitialize(csConf, null, null); + + RMNode n1 = MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1); + RMNode n2 = MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 2); + + cs.handle(new NodeAddedSchedulerEvent(n1)); + cs.handle(new NodeAddedSchedulerEvent(n2)); + + Assert.assertEquals(6 * GB, cs.getClusterResources().getMemory()); + + // reconnect n1 with downgraded memory + n1 = MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 1); + cs.handle(new NodeRemovedSchedulerEvent(n1)); + cs.handle(new NodeAddedSchedulerEvent(n1)); + + Assert.assertEquals(4 * GB, cs.getClusterResources().getMemory()); + } } From 
582b97c3e75d3e7535a6cdf32a53582e89380490 Mon Sep 17 00:00:00 2001 From: Thomas Graves Date: Fri, 24 Feb 2012 22:30:47 +0000 Subject: [PATCH 06/13] MAPREDUCE-3904. Job history produced with mapreduce.cluster.acls.enabled false can not be viewed with mapreduce.cluster.acls.enabled true (Jonathon Eagles via tgraves) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1293456 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 4 ++++ .../hadoop/mapreduce/v2/app/job/impl/JobImpl.java | 3 +++ .../hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java | 11 +++++++++++ .../apache/hadoop/mapreduce/v2/hs/CompletedJob.java | 3 +++ 4 files changed, 21 insertions(+) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 54b84812066..be03da3b964 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -149,6 +149,10 @@ Release 0.23.2 - UNRELEASED MAPREDUCE-3738. MM can hang during shutdown if AppLogAggregatorImpl thread dies unexpectedly (Jason Lowe via sseth) + + MAPREDUCE-3904 Job history produced with mapreduce.cluster.acls.enabled + false can not be viewed with mapreduce.cluster.acls.enabled true + (Jonathon Eagles via tgraves) Release 0.23.1 - 2012-02-17 diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java index cd357a23da1..0abccc4510c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java @@ -438,6 +438,9 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job, public boolean checkAccess(UserGroupInformation callerUGI, JobACL jobOperation) { AccessControlList jobACL = jobACLs.get(jobOperation); + if (jobACL == null) { + return true; + } return aclsManager.checkAccess(callerUGI, jobOperation, username, jobACL); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java index 2461760dd3d..81387a0be40 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java @@ -191,5 +191,16 @@ public class TestJobImpl { null, null, null, true, null, 0, null); Assert.assertTrue(job4.checkAccess(ugi1, JobACL.VIEW_JOB)); Assert.assertTrue(job4.checkAccess(ugi2, JobACL.VIEW_JOB)); + + // Setup configuration access without security enabled + Configuration conf5 = new Configuration(); + conf5.setBoolean(MRConfig.MR_ACLS_ENABLED, true); + conf5.set(MRJobConfig.JOB_ACL_VIEW_JOB, ""); + + // Verify access + JobImpl job5 = new JobImpl(jobId, null, conf5, null, null, null, null, null, + null, null, null, true, null, 0, null); + 
Assert.assertTrue(job5.checkAccess(ugi1, null)); + Assert.assertTrue(job5.checkAccess(ugi2, null)); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java index d0465d37aef..9584d05dcd7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java @@ -330,6 +330,9 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job boolean checkAccess(UserGroupInformation callerUGI, JobACL jobOperation) { Map jobACLs = jobInfo.getJobACLs(); AccessControlList jobACL = jobACLs.get(jobOperation); + if (jobACL == null) { + return true; + } return aclsMgr.checkAccess(callerUGI, jobOperation, jobInfo.getUsername(), jobACL); } From 4e64d7b447fb106de28d6512462a254240bfaf5c Mon Sep 17 00:00:00 2001 From: Robert Joseph Evans Date: Fri, 24 Feb 2012 23:00:17 +0000 Subject: [PATCH 07/13] MAPREDUCE-3918 proc_historyserver no longer in command line arguments for HistoryServer (Jon Eagles via bobby) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1293469 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 2 ++ hadoop-mapreduce-project/bin/mapred | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index be03da3b964..8bcd0d68e53 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -117,6 +117,8 @@ Release 0.23.2 - UNRELEASED OPTIMIZATIONS BUG FIXES + MAPREDUCE-3918 proc_historyserver no longer in command line arguments for + HistoryServer (Jon Eagles via bobby) MAPREDUCE-3862. Nodemanager can appear to hang on shutdown due to lingering DeletionService threads (Jason Lowe via bobby) diff --git a/hadoop-mapreduce-project/bin/mapred b/hadoop-mapreduce-project/bin/mapred index 898e053afdf..eb13e60782d 100755 --- a/hadoop-mapreduce-project/bin/mapred +++ b/hadoop-mapreduce-project/bin/mapred @@ -136,4 +136,4 @@ fi HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}" export CLASSPATH -exec "$JAVA" $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@" +exec "$JAVA" -Dproc_$COMMAND $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@" From 5293e5d1c5e6c4040299cfce849e757514280fe0 Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Fri, 24 Feb 2012 23:52:20 +0000 Subject: [PATCH 08/13] HDFS-3002. TestNameNodeMetrics need not wait for metrics update. Contributed by Suresh Srinivas. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1293482 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../namenode/metrics/TestNameNodeMetrics.java | 21 ++----------------- 2 files changed, 5 insertions(+), 19 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index a5398891e33..91bbe7b01a0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -141,6 +141,9 @@ Trunk (unreleased changes) HDFS-3009. Remove duplicate code in DFSClient#isLocalAddress by using NetUtils. 
(Hari Mankude via suresh) + HDFS-3002. TestNameNodeMetrics need not wait for metrics update. + (suresh) + OPTIMIZATIONS HDFS-2477. Optimize computing the diff between a block report and the namenode state. (Tomasz Nykiel via hairong) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java index 8626584c55f..c993f6c9ae2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java @@ -103,12 +103,6 @@ public class TestNameNodeMetrics { DFSTestUtil.createFile(fs, file, fileLen, replicas, rand.nextLong()); } - private void updateMetrics() throws Exception { - // Wait for metrics update (corresponds to dfs.namenode.replication.interval - // for some block related metrics to get updated) - Thread.sleep(1000); - } - private void readFile(FileSystem fileSys,Path name) throws IOException { //Just read file so that getNumBlockLocations are incremented DataInputStream stm = fileSys.open(name); @@ -125,7 +119,6 @@ public class TestNameNodeMetrics { createFile(file, 3200, (short)3); final long blockCount = 32; int blockCapacity = namesystem.getBlockCapacity(); - updateMetrics(); assertGauge("BlockCapacity", blockCapacity, getMetrics(NS_METRICS)); MetricsRecordBuilder rb = getMetrics(NN_METRICS); @@ -140,7 +133,6 @@ public class TestNameNodeMetrics { while (threshold < blockCount) { blockCapacity <<= 1; } - updateMetrics(); long filesTotal = file.depth() + 1; // Add 1 for root rb = getMetrics(NS_METRICS); assertGauge("FilesTotal", filesTotal, rb); @@ -150,7 +142,6 @@ public class TestNameNodeMetrics { filesTotal--; // reduce the filecount for deleted file waitForDeletion(); - updateMetrics(); rb = getMetrics(NS_METRICS); assertGauge("FilesTotal", filesTotal, rb); assertGauge("BlocksTotal", 0L, rb); @@ -174,7 +165,7 @@ public class TestNameNodeMetrics { cluster.getNameNode(), file.toString(), 0, 1).get(0); bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0], "TEST"); - updateMetrics(); + Thread.sleep(1000); // Wait for block to be marked corrupt MetricsRecordBuilder rb = getMetrics(NS_METRICS); assertGauge("CorruptBlocks", 1L, rb); assertGauge("PendingReplicationBlocks", 1L, rb); @@ -196,7 +187,6 @@ public class TestNameNodeMetrics { createFile(file, 100, (short)2); long totalBlocks = 1; NameNodeAdapter.setReplication(namesystem, file.toString(), (short)1); - updateMetrics(); MetricsRecordBuilder rb = getMetrics(NS_METRICS); assertGauge("ExcessBlocks", totalBlocks, rb); fs.delete(file, true); @@ -214,7 +204,7 @@ public class TestNameNodeMetrics { cluster.getNameNode(), file.toString(), 0, 1).get(0); bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0], "TEST"); - updateMetrics(); + Thread.sleep(1000); // Wait for block to be marked corrupt MetricsRecordBuilder rb = getMetrics(NS_METRICS); assertGauge("UnderReplicatedBlocks", 1L, rb); assertGauge("MissingBlocks", 1L, rb); @@ -236,7 +226,6 @@ public class TestNameNodeMetrics { Path target = getTestPath("target"); createFile(target, 100, (short)1); fs.rename(src, target, Rename.OVERWRITE); - updateMetrics(); MetricsRecordBuilder rb = getMetrics(NN_METRICS); assertCounter("FilesRenamed", 1L, rb); assertCounter("FilesDeleted", 1L, rb); @@ 
-264,7 +253,6 @@ public class TestNameNodeMetrics { //Perform create file operation createFile(file1_Path,100,(short)2); - updateMetrics(); //Create file does not change numGetBlockLocations metric //expect numGetBlockLocations = 0 for previous and current interval @@ -273,14 +261,12 @@ public class TestNameNodeMetrics { // Open and read file operation increments GetBlockLocations // Perform read file operation on earlier created file readFile(fs, file1_Path); - updateMetrics(); // Verify read file operation has incremented numGetBlockLocations by 1 assertCounter("GetBlockLocations", 1L, getMetrics(NN_METRICS)); // opening and reading file twice will increment numGetBlockLocations by 2 readFile(fs, file1_Path); readFile(fs, file1_Path); - updateMetrics(); assertCounter("GetBlockLocations", 3L, getMetrics(NN_METRICS)); } @@ -298,7 +284,6 @@ public class TestNameNodeMetrics { assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS)); fs.mkdirs(new Path(TEST_ROOT_DIR_PATH, "/tmp")); - updateMetrics(); assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS)); assertGauge("LastWrittenTransactionId", 2L, getMetrics(NS_METRICS)); @@ -306,7 +291,6 @@ public class TestNameNodeMetrics { assertGauge("TransactionsSinceLastLogRoll", 2L, getMetrics(NS_METRICS)); cluster.getNameNodeRpc().rollEditLog(); - updateMetrics(); assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS)); assertGauge("LastWrittenTransactionId", 4L, getMetrics(NS_METRICS)); @@ -316,7 +300,6 @@ public class TestNameNodeMetrics { cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER); cluster.getNameNodeRpc().saveNamespace(); cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE); - updateMetrics(); long newLastCkptTime = MetricsAsserts.getLongGauge("LastCheckpointTime", getMetrics(NS_METRICS)); From dacbeb5f6711b83bd293928b5329f7b846f2e66e Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Sat, 25 Feb 2012 00:15:37 +0000 Subject: [PATCH 09/13] HDFS-3006. In WebHDFS, when the return body is empty, set the Content-Type to application/octet-stream instead of application/json. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1293487 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../web/resources/DatanodeWebHdfsMethods.java | 12 ++++---- .../web/resources/NamenodeWebHdfsMethods.java | 28 +++++++++---------- .../web/TestWebHdfsFileSystemContract.java | 3 ++ 4 files changed, 26 insertions(+), 20 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 91bbe7b01a0..fc74f3cbdf1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -285,6 +285,9 @@ Release 0.23.2 - UNRELEASED HDFS-3008. Negative caching of local addrs doesn't work. (eli) + HDFS-3006. In WebHDFS, when the return body is empty, set the Content-Type + to application/octet-stream instead of application/json. 
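On the HDFS-3006 entry above: the WebHDFS methods now advertise application/octet-stream alongside JSON in @Produces and set it explicitly on responses that carry no entity, so an empty body is no longer labelled application/json. A minimal standalone JAX-RS sketch of the same pattern follows; the resource path and method name are made up and are not part of WebHDFS.

    import javax.ws.rs.PUT;
    import javax.ws.rs.Path;
    import javax.ws.rs.Produces;
    import javax.ws.rs.core.MediaType;
    import javax.ws.rs.core.Response;

    // Sketch only: mirrors the Content-Type handling applied in
    // NamenodeWebHdfsMethods / DatanodeWebHdfsMethods by HDFS-3006.
    @Path("/example")
    public class EmptyBodyResource {
      @PUT
      @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
      public Response setSomething() {
        // Empty body: pin the Content-Type to application/octet-stream instead of
        // letting the framework default it to application/json.
        return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
      }
    }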
(szetszwo) + Release 0.23.1 - 2012-02-17 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java index 093cd9c863e..44ef5273386 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java @@ -117,7 +117,7 @@ public class DatanodeWebHdfsMethods { @PUT @Path("/") @Consumes({"*/*"}) - @Produces({MediaType.APPLICATION_JSON}) + @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON}) public Response putRoot( final InputStream in, @Context final UserGroupInformation ugi, @@ -147,7 +147,7 @@ public class DatanodeWebHdfsMethods { @PUT @Path("{" + UriFsPathParam.NAME + ":.*}") @Consumes({"*/*"}) - @Produces({MediaType.APPLICATION_JSON}) + @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON}) public Response put( final InputStream in, @Context final UserGroupInformation ugi, @@ -209,7 +209,7 @@ public class DatanodeWebHdfsMethods { final InetSocketAddress nnHttpAddr = NameNode.getHttpAddress(conf); final URI uri = new URI(WebHdfsFileSystem.SCHEME, null, nnHttpAddr.getHostName(), nnHttpAddr.getPort(), fullpath, null, null); - return Response.created(uri).type(MediaType.APPLICATION_JSON).build(); + return Response.created(uri).type(MediaType.APPLICATION_OCTET_STREAM).build(); } default: throw new UnsupportedOperationException(op + " is not supported"); @@ -222,7 +222,7 @@ public class DatanodeWebHdfsMethods { @POST @Path("/") @Consumes({"*/*"}) - @Produces({MediaType.APPLICATION_JSON}) + @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON}) public Response postRoot( final InputStream in, @Context final UserGroupInformation ugi, @@ -243,7 +243,7 @@ public class DatanodeWebHdfsMethods { @POST @Path("{" + UriFsPathParam.NAME + ":.*}") @Consumes({"*/*"}) - @Produces({MediaType.APPLICATION_JSON}) + @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON}) public Response post( final InputStream in, @Context final UserGroupInformation ugi, @@ -287,7 +287,7 @@ public class DatanodeWebHdfsMethods { IOUtils.cleanup(LOG, out); IOUtils.cleanup(LOG, dfsclient); } - return Response.ok().type(MediaType.APPLICATION_JSON).build(); + return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build(); } default: throw new UnsupportedOperationException(op + " is not supported"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java index 44e5e13bc8a..e041b034082 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java @@ -215,7 +215,7 @@ public class NamenodeWebHdfsMethods { @PUT @Path("/") @Consumes({"*/*"}) - @Produces({MediaType.APPLICATION_JSON}) + @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON}) public Response putRoot( @Context final 
UserGroupInformation ugi, @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT) @@ -263,7 +263,7 @@ public class NamenodeWebHdfsMethods { @PUT @Path("{" + UriFsPathParam.NAME + ":.*}") @Consumes({"*/*"}) - @Produces({MediaType.APPLICATION_JSON}) + @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON}) public Response put( @Context final UserGroupInformation ugi, @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT) @@ -324,7 +324,7 @@ public class NamenodeWebHdfsMethods { final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser, fullpath, op.getValue(), -1L, permission, overwrite, bufferSize, replication, blockSize); - return Response.temporaryRedirect(uri).build(); + return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build(); } case MKDIRS: { @@ -336,7 +336,7 @@ public class NamenodeWebHdfsMethods { { np.createSymlink(destination.getValue(), fullpath, PermissionParam.getDefaultFsPermission(), createParent.getValue()); - return Response.ok().type(MediaType.APPLICATION_JSON).build(); + return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build(); } case RENAME: { @@ -348,7 +348,7 @@ public class NamenodeWebHdfsMethods { } else { np.rename2(fullpath, destination.getValue(), s.toArray(new Options.Rename[s.size()])); - return Response.ok().type(MediaType.APPLICATION_JSON).build(); + return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build(); } } case SETREPLICATION: @@ -364,17 +364,17 @@ public class NamenodeWebHdfsMethods { } np.setOwner(fullpath, owner.getValue(), group.getValue()); - return Response.ok().type(MediaType.APPLICATION_JSON).build(); + return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build(); } case SETPERMISSION: { np.setPermission(fullpath, permission.getFsPermission()); - return Response.ok().type(MediaType.APPLICATION_JSON).build(); + return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build(); } case SETTIMES: { np.setTimes(fullpath, modificationTime.getValue(), accessTime.getValue()); - return Response.ok().type(MediaType.APPLICATION_JSON).build(); + return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build(); } case RENEWDELEGATIONTOKEN: { @@ -389,7 +389,7 @@ public class NamenodeWebHdfsMethods { final Token token = new Token(); token.decodeFromUrlString(delegationTokenArgument.getValue()); np.cancelDelegationToken(token); - return Response.ok().type(MediaType.APPLICATION_JSON).build(); + return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build(); } default: throw new UnsupportedOperationException(op + " is not supported"); @@ -406,7 +406,7 @@ public class NamenodeWebHdfsMethods { @POST @Path("/") @Consumes({"*/*"}) - @Produces({MediaType.APPLICATION_JSON}) + @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON}) public Response postRoot( @Context final UserGroupInformation ugi, @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT) @@ -427,7 +427,7 @@ public class NamenodeWebHdfsMethods { @POST @Path("{" + UriFsPathParam.NAME + ":.*}") @Consumes({"*/*"}) - @Produces({MediaType.APPLICATION_JSON}) + @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON}) public Response post( @Context final UserGroupInformation ugi, @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT) @@ -459,7 +459,7 @@ public class NamenodeWebHdfsMethods { { final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser, fullpath, op.getValue(), -1L, 
bufferSize); - return Response.temporaryRedirect(uri).build(); + return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build(); } default: throw new UnsupportedOperationException(op + " is not supported"); @@ -542,7 +542,7 @@ public class NamenodeWebHdfsMethods { { final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser, fullpath, op.getValue(), offset.getValue(), offset, length, bufferSize); - return Response.temporaryRedirect(uri).build(); + return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build(); } case GET_BLOCK_LOCATIONS: { @@ -578,7 +578,7 @@ public class NamenodeWebHdfsMethods { { final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser, fullpath, op.getValue(), -1L); - return Response.temporaryRedirect(uri).build(); + return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build(); } case GETDELEGATIONTOKEN: { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java index 0c2372c4ed5..b551dd1927e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java @@ -27,6 +27,7 @@ import java.net.URL; import java.util.Map; import javax.servlet.http.HttpServletResponse; +import javax.ws.rs.core.MediaType; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; @@ -314,6 +315,8 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest { conn.setRequestMethod(op.getType().toString()); conn.connect(); assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode()); + assertEquals(0, conn.getContentLength()); + assertEquals(MediaType.APPLICATION_OCTET_STREAM, conn.getContentType()); assertEquals((short)0755, webhdfs.getFileStatus(dir).getPermission().toShort()); conn.disconnect(); } From 1de3fa8653d3bfbea3772f2a81a069aad6ef6aa4 Mon Sep 17 00:00:00 2001 From: Arun Murthy Date: Sat, 25 Feb 2012 01:37:07 +0000 Subject: [PATCH 10/13] Preparing for 0.23.3 release. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1293506 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 15 +++++++++++++++ hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 15 +++++++++++++++ hadoop-mapreduce-project/CHANGES.txt | 15 ++++++++++++++- 3 files changed, 44 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 3aea617ed12..694cebe0012 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -3,6 +3,7 @@ Hadoop Change Log Trunk (unreleased changes) INCOMPATIBLE CHANGES + HADOOP-7920. Remove Avro Rpc. (suresh) NEW FEATURES @@ -160,8 +161,22 @@ Trunk (unreleased changes) HADOOP-7761. Improve the performance of raw comparisons. 
(todd) +Release 0.23.3 - UNRELEASED + + INCOMPATIBLE CHANGES + + NEW FEATURES + + IMPROVEMENTS + + OPTIMIZATIONS + + BUG FIXES + Release 0.23.2 - UNRELEASED + INCOMPATIBLE CHANGES + NEW FEATURES IMPROVEMENTS diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index fc74f3cbdf1..da55e2e93e8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -1,10 +1,13 @@ Hadoop HDFS Change Log Trunk (unreleased changes) + INCOMPATIBLE CHANGES + HDFS-2676. Remove Avro RPC. (suresh) NEW FEATURES + HDFS-395. DFS Scalability: Incremental block reports. (Tomasz Nykiel via hairong) @@ -221,6 +224,18 @@ Trunk (unreleased changes) HDFS-2968. Protocol translator for BlockRecoveryCommand broken when multiple blocks need recovery. (todd) +Release 0.23.3 - UNRELEASED + + INCOMPATIBLE CHANGES + + NEW FEATURES + + IMPROVEMENTS + + OPTIMIZATIONS + + BUG FIXES + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 8bcd0d68e53..f54150973a6 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -93,10 +93,23 @@ Trunk (unreleased changes) MAPREDUCE-3818. Fixed broken compilation in TestSubmitJob after the patch for HDFS-2895. (Suresh Srinivas via vinodkv) -Release 0.23.2 - UNRELEASED +Release 0.23.3 - UNRELEASED + + INCOMPATIBLE CHANGES NEW FEATURES + + IMPROVEMENTS + OPTIMIZATIONS + + BUG FIXES + +Release 0.23.2 - UNRELEASED + + INCOMPATIBLE CHANGES + + NEW FEATURES IMPROVEMENTS MAPREDUCE-3849. Change TokenCache's reading of the binary token file From 7a082ec2bd29d04abe0dc86349d163d6e03250eb Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Sat, 25 Feb 2012 02:03:59 +0000 Subject: [PATCH 11/13] MAPREDUCE-2793. Corrected AppIDs, JobIDs, TaskAttemptIDs to be of correct format on the web pages. Contributed by Bikas Saha. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1293517 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 + .../v2/app/webapp/AMWebServices.java | 26 ++++++++- .../hadoop/mapreduce/v2/app/MockJobs.java | 14 +++++ .../app/webapp/TestAMWebServicesAttempts.java | 18 +++--- .../v2/app/webapp/TestAMWebServicesJobs.java | 24 ++++---- .../v2/app/webapp/TestAMWebServicesTasks.java | 23 ++++---- .../v2/jobhistory/JobHistoryUtils.java | 8 +-- .../hadoop/mapreduce/v2/util/MRApps.java | 58 ++++--------------- .../hadoop/mapreduce/v2/util/TestMRApps.java | 42 +++++++------- .../hadoop/mapreduce/TaskAttemptID.java | 12 +++- .../org/apache/hadoop/mapreduce/TaskID.java | 11 +++- .../hs/webapp/TestHsWebServicesAttempts.java | 22 ++++--- .../v2/hs/webapp/TestHsWebServicesJobs.java | 20 ++++--- .../hs/webapp/TestHsWebServicesJobsQuery.java | 57 +++++++++--------- .../v2/hs/webapp/TestHsWebServicesTasks.java | 23 ++++---- 15 files changed, 193 insertions(+), 168 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index f54150973a6..ac7fdaae94c 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -127,6 +127,9 @@ Release 0.23.2 - UNRELEASED MAPREDUCE-3730. Modified RM to allow restarted NMs to be able to join the cluster without waiting for expiry. (Jason Lowe via vinodkv) + MAPREDUCE-2793. Corrected AppIDs, JobIDs, TaskAttemptIDs to be of correct + format on the web pages. 
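For the MAPREDUCE-2793 entry above: the web pages and REST APIs now emit and parse the classic MapReduce ID grammar (job_<clusterTimestamp>_<seq>, task_..._m_000000, attempt_..._m_000000_0), with MRApps delegating to the existing forName() parsers, which reject anything else with IllegalArgumentException. A brief hedged sketch of that round trip; the sample timestamp and sequence numbers are invented.

    import org.apache.hadoop.mapreduce.JobID;
    import org.apache.hadoop.mapreduce.TaskAttemptID;

    // Sketch only: the canonical ID strings expected after MAPREDUCE-2793,
    // parsed with the same forName() helpers MRApps now delegates to.
    public class IdFormatSketch {
      public static void main(String[] args) {
        JobID job = JobID.forName("job_1330128055959_0001");
        TaskAttemptID attempt =
            TaskAttemptID.forName("attempt_1330128055959_0001_m_000000_0");
        System.out.println(job + " / " + attempt);

        try {
          JobID.forName("job_1234_1_2");  // the old _appid_appid_jobid shape is rejected
        } catch (IllegalArgumentException e) {
          System.out.println(e.getMessage());  // "JobId string : ... is not properly formed"
        }
      }
    }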
(Bikas Saha via vinodkv) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java index 01c5c95e177..f62cba0ccec 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java @@ -99,6 +99,14 @@ public class AMWebServices { try { jobId = MRApps.toJobID(jid); } catch (YarnException e) { + // TODO: after MAPREDUCE-2793 YarnException is probably not expected here + // anymore but keeping it for now just in case other stuff starts failing. + // Also, the webservice should ideally return BadRequest (HTTP:400) when + // the id is malformed instead of NotFound (HTTP:404). The webserver on + // top of which AMWebServices is built seems to automatically do that for + // unhandled exceptions + throw new NotFoundException(e.getMessage()); + } catch (IllegalArgumentException e) { throw new NotFoundException(e.getMessage()); } if (jobId == null) { @@ -121,10 +129,18 @@ public class AMWebServices { try { taskID = MRApps.toTaskID(tid); } catch (YarnException e) { + // TODO: after MAPREDUCE-2793 YarnException is probably not expected here + // anymore but keeping it for now just in case other stuff starts failing. + // Also, the webservice should ideally return BadRequest (HTTP:400) when + // the id is malformed instead of NotFound (HTTP:404). The webserver on + // top of which AMWebServices is built seems to automatically do that for + // unhandled exceptions throw new NotFoundException(e.getMessage()); } catch (NumberFormatException ne) { throw new NotFoundException(ne.getMessage()); - } + } catch (IllegalArgumentException e) { + throw new NotFoundException(e.getMessage()); + } if (taskID == null) { throw new NotFoundException("taskid " + tid + " not found or invalid"); } @@ -146,9 +162,17 @@ public class AMWebServices { try { attemptId = MRApps.toTaskAttemptID(attId); } catch (YarnException e) { + // TODO: after MAPREDUCE-2793 YarnException is probably not expected here + // anymore but keeping it for now just in case other stuff starts failing. + // Also, the webservice should ideally return BadRequest (HTTP:400) when + // the id is malformed instead of NotFound (HTTP:404). 
The webserver on + // top of which AMWebServices is built seems to automatically do that for + // unhandled exceptions throw new NotFoundException(e.getMessage()); } catch (NumberFormatException ne) { throw new NotFoundException(ne.getMessage()); + } catch (IllegalArgumentException e) { + throw new NotFoundException(e.getMessage()); } if (attemptId == null) { throw new NotFoundException("task attempt id " + attId diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java index c8e20f64577..cfecb3268e9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java @@ -106,6 +106,20 @@ public class MockJobs extends MockApps { return newAppName(); } + /** + * Create numJobs in a map with jobs having appId==jobId + */ + public static Map newJobs(int numJobs, int numTasksPerJob, + int numAttemptsPerTask) { + Map map = Maps.newHashMap(); + for (int j = 0; j < numJobs; ++j) { + ApplicationId appID = MockJobs.newAppID(j); + Job job = newJob(appID, j, numTasksPerJob, numAttemptsPerTask); + map.put(job.getID(), job); + } + return map; + } + public static Map newJobs(ApplicationId appID, int numJobsPerApp, int numTasksPerJob, int numAttemptsPerTask) { Map map = Maps.newHashMap(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java index e33a50671c8..9be01d5bb61 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java @@ -396,36 +396,36 @@ public class TestAMWebServicesAttempts extends JerseyTest { public void testTaskAttemptIdBogus() throws JSONException, Exception { testTaskAttemptIdErrorGeneric("bogusid", - "java.lang.Exception: Error parsing attempt ID: bogusid"); + "java.lang.Exception: TaskAttemptId string : bogusid is not properly formed"); } @Test public void testTaskAttemptIdNonExist() throws JSONException, Exception { testTaskAttemptIdErrorGeneric( - "attempt_12345_0_0_r_1_0", - "java.lang.Exception: Error getting info on task attempt id attempt_12345_0_0_r_1_0"); + "attempt_0_12345_m_000000_0", + "java.lang.Exception: Error getting info on task attempt id attempt_0_12345_m_000000_0"); } @Test public void testTaskAttemptIdInvalid() throws JSONException, Exception { - testTaskAttemptIdErrorGeneric("attempt_12345_0_0_d_1_0", - "java.lang.Exception: Unknown task symbol: d"); + testTaskAttemptIdErrorGeneric("attempt_0_12345_d_000000_0", + "java.lang.Exception: Bad TaskType identifier. 
TaskAttemptId string : attempt_0_12345_d_000000_0 is not properly formed."); } @Test public void testTaskAttemptIdInvalid2() throws JSONException, Exception { - testTaskAttemptIdErrorGeneric("attempt_12345_0_r_1_0", - "java.lang.Exception: For input string: \"r\""); + testTaskAttemptIdErrorGeneric("attempt_12345_m_000000_0", + "java.lang.Exception: TaskAttemptId string : attempt_12345_m_000000_0 is not properly formed"); } @Test public void testTaskAttemptIdInvalid3() throws JSONException, Exception { - testTaskAttemptIdErrorGeneric("attempt_12345_0_0_r_1", - "java.lang.Exception: Error parsing attempt ID: attempt_12345_0_0_r_1"); + testTaskAttemptIdErrorGeneric("attempt_0_12345_m_000000", + "java.lang.Exception: TaskAttemptId string : attempt_0_12345_m_000000 is not properly formed"); } private void testTaskAttemptIdErrorGeneric(String attid, String error) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesJobs.java index a0846e4ac35..1ede67276df 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesJobs.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesJobs.java @@ -320,7 +320,7 @@ public class TestAMWebServicesJobs extends JerseyTest { try { r.path("ws").path("v1").path("mapreduce").path("jobs") - .path("job_1234_1_2").get(JSONObject.class); + .path("job_0_1234").get(JSONObject.class); fail("should have thrown exception on invalid uri"); } catch (UniformInterfaceException ue) { ClientResponse response = ue.getResponse(); @@ -333,7 +333,7 @@ public class TestAMWebServicesJobs extends JerseyTest { String type = exception.getString("exception"); String classname = exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message", - "java.lang.Exception: job, job_1234_1_2, is not found", message); + "java.lang.Exception: job, job_0_1234, is not found", message); WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", type); WebServicesTestUtils.checkStringMatch("exception classname", @@ -351,7 +351,7 @@ public class TestAMWebServicesJobs extends JerseyTest { fail("should have thrown exception on invalid uri"); } catch (UniformInterfaceException ue) { ClientResponse response = ue.getResponse(); - assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus()); + assertEquals(Status.NOT_FOUND, response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); JSONObject msg = response.getEntity(JSONObject.class); JSONObject exception = msg.getJSONObject("RemoteException"); @@ -374,7 +374,7 @@ public class TestAMWebServicesJobs extends JerseyTest { fail("should have thrown exception on invalid uri"); } catch (UniformInterfaceException ue) { ClientResponse response = ue.getResponse(); - assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus()); + assertEquals(Status.NOT_FOUND, response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); JSONObject msg = response.getEntity(JSONObject.class); JSONObject exception = msg.getJSONObject("RemoteException"); @@ -397,7 
+397,7 @@ public class TestAMWebServicesJobs extends JerseyTest { fail("should have thrown exception on invalid uri"); } catch (UniformInterfaceException ue) { ClientResponse response = ue.getResponse(); - assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus()); + assertEquals(Status.NOT_FOUND, response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType()); String msg = response.getEntity(String.class); System.out.println(msg); @@ -418,11 +418,12 @@ public class TestAMWebServicesJobs extends JerseyTest { private void verifyJobIdInvalid(String message, String type, String classname) { WebServicesTestUtils.checkStringMatch("exception message", - "For input string: \"foo\"", message); + "java.lang.Exception: JobId string : job_foo is not properly formed", + message); WebServicesTestUtils.checkStringMatch("exception type", - "NumberFormatException", type); + "NotFoundException", type); WebServicesTestUtils.checkStringMatch("exception classname", - "java.lang.NumberFormatException", classname); + "org.apache.hadoop.yarn.webapp.NotFoundException", classname); } @Test @@ -443,8 +444,11 @@ public class TestAMWebServicesJobs extends JerseyTest { String message = exception.getString("message"); String type = exception.getString("exception"); String classname = exception.getString("javaClassName"); - WebServicesTestUtils.checkStringMatch("exception message", - "java.lang.Exception: Error parsing job ID: bogusfoo", message); + WebServicesTestUtils + .checkStringMatch( + "exception message", + "java.lang.Exception: JobId string : bogusfoo is not properly formed", + message); WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", type); WebServicesTestUtils.checkStringMatch("exception classname", diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesTasks.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesTasks.java index e3fdd932cf7..0fdcba8c135 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesTasks.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesTasks.java @@ -424,7 +424,8 @@ public class TestAMWebServicesTasks extends JerseyTest { String type = exception.getString("exception"); String classname = exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message", - "java.lang.Exception: Error parsing task ID: bogustaskid", message); + "java.lang.Exception: TaskId string : " + + "bogustaskid is not properly formed", message); WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", type); WebServicesTestUtils.checkStringMatch("exception classname", @@ -439,7 +440,7 @@ public class TestAMWebServicesTasks extends JerseyTest { Map jobsMap = appContext.getAllJobs(); for (JobId id : jobsMap.keySet()) { String jobId = MRApps.toString(id); - String tid = "task_1234_0_0_m_0"; + String tid = "task_0_0000_m_000000"; try { r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId) .path("tasks").path(tid).get(JSONObject.class); @@ -455,7 +456,7 @@ public class TestAMWebServicesTasks extends JerseyTest { String type = exception.getString("exception"); 
String classname = exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message", - "java.lang.Exception: task not found with id task_1234_0_0_m_0", + "java.lang.Exception: task not found with id task_0_0000_m_000000", message); WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", type); @@ -471,7 +472,7 @@ public class TestAMWebServicesTasks extends JerseyTest { Map jobsMap = appContext.getAllJobs(); for (JobId id : jobsMap.keySet()) { String jobId = MRApps.toString(id); - String tid = "task_1234_0_0_d_0"; + String tid = "task_0_0000_d_000000"; try { r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId) .path("tasks").path(tid).get(JSONObject.class); @@ -487,7 +488,8 @@ public class TestAMWebServicesTasks extends JerseyTest { String type = exception.getString("exception"); String classname = exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message", - "java.lang.Exception: Unknown task symbol: d", message); + "java.lang.Exception: Bad TaskType identifier. TaskId string : " + + "task_0_0000_d_000000 is not properly formed.", message); WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", type); WebServicesTestUtils.checkStringMatch("exception classname", @@ -502,7 +504,7 @@ public class TestAMWebServicesTasks extends JerseyTest { Map jobsMap = appContext.getAllJobs(); for (JobId id : jobsMap.keySet()) { String jobId = MRApps.toString(id); - String tid = "task_1234_0_m_0"; + String tid = "task_0_m_000000"; try { r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId) .path("tasks").path(tid).get(JSONObject.class); @@ -518,7 +520,8 @@ public class TestAMWebServicesTasks extends JerseyTest { String type = exception.getString("exception"); String classname = exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message", - "java.lang.Exception: For input string: \"m\"", message); + "java.lang.Exception: TaskId string : " + + "task_0_m_000000 is not properly formed", message); WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", type); WebServicesTestUtils.checkStringMatch("exception classname", @@ -533,7 +536,7 @@ public class TestAMWebServicesTasks extends JerseyTest { Map jobsMap = appContext.getAllJobs(); for (JobId id : jobsMap.keySet()) { String jobId = MRApps.toString(id); - String tid = "task_1234_0_0_m"; + String tid = "task_0_0000_m"; try { r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId) .path("tasks").path(tid).get(JSONObject.class); @@ -549,8 +552,8 @@ public class TestAMWebServicesTasks extends JerseyTest { String type = exception.getString("exception"); String classname = exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message", - "java.lang.Exception: Error parsing task ID: task_1234_0_0_m", - message); + "java.lang.Exception: TaskId string : " + + "task_0_0000_m is not properly formed", message); WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", type); WebServicesTestUtils.checkStringMatch("exception classname", diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java index c1da2fe6a94..a42f9ebe320 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java @@ -506,11 +506,9 @@ public class JobHistoryUtils { sb.append(address.getHostName()); } sb.append(":").append(address.getPort()); - sb.append("/jobhistory/job/"); // TODO This will change when the history server - // understands apps. - // TOOD Use JobId toString once UI stops using _id_id - sb.append("job_").append(appId.getClusterTimestamp()); - sb.append("_").append(appId.getId()).append("_").append(appId.getId()); + sb.append("/jobhistory/job/"); + JobID jobId = TypeConverter.fromYarn(appId); + sb.append(jobId.toString()); return sb.toString(); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java index 572ebde5515..38497951aba 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java @@ -18,9 +18,6 @@ package org.apache.hadoop.mapreduce.v2.util; -import static org.apache.hadoop.yarn.util.StringHelper._join; -import static org.apache.hadoop.yarn.util.StringHelper._split; - import java.io.BufferedReader; import java.io.File; import java.io.IOException; @@ -30,7 +27,6 @@ import java.net.URI; import java.net.URL; import java.util.Arrays; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Map; @@ -39,7 +35,11 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.mapreduce.MRJobConfig; +import org.apache.hadoop.mapreduce.TaskAttemptID; +import org.apache.hadoop.mapreduce.TaskID; +import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.filecache.DistributedCache; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; @@ -50,12 +50,10 @@ import org.apache.hadoop.yarn.ContainerLogAppender; import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.api.ApplicationConstants; -import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.util.Apps; import org.apache.hadoop.yarn.util.BuilderUtils; @@ -65,64 +63,28 @@ import org.apache.hadoop.yarn.util.BuilderUtils; @Private @Unstable public class MRApps extends Apps { - public static final String JOB = "job"; - public static final String TASK = "task"; - public static final String ATTEMPT = 
"attempt"; - public static String toString(JobId jid) { - return _join(JOB, jid.getAppId().getClusterTimestamp(), jid.getAppId().getId(), jid.getId()); + return jid.toString(); } public static JobId toJobID(String jid) { - Iterator it = _split(jid).iterator(); - return toJobID(JOB, jid, it); - } - - // mostly useful for parsing task/attempt id like strings - public static JobId toJobID(String prefix, String s, Iterator it) { - ApplicationId appId = toAppID(prefix, s, it); - shouldHaveNext(prefix, s, it); - JobId jobId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class); - jobId.setAppId(appId); - jobId.setId(Integer.parseInt(it.next())); - return jobId; + return TypeConverter.toYarn(JobID.forName(jid)); } public static String toString(TaskId tid) { - return _join("task", tid.getJobId().getAppId().getClusterTimestamp(), tid.getJobId().getAppId().getId(), - tid.getJobId().getId(), taskSymbol(tid.getTaskType()), tid.getId()); + return tid.toString(); } public static TaskId toTaskID(String tid) { - Iterator it = _split(tid).iterator(); - return toTaskID(TASK, tid, it); - } - - public static TaskId toTaskID(String prefix, String s, Iterator it) { - JobId jid = toJobID(prefix, s, it); - shouldHaveNext(prefix, s, it); - TaskId tid = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskId.class); - tid.setJobId(jid); - tid.setTaskType(taskType(it.next())); - shouldHaveNext(prefix, s, it); - tid.setId(Integer.parseInt(it.next())); - return tid; + return TypeConverter.toYarn(TaskID.forName(tid)); } public static String toString(TaskAttemptId taid) { - return _join("attempt", taid.getTaskId().getJobId().getAppId().getClusterTimestamp(), - taid.getTaskId().getJobId().getAppId().getId(), taid.getTaskId().getJobId().getId(), - taskSymbol(taid.getTaskId().getTaskType()), taid.getTaskId().getId(), taid.getId()); + return taid.toString(); } public static TaskAttemptId toTaskAttemptID(String taid) { - Iterator it = _split(taid).iterator(); - TaskId tid = toTaskID(ATTEMPT, taid, it); - shouldHaveNext(ATTEMPT, taid, it); - TaskAttemptId taId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskAttemptId.class); - taId.setTaskId(tid); - taId.setId(Integer.parseInt(it.next())); - return taId; + return TypeConverter.toYarn(TaskAttemptID.forName(taid)); } public static String taskSymbol(TaskType type) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java index 715b6c626d9..94ce417396e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java @@ -43,18 +43,18 @@ public class TestMRApps { @Test public void testJobIDtoString() { JobId jid = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class); jid.setAppId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class)); - assertEquals("job_0_0_0", MRApps.toString(jid)); + assertEquals("job_0_0000", MRApps.toString(jid)); } @Test public void testToJobID() { - JobId jid = MRApps.toJobID("job_1_1_1"); + JobId jid = MRApps.toJobID("job_1_1"); assertEquals(1, 
jid.getAppId().getClusterTimestamp()); assertEquals(1, jid.getAppId().getId()); - assertEquals(1, jid.getId()); + assertEquals(1, jid.getId()); // tests against some proto.id and not a job.id field } - @Test(expected=YarnException.class) public void testJobIDShort() { - MRApps.toJobID("job_0_0"); + @Test(expected=IllegalArgumentException.class) public void testJobIDShort() { + MRApps.toJobID("job_0_0_0"); } //TODO_get.set @@ -68,29 +68,29 @@ public class TestMRApps { type = TaskType.REDUCE; System.err.println(type); System.err.println(tid.getTaskType()); - assertEquals("task_0_0_0_m_0", MRApps.toString(tid)); + assertEquals("task_0_0000_m_000000", MRApps.toString(tid)); tid.setTaskType(TaskType.REDUCE); - assertEquals("task_0_0_0_r_0", MRApps.toString(tid)); + assertEquals("task_0_0000_r_000000", MRApps.toString(tid)); } @Test public void testToTaskID() { - TaskId tid = MRApps.toTaskID("task_1_2_3_r_4"); + TaskId tid = MRApps.toTaskID("task_1_2_r_3"); assertEquals(1, tid.getJobId().getAppId().getClusterTimestamp()); assertEquals(2, tid.getJobId().getAppId().getId()); - assertEquals(3, tid.getJobId().getId()); + assertEquals(2, tid.getJobId().getId()); assertEquals(TaskType.REDUCE, tid.getTaskType()); - assertEquals(4, tid.getId()); + assertEquals(3, tid.getId()); - tid = MRApps.toTaskID("task_1_2_3_m_4"); + tid = MRApps.toTaskID("task_1_2_m_3"); assertEquals(TaskType.MAP, tid.getTaskType()); } - @Test(expected=YarnException.class) public void testTaskIDShort() { - MRApps.toTaskID("task_0_0_0_m"); + @Test(expected=IllegalArgumentException.class) public void testTaskIDShort() { + MRApps.toTaskID("task_0_0000_m"); } - @Test(expected=YarnException.class) public void testTaskIDBadType() { - MRApps.toTaskID("task_0_0_0_x_0"); + @Test(expected=IllegalArgumentException.class) public void testTaskIDBadType() { + MRApps.toTaskID("task_0_0000_x_000000"); } //TODO_get.set @@ -100,19 +100,19 @@ public class TestMRApps { taid.getTaskId().setTaskType(TaskType.MAP); taid.getTaskId().setJobId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class)); taid.getTaskId().getJobId().setAppId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class)); - assertEquals("attempt_0_0_0_m_0_0", MRApps.toString(taid)); + assertEquals("attempt_0_0000_m_000000_0", MRApps.toString(taid)); } @Test public void testToTaskAttemptID() { - TaskAttemptId taid = MRApps.toTaskAttemptID("attempt_0_1_2_m_3_4"); + TaskAttemptId taid = MRApps.toTaskAttemptID("attempt_0_1_m_2_3"); assertEquals(0, taid.getTaskId().getJobId().getAppId().getClusterTimestamp()); assertEquals(1, taid.getTaskId().getJobId().getAppId().getId()); - assertEquals(2, taid.getTaskId().getJobId().getId()); - assertEquals(3, taid.getTaskId().getId()); - assertEquals(4, taid.getId()); + assertEquals(1, taid.getTaskId().getJobId().getId()); + assertEquals(2, taid.getTaskId().getId()); + assertEquals(3, taid.getId()); } - @Test(expected=YarnException.class) public void testTaskAttemptIDShort() { + @Test(expected=IllegalArgumentException.class) public void testTaskAttemptIDShort() { MRApps.toTaskAttemptID("attempt_0_0_0_m_0"); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskAttemptID.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskAttemptID.java index 70fcf1024c9..f2467f02a3b 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskAttemptID.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskAttemptID.java @@ -159,6 +159,7 @@ public class TaskAttemptID extends org.apache.hadoop.mapred.ID { ) throws IllegalArgumentException { if(str == null) return null; + String exceptionMsg = null; try { String[] parts = str.split(Character.toString(SEPARATOR)); if(parts.length == 6) { @@ -171,14 +172,19 @@ public class TaskAttemptID extends org.apache.hadoop.mapred.ID { Integer.parseInt(parts[2]), t, Integer.parseInt(parts[4]), Integer.parseInt(parts[5])); - } else throw new Exception(); + } else + exceptionMsg = "Bad TaskType identifier. TaskAttemptId string : " + + str + " is not properly formed."; } } } catch (Exception ex) { //fall below } - throw new IllegalArgumentException("TaskAttemptId string : " + str - + " is not properly formed"); + if (exceptionMsg == null) { + exceptionMsg = "TaskAttemptId string : " + str + + " is not properly formed"; + } + throw new IllegalArgumentException(exceptionMsg); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskID.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskID.java index c98ec904fa3..3dc2babcbe6 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskID.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskID.java @@ -184,6 +184,7 @@ public class TaskID extends org.apache.hadoop.mapred.ID { throws IllegalArgumentException { if(str == null) return null; + String exceptionMsg = null; try { String[] parts = str.split("_"); if(parts.length == 5) { @@ -196,13 +197,17 @@ public class TaskID extends org.apache.hadoop.mapred.ID { Integer.parseInt(parts[2]), t, Integer.parseInt(parts[4])); - } else throw new Exception(); + } else + exceptionMsg = "Bad TaskType identifier. 
TaskId string : " + str + + " is not properly formed."; } } }catch (Exception ex) {//fall below } - throw new IllegalArgumentException("TaskId string : " + str - + " is not properly formed"); + if (exceptionMsg == null) { + exceptionMsg = "TaskId string : " + str + " is not properly formed"; + } + throw new IllegalArgumentException(exceptionMsg); } /** * Gets the character representing the {@link TaskType} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAttempts.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAttempts.java index 7ba200fcc53..94a9f7a809e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAttempts.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAttempts.java @@ -408,36 +408,40 @@ public class TestHsWebServicesAttempts extends JerseyTest { public void testTaskAttemptIdBogus() throws JSONException, Exception { testTaskAttemptIdErrorGeneric("bogusid", - "java.lang.Exception: Error parsing attempt ID: bogusid"); + "java.lang.Exception: TaskAttemptId string : " + + "bogusid is not properly formed"); } @Test public void testTaskAttemptIdNonExist() throws JSONException, Exception { testTaskAttemptIdErrorGeneric( - "attempt_12345_0_0_r_1_0", - "java.lang.Exception: Error getting info on task attempt id attempt_12345_0_0_r_1_0"); + "attempt_0_1234_m_000000_0", + "java.lang.Exception: Error getting info on task attempt id attempt_0_1234_m_000000_0"); } @Test public void testTaskAttemptIdInvalid() throws JSONException, Exception { - testTaskAttemptIdErrorGeneric("attempt_12345_0_0_d_1_0", - "java.lang.Exception: Unknown task symbol: d"); + testTaskAttemptIdErrorGeneric("attempt_0_1234_d_000000_0", + "java.lang.Exception: Bad TaskType identifier. 
TaskAttemptId string : " + + "attempt_0_1234_d_000000_0 is not properly formed."); } @Test public void testTaskAttemptIdInvalid2() throws JSONException, Exception { - testTaskAttemptIdErrorGeneric("attempt_12345_0_r_1_0", - "java.lang.Exception: For input string: \"r\""); + testTaskAttemptIdErrorGeneric("attempt_1234_m_000000_0", + "java.lang.Exception: TaskAttemptId string : " + + "attempt_1234_m_000000_0 is not properly formed"); } @Test public void testTaskAttemptIdInvalid3() throws JSONException, Exception { - testTaskAttemptIdErrorGeneric("attempt_12345_0_0_r_1", - "java.lang.Exception: Error parsing attempt ID: attempt_12345_0_0_r_1"); + testTaskAttemptIdErrorGeneric("attempt_0_1234_m_000000", + "java.lang.Exception: TaskAttemptId string : " + + "attempt_0_1234_m_000000 is not properly formed"); } private void testTaskAttemptIdErrorGeneric(String attid, String error) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobs.java index fd811809567..ec7df9be552 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobs.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobs.java @@ -367,7 +367,7 @@ public class TestHsWebServicesJobs extends JerseyTest { try { r.path("ws").path("v1").path("history").path("mapreduce").path("jobs") - .path("job_1234_1_2").get(JSONObject.class); + .path("job_0_1234").get(JSONObject.class); fail("should have thrown exception on invalid uri"); } catch (UniformInterfaceException ue) { ClientResponse response = ue.getResponse(); @@ -380,7 +380,7 @@ public class TestHsWebServicesJobs extends JerseyTest { String type = exception.getString("exception"); String classname = exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message", - "java.lang.Exception: job, job_1234_1_2, is not found", message); + "java.lang.Exception: job, job_0_1234, is not found", message); WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", type); WebServicesTestUtils.checkStringMatch("exception classname", @@ -399,7 +399,7 @@ public class TestHsWebServicesJobs extends JerseyTest { fail("should have thrown exception on invalid uri"); } catch (UniformInterfaceException ue) { ClientResponse response = ue.getResponse(); - assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus()); + assertEquals(Status.NOT_FOUND, response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); JSONObject msg = response.getEntity(JSONObject.class); JSONObject exception = msg.getJSONObject("RemoteException"); @@ -423,7 +423,7 @@ public class TestHsWebServicesJobs extends JerseyTest { fail("should have thrown exception on invalid uri"); } catch (UniformInterfaceException ue) { ClientResponse response = ue.getResponse(); - assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus()); + assertEquals(Status.NOT_FOUND, response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); JSONObject msg = response.getEntity(JSONObject.class); JSONObject exception = 
msg.getJSONObject("RemoteException"); @@ -447,7 +447,7 @@ public class TestHsWebServicesJobs extends JerseyTest { fail("should have thrown exception on invalid uri"); } catch (UniformInterfaceException ue) { ClientResponse response = ue.getResponse(); - assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus()); + assertEquals(Status.NOT_FOUND, response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType()); String msg = response.getEntity(String.class); System.out.println(msg); @@ -468,11 +468,12 @@ public class TestHsWebServicesJobs extends JerseyTest { private void verifyJobIdInvalid(String message, String type, String classname) { WebServicesTestUtils.checkStringMatch("exception message", - "For input string: \"foo\"", message); + "java.lang.Exception: JobId string : job_foo is not properly formed", + message); WebServicesTestUtils.checkStringMatch("exception type", - "NumberFormatException", type); + "NotFoundException", type); WebServicesTestUtils.checkStringMatch("exception classname", - "java.lang.NumberFormatException", classname); + "org.apache.hadoop.yarn.webapp.NotFoundException", classname); } @Test @@ -494,7 +495,8 @@ public class TestHsWebServicesJobs extends JerseyTest { String type = exception.getString("exception"); String classname = exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message", - "java.lang.Exception: Error parsing job ID: bogusfoo", message); + "java.lang.Exception: JobId string : " + + "bogusfoo is not properly formed", message); WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", type); WebServicesTestUtils.checkStringMatch("exception classname", diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobsQuery.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobsQuery.java index 74af1f6d767..ed58a6f3b77 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobsQuery.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobsQuery.java @@ -72,30 +72,26 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { private static HsWebApp webApp; static class TestAppContext implements AppContext { - final ApplicationAttemptId appAttemptID; - final ApplicationId appID; final String user = MockJobs.newUserName(); final Map jobs; final long startTime = System.currentTimeMillis(); - TestAppContext(int appid, int numJobs, int numTasks, int numAttempts) { - appID = MockJobs.newAppID(appid); - appAttemptID = MockJobs.newAppAttemptID(appID, 0); - jobs = MockJobs.newJobs(appID, numJobs, numTasks, numAttempts); + TestAppContext(int numJobs, int numTasks, int numAttempts) { + jobs = MockJobs.newJobs(numJobs, numTasks, numAttempts); } TestAppContext() { - this(0, 3, 2, 1); + this(3, 2, 1); } @Override public ApplicationAttemptId getApplicationAttemptId() { - return appAttemptID; + return null; } @Override public ApplicationId getApplicationID() { - return appID; + return null; } @Override @@ -177,7 +173,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { .contextPath("jersey-guice-filter").servletPath("/").build()); } - @Test + 
//@Test public void testJobsQueryUserNone() throws JSONException, Exception { WebResource r = resource(); ClientResponse response = r.path("ws").path("v1").path("history") @@ -191,6 +187,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { @Test public void testJobsQueryUser() throws JSONException, Exception { + System.out.println("###test start"); WebResource r = resource(); ClientResponse response = r.path("ws").path("v1").path("history") .path("mapreduce").path("jobs").queryParam("user", "mock") @@ -207,7 +204,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { VerifyJobsUtils.verifyHsJob(info, job); } - @Test + //@Test public void testJobsQueryLimit() throws JSONException, Exception { WebResource r = resource(); ClientResponse response = r.path("ws").path("v1").path("history") @@ -222,7 +219,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { assertEquals("incorrect number of elements", 2, arr.length()); } - @Test + //@Test public void testJobsQueryLimitInvalid() throws JSONException, Exception { WebResource r = resource(); @@ -246,7 +243,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { "org.apache.hadoop.yarn.webapp.BadRequestException", classname); } - @Test + //@Test public void testJobsQueryQueue() throws JSONException, Exception { WebResource r = resource(); ClientResponse response = r.path("ws").path("v1").path("history") @@ -260,7 +257,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { assertEquals("incorrect number of elements", 3, arr.length()); } - @Test + //@Test public void testJobsQueryQueueNonExist() throws JSONException, Exception { WebResource r = resource(); ClientResponse response = r.path("ws").path("v1").path("history") @@ -272,7 +269,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { assertEquals("jobs is not null", JSONObject.NULL, json.get("jobs")); } - @Test + //@Test public void testJobsQueryStartTimeEnd() throws JSONException, Exception { WebResource r = resource(); // the mockJobs start time is the current time - some random amount @@ -289,7 +286,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { assertEquals("incorrect number of elements", 3, arr.length()); } - @Test + //@Test public void testJobsQueryStartTimeBegin() throws JSONException, Exception { WebResource r = resource(); // the mockJobs start time is the current time - some random amount @@ -304,7 +301,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { assertEquals("jobs is not null", JSONObject.NULL, json.get("jobs")); } - @Test + //@Test public void testJobsQueryStartTimeBeginEnd() throws JSONException, Exception { WebResource r = resource(); Map jobsMap = appContext.getAllJobs(); @@ -332,7 +329,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { assertEquals("incorrect number of elements", size - 1, arr.length()); } - @Test + //@Test public void testJobsQueryStartTimeBeginEndInvalid() throws JSONException, Exception { WebResource r = resource(); @@ -361,7 +358,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { "org.apache.hadoop.yarn.webapp.BadRequestException", classname); } - @Test + //@Test public void testJobsQueryStartTimeInvalidformat() throws JSONException, Exception { WebResource r = resource(); @@ -387,7 +384,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { "org.apache.hadoop.yarn.webapp.BadRequestException", classname); } - @Test + //@Test public void testJobsQueryStartTimeEndInvalidformat() throws JSONException, Exception { 
WebResource r = resource(); @@ -413,7 +410,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { "org.apache.hadoop.yarn.webapp.BadRequestException", classname); } - @Test + //@Test public void testJobsQueryStartTimeNegative() throws JSONException, Exception { WebResource r = resource(); ClientResponse response = r.path("ws").path("v1").path("history") @@ -438,7 +435,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { "org.apache.hadoop.yarn.webapp.BadRequestException", classname); } - @Test + //@Test public void testJobsQueryStartTimeEndNegative() throws JSONException, Exception { WebResource r = resource(); @@ -462,7 +459,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { "org.apache.hadoop.yarn.webapp.BadRequestException", classname); } - @Test + //@Test public void testJobsQueryFinishTimeEndNegative() throws JSONException, Exception { WebResource r = resource(); @@ -486,7 +483,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { "org.apache.hadoop.yarn.webapp.BadRequestException", classname); } - @Test + //@Test public void testJobsQueryFinishTimeBeginNegative() throws JSONException, Exception { WebResource r = resource(); @@ -511,7 +508,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { "org.apache.hadoop.yarn.webapp.BadRequestException", classname); } - @Test + //@Test public void testJobsQueryFinishTimeBeginEndInvalid() throws JSONException, Exception { WebResource r = resource(); @@ -540,7 +537,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { "org.apache.hadoop.yarn.webapp.BadRequestException", classname); } - @Test + //@Test public void testJobsQueryFinishTimeInvalidformat() throws JSONException, Exception { WebResource r = resource(); @@ -566,7 +563,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { "org.apache.hadoop.yarn.webapp.BadRequestException", classname); } - @Test + //@Test public void testJobsQueryFinishTimeEndInvalidformat() throws JSONException, Exception { WebResource r = resource(); @@ -592,7 +589,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { "org.apache.hadoop.yarn.webapp.BadRequestException", classname); } - @Test + //@Test public void testJobsQueryFinishTimeBegin() throws JSONException, Exception { WebResource r = resource(); // the mockJobs finish time is the current time + some random amount @@ -609,7 +606,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { assertEquals("incorrect number of elements", 3, arr.length()); } - @Test + //@Test public void testJobsQueryFinishTimeEnd() throws JSONException, Exception { WebResource r = resource(); // the mockJobs finish time is the current time + some random amount @@ -624,7 +621,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { assertEquals("jobs is not null", JSONObject.NULL, json.get("jobs")); } - @Test + //@Test public void testJobsQueryFinishTimeBeginEnd() throws JSONException, Exception { WebResource r = resource(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesTasks.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesTasks.java index b0780aff5a1..3ada5bee7d9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesTasks.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesTasks.java @@ -435,7 +435,8 @@ public class TestHsWebServicesTasks extends JerseyTest { String type = exception.getString("exception"); String classname = exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message", - "java.lang.Exception: Error parsing task ID: bogustaskid", message); + "java.lang.Exception: TaskId string : " + + "bogustaskid is not properly formed", message); WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", type); WebServicesTestUtils.checkStringMatch("exception classname", @@ -450,7 +451,7 @@ public class TestHsWebServicesTasks extends JerseyTest { Map jobsMap = appContext.getAllJobs(); for (JobId id : jobsMap.keySet()) { String jobId = MRApps.toString(id); - String tid = "task_1234_0_0_m_0"; + String tid = "task_0_0000_m_000000"; try { r.path("ws").path("v1").path("history").path("mapreduce").path("jobs") .path(jobId).path("tasks").path(tid).get(JSONObject.class); @@ -466,7 +467,7 @@ public class TestHsWebServicesTasks extends JerseyTest { String type = exception.getString("exception"); String classname = exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message", - "java.lang.Exception: task not found with id task_1234_0_0_m_0", + "java.lang.Exception: task not found with id task_0_0000_m_000000", message); WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", type); @@ -482,7 +483,7 @@ public class TestHsWebServicesTasks extends JerseyTest { Map jobsMap = appContext.getAllJobs(); for (JobId id : jobsMap.keySet()) { String jobId = MRApps.toString(id); - String tid = "task_1234_0_0_d_0"; + String tid = "task_0_0000_d_000000"; try { r.path("ws").path("v1").path("history").path("mapreduce").path("jobs") .path(jobId).path("tasks").path(tid).get(JSONObject.class); @@ -498,7 +499,8 @@ public class TestHsWebServicesTasks extends JerseyTest { String type = exception.getString("exception"); String classname = exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message", - "java.lang.Exception: Unknown task symbol: d", message); + "java.lang.Exception: Bad TaskType identifier. 
TaskId string : " + + "task_0_0000_d_000000 is not properly formed.", message); WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", type); WebServicesTestUtils.checkStringMatch("exception classname", @@ -513,7 +515,7 @@ public class TestHsWebServicesTasks extends JerseyTest { Map jobsMap = appContext.getAllJobs(); for (JobId id : jobsMap.keySet()) { String jobId = MRApps.toString(id); - String tid = "task_1234_0_m_0"; + String tid = "task_0000_m_000000"; try { r.path("ws").path("v1").path("history").path("mapreduce").path("jobs") .path(jobId).path("tasks").path(tid).get(JSONObject.class); @@ -529,7 +531,8 @@ public class TestHsWebServicesTasks extends JerseyTest { String type = exception.getString("exception"); String classname = exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message", - "java.lang.Exception: For input string: \"m\"", message); + "java.lang.Exception: TaskId string : " + + "task_0000_m_000000 is not properly formed", message); WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", type); WebServicesTestUtils.checkStringMatch("exception classname", @@ -544,7 +547,7 @@ public class TestHsWebServicesTasks extends JerseyTest { Map jobsMap = appContext.getAllJobs(); for (JobId id : jobsMap.keySet()) { String jobId = MRApps.toString(id); - String tid = "task_1234_0_0_m"; + String tid = "task_0_0000_m"; try { r.path("ws").path("v1").path("history").path("mapreduce").path("jobs") .path(jobId).path("tasks").path(tid).get(JSONObject.class); @@ -560,8 +563,8 @@ public class TestHsWebServicesTasks extends JerseyTest { String type = exception.getString("exception"); String classname = exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message", - "java.lang.Exception: Error parsing task ID: task_1234_0_0_m", - message); + "java.lang.Exception: TaskId string : " + + "task_0_0000_m is not properly formed", message); WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", type); WebServicesTestUtils.checkStringMatch("exception classname", From d83672d71c44c74b1f2997f8c9dc9a25c9fd5daf Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Sat, 25 Feb 2012 02:30:55 +0000 Subject: [PATCH 12/13] MAPREDUCE-2793. Reneabling commented out test from previous comment. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1293521 13f79535-47bb-0310-9956-ffa450edef68 --- .../hs/webapp/TestHsWebServicesJobsQuery.java | 43 +++++++++---------- 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobsQuery.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobsQuery.java index ed58a6f3b77..7376798e4c3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobsQuery.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobsQuery.java @@ -173,7 +173,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { .contextPath("jersey-guice-filter").servletPath("/").build()); } - //@Test + @Test public void testJobsQueryUserNone() throws JSONException, Exception { WebResource r = resource(); ClientResponse response = r.path("ws").path("v1").path("history") @@ -187,7 +187,6 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { @Test public void testJobsQueryUser() throws JSONException, Exception { - System.out.println("###test start"); WebResource r = resource(); ClientResponse response = r.path("ws").path("v1").path("history") .path("mapreduce").path("jobs").queryParam("user", "mock") @@ -204,7 +203,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { VerifyJobsUtils.verifyHsJob(info, job); } - //@Test + @Test public void testJobsQueryLimit() throws JSONException, Exception { WebResource r = resource(); ClientResponse response = r.path("ws").path("v1").path("history") @@ -219,7 +218,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { assertEquals("incorrect number of elements", 2, arr.length()); } - //@Test + @Test public void testJobsQueryLimitInvalid() throws JSONException, Exception { WebResource r = resource(); @@ -243,7 +242,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { "org.apache.hadoop.yarn.webapp.BadRequestException", classname); } - //@Test + @Test public void testJobsQueryQueue() throws JSONException, Exception { WebResource r = resource(); ClientResponse response = r.path("ws").path("v1").path("history") @@ -257,7 +256,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { assertEquals("incorrect number of elements", 3, arr.length()); } - //@Test + @Test public void testJobsQueryQueueNonExist() throws JSONException, Exception { WebResource r = resource(); ClientResponse response = r.path("ws").path("v1").path("history") @@ -269,7 +268,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { assertEquals("jobs is not null", JSONObject.NULL, json.get("jobs")); } - //@Test + @Test public void testJobsQueryStartTimeEnd() throws JSONException, Exception { WebResource r = resource(); // the mockJobs start time is the current time - some random amount @@ -286,7 +285,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { assertEquals("incorrect number of elements", 3, arr.length()); } - //@Test + @Test public void testJobsQueryStartTimeBegin() throws JSONException, Exception { WebResource r = resource(); // the mockJobs start time is the current time - some random amount @@ -301,7 +300,7 @@ 
public class TestHsWebServicesJobsQuery extends JerseyTest { assertEquals("jobs is not null", JSONObject.NULL, json.get("jobs")); } - //@Test + @Test public void testJobsQueryStartTimeBeginEnd() throws JSONException, Exception { WebResource r = resource(); Map jobsMap = appContext.getAllJobs(); @@ -329,7 +328,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { assertEquals("incorrect number of elements", size - 1, arr.length()); } - //@Test + @Test public void testJobsQueryStartTimeBeginEndInvalid() throws JSONException, Exception { WebResource r = resource(); @@ -358,7 +357,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { "org.apache.hadoop.yarn.webapp.BadRequestException", classname); } - //@Test + @Test public void testJobsQueryStartTimeInvalidformat() throws JSONException, Exception { WebResource r = resource(); @@ -384,7 +383,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { "org.apache.hadoop.yarn.webapp.BadRequestException", classname); } - //@Test + @Test public void testJobsQueryStartTimeEndInvalidformat() throws JSONException, Exception { WebResource r = resource(); @@ -410,7 +409,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { "org.apache.hadoop.yarn.webapp.BadRequestException", classname); } - //@Test + @Test public void testJobsQueryStartTimeNegative() throws JSONException, Exception { WebResource r = resource(); ClientResponse response = r.path("ws").path("v1").path("history") @@ -435,7 +434,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { "org.apache.hadoop.yarn.webapp.BadRequestException", classname); } - //@Test + @Test public void testJobsQueryStartTimeEndNegative() throws JSONException, Exception { WebResource r = resource(); @@ -459,7 +458,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { "org.apache.hadoop.yarn.webapp.BadRequestException", classname); } - //@Test + @Test public void testJobsQueryFinishTimeEndNegative() throws JSONException, Exception { WebResource r = resource(); @@ -483,7 +482,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { "org.apache.hadoop.yarn.webapp.BadRequestException", classname); } - //@Test + @Test public void testJobsQueryFinishTimeBeginNegative() throws JSONException, Exception { WebResource r = resource(); @@ -508,7 +507,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { "org.apache.hadoop.yarn.webapp.BadRequestException", classname); } - //@Test + @Test public void testJobsQueryFinishTimeBeginEndInvalid() throws JSONException, Exception { WebResource r = resource(); @@ -537,7 +536,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { "org.apache.hadoop.yarn.webapp.BadRequestException", classname); } - //@Test + @Test public void testJobsQueryFinishTimeInvalidformat() throws JSONException, Exception { WebResource r = resource(); @@ -563,7 +562,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { "org.apache.hadoop.yarn.webapp.BadRequestException", classname); } - //@Test + @Test public void testJobsQueryFinishTimeEndInvalidformat() throws JSONException, Exception { WebResource r = resource(); @@ -589,7 +588,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { "org.apache.hadoop.yarn.webapp.BadRequestException", classname); } - //@Test + @Test public void testJobsQueryFinishTimeBegin() throws JSONException, Exception { WebResource r = resource(); // the mockJobs finish time is the current time + some random amount @@ -606,7 +605,7 @@ public class TestHsWebServicesJobsQuery 
extends JerseyTest { assertEquals("incorrect number of elements", 3, arr.length()); } - //@Test + @Test public void testJobsQueryFinishTimeEnd() throws JSONException, Exception { WebResource r = resource(); // the mockJobs finish time is the current time + some random amount @@ -621,7 +620,7 @@ public class TestHsWebServicesJobsQuery extends JerseyTest { assertEquals("jobs is not null", JSONObject.NULL, json.get("jobs")); } - //@Test + @Test public void testJobsQueryFinishTimeBeginEnd() throws JSONException, Exception { WebResource r = resource(); From b57260f848da5cfc6b03c871987ed34d8bfda9c7 Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Sat, 25 Feb 2012 22:14:53 +0000 Subject: [PATCH 13/13] HDFS-2978. The NameNode should expose name dir statuses via JMX. Contributed by Aaron T. Myers. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1293707 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 4 +- .../hdfs/server/namenode/FSNamesystem.java | 26 ++++++++++ .../hdfs/server/namenode/NameNodeMXBean.java | 8 ++++ .../server/namenode/TestNameNodeMXBean.java | 48 +++++++++++++++++++ 4 files changed, 85 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index da55e2e93e8..37912e30ec7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -228,7 +228,9 @@ Release 0.23.3 - UNRELEASED INCOMPATIBLE CHANGES - NEW FEATURES + NEW FEATURES + + HDFS-2978. The NameNode should expose name dir statuses via JMX. (atm) IMPROVEMENTS diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index f846295f0e6..40f173405a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -141,6 +141,8 @@ import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; +import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType; +import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; import org.apache.hadoop.hdfs.server.common.Util; @@ -4496,6 +4498,30 @@ public class FSNamesystem implements Namesystem, FSClusterStats, public String getBlockPoolId() { return blockPoolId; } + + @Override // NameNodeMXBean + public String getNameDirStatuses() { + Map> statusMap = + new HashMap>(); + + Map activeDirs = new HashMap(); + for (Iterator it + = getFSImage().getStorage().dirIterator(); it.hasNext();) { + StorageDirectory st = it.next(); + activeDirs.put(st.getRoot(), st.getStorageDirType()); + } + statusMap.put("active", activeDirs); + + List removedStorageDirs + = getFSImage().getStorage().getRemovedStorageDirs(); + Map failedDirs = new HashMap(); + for (StorageDirectory st : removedStorageDirs) { + failedDirs.put(st.getRoot(), st.getStorageDirType()); + } + statusMap.put("failed", failedDirs); + + return JSON.toString(statusMap); + } /** @return the 
block manager. */ public BlockManager getBlockManager() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java index 6e1d8e7cc3f..1fb8869f5f8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java @@ -166,4 +166,12 @@ public interface NameNodeMXBean { * @return the block pool id */ public String getBlockPoolId(); + + /** + * Get status information about the directories storing image and edits logs + * of the NN. + * + * @return the name dir status information, as a JSON string. + */ + public String getNameDirStatuses(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java index 17016640024..66470421450 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java @@ -17,23 +17,33 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.junit.Assert.*; + +import java.io.File; import java.lang.management.ManagementFactory; +import java.net.URI; +import java.util.Collection; +import java.util.Map; import javax.management.MBeanServer; import javax.management.ObjectName; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.util.VersionInfo; import org.junit.Test; +import org.mortbay.util.ajax.JSON; + import junit.framework.Assert; /** * Class for testing {@link NameNodeMXBean} implementation */ public class TestNameNodeMXBean { + @SuppressWarnings({ "unchecked", "deprecation" }) @Test public void testNameNodeMXBeanInfo() throws Exception { Configuration conf = new Configuration(); @@ -88,8 +98,46 @@ public class TestNameNodeMXBean { String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName, "DeadNodes")); Assert.assertEquals(fsn.getDeadNodes(), deadnodeinfo); + // get attribute NameDirStatuses + String nameDirStatuses = (String) (mbs.getAttribute(mxbeanName, + "NameDirStatuses")); + Assert.assertEquals(fsn.getNameDirStatuses(), nameDirStatuses); + Map> statusMap = + (Map>) JSON.parse(nameDirStatuses); + Collection nameDirUris = cluster.getNameDirs(0); + for (URI nameDirUri : nameDirUris) { + File nameDir = new File(nameDirUri); + System.out.println("Checking for the presence of " + nameDir + + " in active name dirs."); + assertTrue(statusMap.get("active").containsKey(nameDir.getAbsolutePath())); + } + assertEquals(2, statusMap.get("active").size()); + assertEquals(0, statusMap.get("failed").size()); + + // This will cause the first dir to fail. 
+ File failedNameDir = new File(nameDirUris.toArray(new URI[0])[0]); + assertEquals(0, FileUtil.chmod(failedNameDir.getAbsolutePath(), "000")); + cluster.getNameNodeRpc().rollEditLog(); + + nameDirStatuses = (String) (mbs.getAttribute(mxbeanName, + "NameDirStatuses")); + statusMap = (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses); + for (URI nameDirUri : nameDirUris) { + File nameDir = new File(nameDirUri); + String expectedStatus = + nameDir.equals(failedNameDir) ? "failed" : "active"; + System.out.println("Checking for the presence of " + nameDir + + " in " + expectedStatus + " name dirs."); + assertTrue(statusMap.get(expectedStatus).containsKey( + nameDir.getAbsolutePath())); + } + assertEquals(1, statusMap.get("active").size()); + assertEquals(1, statusMap.get("failed").size()); } finally { if (cluster != null) { + for (URI dir : cluster.getNameDirs(0)) { + FileUtil.chmod(new File(dir).toString(), "700"); + } cluster.shutdown(); } }
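As a rough, illustrative sketch of how the NameDirStatuses attribute introduced by HDFS-2978 might be consumed from outside the NameNode process, the snippet below connects to a NameNode over remote JMX, reads the attribute, and splits the JSON into active and failed name directories. Only the attribute name and the {"active": ..., "failed": ...} layout come from the patch above; the MBean object name "Hadoop:service=NameNode,name=NameNodeInfo", the host "nn-host", and the JMX port 8004 are assumptions for illustration, and the NameNode JVM would need to be started with remote JMX enabled for this to work.

import java.util.Map;

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

import org.mortbay.util.ajax.JSON;

public class NameDirStatusCheck {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) throws Exception {
    // Assumption: the NameNode exposes remote JMX on nn-host:8004.
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://nn-host:8004/jmxrmi");
    JMXConnector connector = JMXConnectorFactory.connect(url);
    try {
      MBeanServerConnection mbs = connector.getMBeanServerConnection();
      // Assumption: the NameNode MXBean is registered under this object name.
      ObjectName nnInfo =
          new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");

      // The attribute is a JSON string of roughly this shape:
      // {"active":{"/data/nn1":"IMAGE_AND_EDITS"},"failed":{}}
      String json = (String) mbs.getAttribute(nnInfo, "NameDirStatuses");

      // Same parsing approach as TestNameNodeMXBean in the patch above.
      Map<String, Map<String, String>> statuses =
          (Map<String, Map<String, String>>) JSON.parse(json);
      System.out.println("Active name dirs: " + statuses.get("active").keySet());
      System.out.println("Failed name dirs: " + statuses.get("failed").keySet());
    } finally {
      connector.close();
    }
  }
}

Publishing the statuses as a single JSON string keeps the MXBean interface to simple types, at the cost of requiring clients to parse the string themselves, as both the test above and this sketch do.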