From d511d704d2c71063129b625ec832faa0c0c43c79 Mon Sep 17 00:00:00 2001
From: Jitendra Nath Pandey
Date: Sat, 4 Feb 2012 07:02:30 +0000
Subject: [PATCH 01/27] Merged r1240460 from trunk for HDFS-2785.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1240461 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../hadoop/hdfs/web/WebHdfsFileSystem.java | 19 +++++++++----------
2 files changed, 12 insertions(+), 10 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d85e9d23636..7ba0131955f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -104,6 +104,9 @@ Release 0.23.1 - UNRELEASED
HDFS-2784. Update hftp and hdfs for host-based token support.
(Kihwal Lee via jitendra)
+ HDFS-2785. Update webhdfs and httpfs for host-based token support.
+ (Robert Joseph Evans via jitendra)
+
OPTIMIZATIONS
HDFS-2130. Switch default checksum to CRC32C. (todd)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 3a802065167..c64dfb14e8a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -141,6 +141,7 @@ public class WebHdfsFileSystem extends FileSystem
private final UserGroupInformation ugi;
private InetSocketAddress nnAddr;
+ private URI uri;
private Token<?> delegationToken;
private final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
private Path workingDir;
@@ -158,7 +159,11 @@ public class WebHdfsFileSystem extends FileSystem
) throws IOException {
super.initialize(uri, conf);
setConf(conf);
-
+ try {
+ this.uri = new URI(uri.getScheme(), uri.getAuthority(), null, null, null);
+ } catch (URISyntaxException e) {
+ throw new IllegalArgumentException(e);
+ }
this.nnAddr = NetUtils.createSocketAddr(uri.toString());
this.workingDir = getHomeDirectory();
@@ -203,12 +208,7 @@ public class WebHdfsFileSystem extends FileSystem
@Override
public URI getUri() {
- try {
- return new URI(SCHEME, null, nnAddr.getHostName(), nnAddr.getPort(),
- null, null, null);
- } catch (URISyntaxException e) {
- return null;
- }
+ return this.uri;
}
/** @return the home directory. */
@@ -810,8 +810,7 @@ public class WebHdfsFileSystem extends FileSystem
final Token<?> token, final Configuration conf
) throws IOException, InterruptedException, URISyntaxException {
- final InetSocketAddress nnAddr = NetUtils.createSocketAddr(
- token.getService().toString());
+ final InetSocketAddress nnAddr = SecurityUtil.getTokenServiceAddr(token);
final URI uri = DFSUtil.createUri(WebHdfsFileSystem.SCHEME, nnAddr);
return (WebHdfsFileSystem)FileSystem.get(uri, conf);
}
@@ -821,7 +820,7 @@ public class WebHdfsFileSystem extends FileSystem
) throws IOException, InterruptedException {
final UserGroupInformation ugi = UserGroupInformation.getLoginUser();
// update the kerberos credentials, if they are coming from a keytab
- ugi.checkTGTAndReloginFromKeytab();
+ ugi.reloginFromKeytab();
try {
WebHdfsFileSystem webhdfs = getWebHdfs(token, conf);
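Note: the fix replaces per-call URI reconstruction in getUri() (which silently returned null on URISyntaxException) with a URI cached once during initialize(), and resolves the NameNode address from the token service via SecurityUtil.getTokenServiceAddr(). A minimal sketch of the cache-at-initialize pattern, using illustrative names rather than the real Hadoop classes:

    import java.net.URI;
    import java.net.URISyntaxException;

    // Sketch: resolve and cache the filesystem URI once at construction
    // instead of rebuilding it (and swallowing URISyntaxException) on
    // every getUri() call. Illustrative class, not the Hadoop API.
    public class CachedUriFileSystem {
      private final URI uri;

      public CachedUriFileSystem(URI requested) {
        try {
          // Keep only scheme and authority, as the patched initialize() does.
          this.uri = new URI(requested.getScheme(), requested.getAuthority(),
              null, null, null);
        } catch (URISyntaxException e) {
          // Fail fast here rather than returning null later from getUri().
          throw new IllegalArgumentException(e);
        }
      }

      public URI getUri() {
        return uri; // no per-call reconstruction, no swallowed exception
      }

      public static void main(String[] args) {
        CachedUriFileSystem fs = new CachedUriFileSystem(
            URI.create("webhdfs://nn.example.com:50070/user/alice"));
        System.out.println(fs.getUri()); // webhdfs://nn.example.com:50070
      }
    }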
From 43d921b0ad9e5e4409d23b653f6e5d0791d79557 Mon Sep 17 00:00:00 2001
From: Mahadev Konar
Date: Sat, 4 Feb 2012 18:45:17 +0000
Subject: [PATCH 02/27] MAPREDUCE-3791. can't build site in
hadoop-yarn-server-common. (mahadev) - Merging r1240587 from trunk
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1240588 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +++
.../hadoop-yarn/hadoop-yarn-common/pom.xml | 11 +++++++++++
.../hadoop-yarn-server-common/pom.xml | 1 -
hadoop-project/src/site/site.xml | 2 +-
4 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 1c16716864e..4a934ee4724 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -641,6 +641,9 @@ Release 0.23.1 - Unreleased
MAPREDUCE-3803. Fix broken build of raid contrib due to HDFS-2864.
(Ravi Prakash via suresh)
+ MAPREDUCE-3791. can't build site in hadoop-yarn-server-common.
+ (mahadev)
+
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index 116eadb477d..fefedea5404 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -70,6 +70,17 @@
run
+
+ pre-site
+
+ run
+
+
+
+
+
+
+
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index ebaf899f7b1..e05e6ddbb36 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -64,7 +64,6 @@
-
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index df7afbb05f8..ce5a4e7320a 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -90,7 +90,7 @@
From 845fb480c6b11503c33ef51ccfc31b86657782e7 Mon Sep 17 00:00:00 2001
From: Mahadev Konar
Date: Sat, 4 Feb 2012 18:57:20 +0000
Subject: [PATCH 03/27] MAPREDUCE-3723. TestAMWebServicesJobs &
TestHSWebServicesJobs incorrectly asserting tests (Bhallamudi Venkata Siva
Kamesh via mahadev) - Merging r1240590 from trunk.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1240591 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 4 ++++
.../hadoop/mapreduce/v2/app/webapp/TestAMWebServicesJobs.java | 2 +-
.../hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobs.java | 2 +-
3 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 4a934ee4724..24591fb0fa2 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -644,6 +644,10 @@ Release 0.23.1 - Unreleased
MAPREDUCE-3791. can't build site in hadoop-yarn-server-common.
(mahadev)
+ MAPREDUCE-3723. TestAMWebServicesJobs & TestHSWebServicesJobs
+ incorrectly asserting tests (Bhallamudi Venkata Siva Kamesh
+ via mahadev)
+
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesJobs.java
index 0f22b657505..a0846e4ac35 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesJobs.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesJobs.java
@@ -777,7 +777,7 @@ public class TestAMWebServicesJobs extends JerseyTest {
assertTrue("name not set", (name != null && !name.isEmpty()));
JSONArray counters = counterGroup.getJSONArray("counter");
for (int j = 0; j < counters.length(); j++) {
- JSONObject counter = counters.getJSONObject(i);
+ JSONObject counter = counters.getJSONObject(j);
String counterName = counter.getString("name");
assertTrue("counter name not set",
(counterName != null && !counterName.isEmpty()));
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobs.java
index 24926045b17..fd811809567 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobs.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobs.java
@@ -617,7 +617,7 @@ public class TestHsWebServicesJobs extends JerseyTest {
assertTrue("name not set", (name != null && !name.isEmpty()));
JSONArray counters = counterGroup.getJSONArray("counter");
for (int j = 0; j < counters.length(); j++) {
- JSONObject counter = counters.getJSONObject(i);
+ JSONObject counter = counters.getJSONObject(j);
String counterName = counter.getString("name");
assertTrue("counter name not set",
(counterName != null && !counterName.isEmpty()));
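Note: both tests advanced the inner loop with j but indexed the counter array with the stale outer variable i, so every iteration re-validated the same element and a bad entry anywhere else went unnoticed. A self-contained sketch of this bug class, with made-up data:

    // Sketch: an inner loop that increments j but reads with i only ever
    // inspects element 0, so the "buggy" pass misses the invalid entry.
    public class LoopIndexBug {
      public static void main(String[] args) {
        String[] counters = { "ok", "ok", "" }; // last entry is invalid
        int i = 0; // stale outer index, as in the counter-group loops

        boolean buggyAllValid = true;
        for (int j = 0; j < counters.length; j++) {
          buggyAllValid &= !counters[i].isEmpty(); // BUG: should use j
        }

        boolean fixedAllValid = true;
        for (int j = 0; j < counters.length; j++) {
          fixedAllValid &= !counters[j].isEmpty(); // catches the empty entry
        }

        System.out.println("buggy check passed: " + buggyAllValid); // true
        System.out.println("fixed check passed: " + fixedAllValid); // false
      }
    }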
From 7fcab49694763c60c1c48dc9d5becca4b90b1e8c Mon Sep 17 00:00:00 2001
From: Mahadev Konar
Date: Sat, 4 Feb 2012 19:35:35 +0000
Subject: [PATCH 04/27] MAPREDUCE-3795. "job -status" command line output is
malformed. (vinodkv via mahadev) - Merging r1240593 from trunk.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1240594 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +++
.../java/org/apache/hadoop/mapreduce/Job.java | 4 ++--
.../mapreduce/TestJobMonitorAndPrint.java | 20 +++++++++++++------
3 files changed, 19 insertions(+), 8 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 24591fb0fa2..eae3f68d36d 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -648,6 +648,9 @@ Release 0.23.1 - Unreleased
incorrectly asserting tests (Bhallamudi Venkata Siva Kamesh
via mahadev)
+ MAPREDUCE-3795. "job -status" command line output is malformed.
+ (vinodkv via mahadev)
+
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
index ba6b07e8ac9..c502d4cb230 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
@@ -472,8 +472,8 @@ public class Job extends JobContextImpl implements JobContext {
sb.append("Job Tracking URL : ").append(status.getTrackingUrl());
sb.append("\n");
sb.append("Uber job : ").append(status.isUber()).append("\n");
- sb.append("Number of maps: ").append(numMaps);
- sb.append("Number of reduces: ").append(numReduces);
+ sb.append("Number of maps: ").append(numMaps).append("\n");
+ sb.append("Number of reduces: ").append(numReduces).append("\n");
sb.append("map() completion: ");
sb.append(status.getMapProgress()).append("\n");
sb.append("reduce() completion: ");
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java
index 7121620906e..1b533e76c6c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java
@@ -21,7 +21,9 @@ package org.apache.hadoop.mapreduce;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyInt;
+import static org.mockito.Matchers.isA;
import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
@@ -34,6 +36,7 @@ import java.io.StringReader;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapred.TaskReport;
import org.apache.hadoop.mapreduce.JobStatus.State;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
import org.apache.log4j.Layout;
@@ -88,6 +91,7 @@ public class TestJobMonitorAndPrint extends TestCase {
}
).when(job).getTaskCompletionEvents(anyInt(), anyInt());
+ doReturn(new TaskReport[5]).when(job).getTaskReports(isA(TaskType.class));
when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1, jobStatus_2);
// setup the logger to capture all logs
Layout layout =
@@ -106,21 +110,25 @@ public class TestJobMonitorAndPrint extends TestCase {
boolean foundHundred = false;
boolean foundComplete = false;
boolean foundUber = false;
- String match_1 = "uber mode : true";
- String match_2 = "map 100% reduce 100%";
- String match_3 = "completed successfully";
+ String uberModeMatch = "uber mode : true";
+ String progressMatch = "map 100% reduce 100%";
+ String completionMatch = "completed successfully";
while ((line = r.readLine()) != null) {
- if (line.contains(match_1)) {
+ if (line.contains(uberModeMatch)) {
foundUber = true;
}
- foundHundred = line.contains(match_2);
+ foundHundred = line.contains(progressMatch);
if (foundHundred)
break;
}
line = r.readLine();
- foundComplete = line.contains(match_3);
+ foundComplete = line.contains(completionMatch);
assertTrue(foundUber);
assertTrue(foundHundred);
assertTrue(foundComplete);
+
+ System.out.println("The output of job.toString() is : \n" + job.toString());
+ assertTrue(job.toString().contains("Number of maps: 5\n"));
+ assertTrue(job.toString().contains("Number of reduces: 5\n"));
}
}
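Note: Job.toString() appended the map and reduce counts without trailing newlines, so both counts and the next label ran together on one line of "job -status" output; the new assertions pin the newline-terminated form. A minimal sketch of the symptom, with made-up values:

    // Sketch: missing append("\n") calls collapse three fields onto one line.
    public class StatusFormat {
      public static void main(String[] args) {
        StringBuilder broken = new StringBuilder();
        broken.append("Number of maps: ").append(5);
        broken.append("Number of reduces: ").append(5);
        broken.append("map() completion: ").append(1.0);
        // Prints: Number of maps: 5Number of reduces: 5map() completion: 1.0
        System.out.println(broken);

        StringBuilder fixed = new StringBuilder();
        fixed.append("Number of maps: ").append(5).append("\n");
        fixed.append("Number of reduces: ").append(5).append("\n");
        fixed.append("map() completion: ").append(1.0).append("\n");
        System.out.print(fixed); // one field per line
      }
    }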
From 9bcf9304335eb0459f0ee1b234d3ea8a3d74e1a6 Mon Sep 17 00:00:00 2001
From: Mahadev Konar
Date: Sat, 4 Feb 2012 20:04:44 +0000
Subject: [PATCH 05/27] MAPREDUCE-3759. ClassCastException thrown in
-list-active-trackers when there are a few unhealthy nodes (vinodkv via
mahadev) - Merging r1240598 from trunk.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1240599 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +
.../resourcemanager/ClientRMService.java | 9 +-
.../resourcemanager/ResourceManager.java | 2 +-
.../resourcemanager/TestClientRMService.java | 82 +++++++++++++++++++
4 files changed, 93 insertions(+), 3 deletions(-)
create mode 100644 hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index eae3f68d36d..628b2574e41 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -651,6 +651,9 @@ Release 0.23.1 - Unreleased
MAPREDUCE-3795. "job -status" command line output is malformed.
(vinodkv via mahadev)
+ MAPREDUCE-3759. ClassCastException thrown in -list-active-trackers when
+ there are a few unhealthy nodes (vinodkv via mahadev)
+
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index f02ae5148bf..40fdfc02fe0 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -28,6 +28,7 @@ import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.Text;
@@ -75,7 +76,6 @@ import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
import org.apache.hadoop.yarn.server.RMDelegationTokenSecretManager;
import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants;
-import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
@@ -157,6 +157,11 @@ public class ClientRMService extends AbstractService implements
super.start();
}
+ @Private
+ public InetSocketAddress getBindAddress() {
+ return clientBindAddress;
+ }
+
/**
* check if the calling user has the access to application information.
* @param callerUGI
@@ -412,7 +417,7 @@ public class ClientRMService extends AbstractService implements
SchedulerNodeReport schedulerNodeReport =
scheduler.getNodeReport(rmNode.getNodeID());
- Resource used = Resources.none();
+ Resource used = BuilderUtils.newResource(0);
int numContainers = 0;
if (schedulerNodeReport != null) {
used = schedulerNodeReport.getUsedResource();
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index cda23f7a5bb..dc7d29cdcbc 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -110,7 +110,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
protected ApplicationACLsManager applicationACLsManager;
protected RMDelegationTokenSecretManager rmDTSecretManager;
private WebApp webApp;
- private RMContext rmContext;
+ protected RMContext rmContext;
private final Store store;
protected ResourceTrackerService resourceTracker;
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
new file mode 100644
index 00000000000..5ea88814c17
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import java.net.InetSocketAddress;
+import java.util.List;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.ClientRMProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.util.Records;
+import org.junit.Test;
+
+public class TestClientRMService {
+
+ private static final Log LOG = LogFactory.getLog(TestClientRMService.class);
+
+ @Test
+ public void testGetClusterNodes() throws Exception {
+ MockRM rm = new MockRM() {
+ protected ClientRMService createClientRMService() {
+ return new ClientRMService(this.rmContext, scheduler,
+ this.rmAppManager, this.applicationACLsManager,
+ this.rmDTSecretManager);
+ };
+ };
+ rm.start();
+
+ // Add a healthy node
+ MockNM node = rm.registerNode("host:1234", 1024);
+ node.nodeHeartbeat(true);
+
+ // Create a client.
+ Configuration conf = new Configuration();
+ YarnRPC rpc = YarnRPC.create(conf);
+ InetSocketAddress rmAddress = rm.getClientRMService().getBindAddress();
+ LOG.info("Connecting to ResourceManager at " + rmAddress);
+ ClientRMProtocol client =
+ (ClientRMProtocol) rpc
+ .getProxy(ClientRMProtocol.class, rmAddress, conf);
+
+ // Make call
+ GetClusterNodesRequest request =
+ Records.newRecord(GetClusterNodesRequest.class);
+ List<NodeReport> nodeReports =
+ client.getClusterNodes(request).getNodeReports();
+ Assert.assertEquals(1, nodeReports.size());
+ Assert.assertTrue("Node is expected to be healthy!", nodeReports.get(0)
+ .getNodeHealthStatus().getIsNodeHealthy());
+
+ // Now make the node unhealthy.
+ node.nodeHeartbeat(false);
+
+ // Call again
+ nodeReports = client.getClusterNodes(request).getNodeReports();
+ Assert.assertEquals(1, nodeReports.size());
+ Assert.assertFalse("Node is expected to be unhealthy!", nodeReports.get(0)
+ .getNodeHealthStatus().getIsNodeHealthy());
+ }
+}
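Note: the ClassCastException stems from Resources.none() returning a Resource backed by an anonymous class, which the node-report path later casts to the concrete protobuf record type; BuilderUtils.newResource(0) supplies a fresh concrete instance instead. A sketch of that failure mode, using illustrative stand-in types rather than the YARN records API:

    // Sketch: a shared anonymous-class singleton fails when downstream code
    // casts to the concrete implementation it expects. Stand-in types only.
    interface Resource { int getMemory(); }

    class ResourcePBImpl implements Resource {
      private final int memory;
      ResourcePBImpl(int memory) { this.memory = memory; }
      public int getMemory() { return memory; }
    }

    public class SingletonCastDemo {
      // Analogous to Resources.none(): not a ResourcePBImpl.
      static final Resource NONE = new Resource() {
        public int getMemory() { return 0; }
      };

      static int toReport(Resource r) {
        // The report layer assumes the concrete PB implementation.
        return ((ResourcePBImpl) r).getMemory();
      }

      public static void main(String[] args) {
        System.out.println(toReport(new ResourcePBImpl(0))); // fine: 0
        try {
          System.out.println(toReport(NONE));
        } catch (ClassCastException e) {
          System.out.println("CCE, as in -list-active-trackers: " + e);
        }
      }
    }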
From aaa17aa6abcf630602cb781f6750c2e8b6b27291 Mon Sep 17 00:00:00 2001
From: Mahadev Konar
Date: Sat, 4 Feb 2012 20:21:42 +0000
Subject: [PATCH 06/27] MAPREDUCE-3775. Change MiniYarnCluster to escape
special chars in testname. (Hitesh Shah via mahadev) - Merging r1240603 from
trunk.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1240604 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +++
.../org/apache/hadoop/mapred/TestMiniMRClientCluster.java | 5 ++++-
.../java/org/apache/hadoop/yarn/server/MiniYARNCluster.java | 6 +++---
3 files changed, 10 insertions(+), 4 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 628b2574e41..81100b0c934 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -654,6 +654,9 @@ Release 0.23.1 - Unreleased
MAPREDUCE-3759. ClassCastException thrown in -list-active-trackers when
there are a few unhealthy nodes (vinodkv via mahadev)
+ MAPREDUCE-3775. Change MiniYarnCluster to escape special chars in testname.
+ (Hitesh Shah via mahadev)
+
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRClientCluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRClientCluster.java
index ddadac99001..27e6666cc5b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRClientCluster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRClientCluster.java
@@ -49,6 +49,9 @@ public class TestMiniMRClientCluster {
private static Path[] inFiles = new Path[5];
private static MiniMRClientCluster mrCluster;
+ private class InternalClass {
+ }
+
@BeforeClass
public static void setup() throws IOException {
final Configuration conf = new Configuration();
@@ -73,7 +76,7 @@ public class TestMiniMRClientCluster {
// create the mini cluster to be used for the tests
mrCluster = MiniMRClientClusterFactory.create(
- TestMiniMRClientCluster.class, 1, new Configuration());
+ InternalClass.class, 1, new Configuration());
}
@AfterClass
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
index ae35de0ac13..37cbcd6369f 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
@@ -81,11 +81,11 @@ public class MiniYARNCluster extends CompositeService {
*/
public MiniYARNCluster(String testName, int noOfNodeManagers,
int numLocalDirs, int numLogDirs) {
-
- super(testName);
+ super(testName.replace("$", ""));
this.numLocalDirs = numLocalDirs;
this.numLogDirs = numLogDirs;
- this.testWorkDir = new File("target", testName);
+ this.testWorkDir = new File("target",
+ testName.replace("$", ""));
try {
FileContext.getLocalFSFileContext().delete(
new Path(testWorkDir.getAbsolutePath()), true);
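Note: naming the cluster after a nested class makes Class.getName() return Outer$Inner, and the '$' then leaks into the target/ work-directory path; the patch strips it. A small sketch of the offending character:

    // Sketch: nested-class names contain '$', which is unfriendly in file
    // paths and shells, so MiniYARNCluster now removes it from test names.
    public class TestNameEscape {
      static class InternalClass { }

      public static void main(String[] args) {
        String raw = InternalClass.class.getName();
        System.out.println(raw);                  // TestNameEscape$InternalClass
        System.out.println(raw.replace("$", "")); // TestNameEscapeInternalClass
      }
    }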
From 4eb850ceb759c92be79a1fd34f3c6fba43dd05f0 Mon Sep 17 00:00:00 2001
From: Mahadev Konar
Date: Sat, 4 Feb 2012 22:51:10 +0000
Subject: [PATCH 07/27] MAPREDUCE-3765. FifoScheduler does not respect
yarn.scheduler.fifo.minimum-allocation-mb setting (Hitesh Shah via mahadev) -
Merging r1240634 from trunk.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1240635 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +
.../dev-support/findbugs-exclude.xml | 6 ++
.../scheduler/fifo/FifoScheduler.java | 4 +-
.../resourcemanager/TestFifoScheduler.java | 61 +++++++++++++------
4 files changed, 53 insertions(+), 21 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 81100b0c934..298d1571211 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -657,6 +657,9 @@ Release 0.23.1 - Unreleased
MAPREDUCE-3775. Change MiniYarnCluster to escape special chars in testname.
(Hitesh Shah via mahadev)
+ MAPREDUCE-3765. FifoScheduler does not respect yarn.scheduler.fifo.minimum-
+ allocation-mb setting (Hitesh Shah via mahadev)
+
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-mapreduce-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 4b7d45f66a4..4b17c1e943e 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -161,6 +161,12 @@
+
+
+
+
+
+
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
index ecdb7589234..3d904f3a8c2 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
@@ -230,7 +230,7 @@ public class FifoScheduler implements ResourceScheduler {
}
// Sanity check
- SchedulerUtils.normalizeRequests(ask, MINIMUM_MEMORY);
+ SchedulerUtils.normalizeRequests(ask, minimumAllocation.getMemory());
// Release containers
for (ContainerId releasedContainer : release) {
@@ -592,7 +592,7 @@ public class FifoScheduler implements ResourceScheduler {
minimumAllocation)) {
LOG.debug("Node heartbeat " + rmNode.getNodeID() +
" available resource = " + node.getAvailableResource());
-
+
assignContainers(node);
LOG.debug("Node after allocation " + rmNode.getNodeID() + " resource = "
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java
index 349a2cacbef..2cca6f09ad8 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestFifoScheduler.java
@@ -24,38 +24,22 @@ import junit.framework.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.AMResponse;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerState;
-import org.apache.hadoop.yarn.api.records.NodeReport;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
-import org.junit.After;
-import org.junit.Before;
import org.junit.Test;
public class TestFifoScheduler {
private static final Log LOG = LogFactory.getLog(TestFifoScheduler.class);
- private ResourceManager resourceManager = null;
-
- @Before
- public void setUp() throws Exception {
- Store store = StoreFactory.getStore(new Configuration());
- resourceManager = new ResourceManager(store);
- resourceManager.init(new Configuration());
- }
-
- @After
- public void tearDown() throws Exception {
- }
+ private final int GB = 1024;
@Test
public void test() throws Exception {
@@ -63,7 +47,6 @@ public class TestFifoScheduler {
rootLogger.setLevel(Level.DEBUG);
MockRM rm = new MockRM();
rm.start();
- int GB = 1024;
MockNM nm1 = rm.registerNode("h1:1234", 6 * GB);
MockNM nm2 = rm.registerNode("h2:5678", 4 * GB);
@@ -146,8 +129,48 @@ public class TestFifoScheduler {
rm.stop();
}
+ private void testMinimumAllocation(YarnConfiguration conf)
+ throws Exception {
+ MockRM rm = new MockRM(conf);
+ rm.start();
+
+ // Register node1
+ MockNM nm1 = rm.registerNode("h1:1234", 6 * GB);
+
+ // Submit an application
+ RMApp app1 = rm.submitApp(256);
+
+ // kick the scheduling
+ nm1.nodeHeartbeat(true);
+ RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
+ MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
+ am1.registerAppAttempt();
+ SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(
+ nm1.getNodeId());
+
+ int checkAlloc =
+ conf.getInt("yarn.scheduler.fifo.minimum-allocation-mb", GB);
+ Assert.assertEquals(checkAlloc, report_nm1.getUsedResource().getMemory());
+
+ rm.stop();
+ }
+
+ @Test
+ public void testDefaultMinimumAllocation() throws Exception {
+ testMinimumAllocation(new YarnConfiguration());
+ }
+
+ @Test
+ public void testNonDefaultMinimumAllocation() throws Exception {
+ YarnConfiguration conf = new YarnConfiguration();
+ conf.setInt("yarn.scheduler.fifo.minimum-allocation-mb", 512);
+ testMinimumAllocation(conf);
+ }
+
public static void main(String[] args) throws Exception {
TestFifoScheduler t = new TestFifoScheduler();
t.test();
+ t.testDefaultMinimumAllocation();
+ t.testNonDefaultMinimumAllocation();
}
}
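Note: the scheduler normalized every resource ask against a hard-coded MINIMUM_MEMORY constant, so the configured yarn.scheduler.fifo.minimum-allocation-mb value was ignored; the fix threads minimumAllocation.getMemory() through instead, and the new tests assert the rounded-up usage. A sketch of the round-up normalization involved, with made-up sizes:

    // Sketch: asks are rounded up to a multiple of the minimum allocation,
    // so the effective grant depends on which minimum is actually in force.
    public class NormalizeDemo {
      static int normalize(int requestedMb, int minimumMb) {
        // divide-and-ceil, then scale back up to a multiple of the minimum
        return ((requestedMb + minimumMb - 1) / minimumMb) * minimumMb;
      }

      public static void main(String[] args) {
        System.out.println(normalize(256, 1024)); // hard-coded floor: 1024
        System.out.println(normalize(256, 512));  // configured floor: 512
      }
    }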
From 3f78abc5468c649cda7bcff969fbf480b8eb1dd5 Mon Sep 17 00:00:00 2001
From: Arun Murthy
Date: Mon, 6 Feb 2012 03:39:50 +0000
Subject: [PATCH 08/27] Merge -c 1240886 from trunk to branch-0.23 to fix
MAPREDUCE-3747. Initialize queue metrics upfront and added start/finish time
to RM Web-UI.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1240887 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +
.../scheduler/capacity/CSQueue.java | 12 ++++
.../scheduler/capacity/CSQueueUtils.java | 38 +++++++++++
.../scheduler/capacity/LeafQueue.java | 66 +++++++++----------
.../scheduler/capacity/ParentQueue.java | 51 +++++++-------
.../resourcemanager/webapp/AppsBlock.java | 16 +++--
.../webapp/MetricsOverviewTable.java | 49 ++++++++++----
.../server/resourcemanager/webapp/RmView.java | 7 +-
.../webapp/dao/ClusterMetricsInfo.java | 48 ++++++++++++++
.../webapp/dao/UserMetricsInfo.java | 33 ++++++++++
.../capacity/TestApplicationLimits.java | 10 ++-
.../resourcemanager/webapp/TestNodesPage.java | 2 +-
.../webapp/TestRMWebServices.java | 13 ++--
13 files changed, 260 insertions(+), 88 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 298d1571211..b703d60fe4b 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -660,6 +660,9 @@ Release 0.23.1 - Unreleased
MAPREDUCE-3765. FifoScheduler does not respect yarn.scheduler.fifo.minimum-
allocation-mb setting (Hitesh Shah via mahadev)
+ MAPREDUCE-3747. Initialize queue metrics upfront and added start/finish
+ time to RM Web-UI. (acmurthy)
+
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
index b646e14fb8d..0730cfc25d6 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
@@ -98,6 +98,12 @@ extends org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue {
*/
public float getUsedCapacity();
+ /**
+ * Set used capacity of the queue.
+ * @param usedCapacity used capacity of the queue
+ */
+ public void setUsedCapacity(float usedCapacity);
+
/**
* Get the currently utilized resources in the cluster
* by the queue and children (if any).
@@ -114,6 +120,12 @@ extends org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue {
*/
public float getUtilization();
+ /**
+ * Set the current utilization of the queue.
+ * @param utilization queue utilization
+ */
+ public void setUtilization(float utilization);
+
/**
* Get the current run-state of the queue
* @return current run-state
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
index 89c36ab87aa..01f14ebc53a 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
@@ -17,7 +17,9 @@
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+import org.apache.hadoop.yarn.Lock;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
class CSQueueUtils {
@@ -65,4 +67,40 @@ class CSQueueUtils {
1);
}
+ @Lock(CSQueue.class)
+ public static void updateQueueStatistics(
+ final CSQueue childQueue, final CSQueue parentQueue,
+ final Resource clusterResource, final Resource minimumAllocation) {
+ final int clusterMemory = clusterResource.getMemory();
+ final int usedMemory = childQueue.getUsedResources().getMemory();
+
+ float queueLimit = 0.0f;
+ float utilization = 0.0f;
+ float usedCapacity = 0.0f;
+ if (clusterMemory > 0) {
+ queueLimit = clusterMemory * childQueue.getAbsoluteCapacity();
+ final float parentAbsoluteCapacity =
+ (parentQueue == null) ? 1.0f : parentQueue.getAbsoluteCapacity();
+ utilization = (usedMemory / queueLimit);
+ usedCapacity = (usedMemory / (clusterMemory * parentAbsoluteCapacity));
+ }
+
+ childQueue.setUtilization(utilization);
+ childQueue.setUsedCapacity(usedCapacity);
+
+ int available =
+ Math.max((roundUp(minimumAllocation, (int)queueLimit) - usedMemory), 0);
+ childQueue.getMetrics().setAvailableResourcesToQueue(
+ Resources.createResource(available));
+ }
+
+ public static int roundUp(Resource minimumAllocation, int memory) {
+ int minMemory = minimumAllocation.getMemory();
+ return LeafQueue.divideAndCeil(memory, minMemory) * minMemory;
+ }
+
+ public static int roundDown(Resource minimumAllocation, int memory) {
+ int minMemory = minimumAllocation.getMemory();
+ return (memory / minMemory) * minMemory;
+ }
}
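Note: updateQueueStatistics centralizes the arithmetic that LeafQueue and ParentQueue previously duplicated in private updateResource() methods, and guards the divisions when the cluster has no memory yet. A worked sketch of the two ratios it computes, with made-up numbers:

    // Sketch: utilization measures usage against the queue's own capacity
    // share; usedCapacity measures it against the parent's share.
    public class QueueStatsDemo {
      public static void main(String[] args) {
        int clusterMemory = 8192;             // MB in the cluster
        int usedMemory = 2048;                // MB used by this queue
        float absoluteCapacity = 0.25f;       // queue's share of the cluster
        float parentAbsoluteCapacity = 0.5f;  // parent's share of the cluster

        float queueLimit = clusterMemory * absoluteCapacity;       // 2048 MB
        float utilization = usedMemory / queueLimit;               // 1.00
        float usedCapacity =
            usedMemory / (clusterMemory * parentAbsoluteCapacity); // 0.50

        System.out.printf("utilization=%.2f usedCapacity=%.2f%n",
            utilization, usedCapacity);
      }
    }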
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index ecc672c1cb8..f21cfc21fcb 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -180,7 +180,9 @@ public class LeafQueue implements CSQueue {
Map<QueueACL, AccessControlList> acls =
cs.getConfiguration().getAcls(getQueuePath());
- setupQueueConfigs(capacity, absoluteCapacity,
+ setupQueueConfigs(
+ cs.getClusterResources(),
+ capacity, absoluteCapacity,
maximumCapacity, absoluteMaxCapacity,
userLimit, userLimitFactor,
maxApplications, maxApplicationsPerUser,
@@ -198,6 +200,7 @@ public class LeafQueue implements CSQueue {
}
private synchronized void setupQueueConfigs(
+ Resource clusterResource,
float capacity, float absoluteCapacity,
float maximumCapacity, float absoluteMaxCapacity,
int userLimit, float userLimitFactor,
@@ -235,6 +238,10 @@ public class LeafQueue implements CSQueue {
for (Map.Entry<QueueACL, AccessControlList> e : acls.entrySet()) {
aclsString.append(e.getKey() + ":" + e.getValue().getAclString());
}
+
+ // Update metrics
+ CSQueueUtils.updateQueueStatistics(
+ this, parent, clusterResource, minimumAllocation);
LOG.info("Initializing " + queueName + "\n" +
"capacity = " + capacity +
@@ -386,11 +393,11 @@ public class LeafQueue implements CSQueue {
return null;
}
- synchronized void setUtilization(float utilization) {
+ public synchronized void setUtilization(float utilization) {
this.utilization = utilization;
}
- synchronized void setUsedCapacity(float usedCapacity) {
+ public synchronized void setUsedCapacity(float usedCapacity) {
this.usedCapacity = usedCapacity;
}
@@ -534,7 +541,9 @@ public class LeafQueue implements CSQueue {
}
LeafQueue leafQueue = (LeafQueue)queue;
- setupQueueConfigs(leafQueue.capacity, leafQueue.absoluteCapacity,
+ setupQueueConfigs(
+ clusterResource,
+ leafQueue.capacity, leafQueue.absoluteCapacity,
leafQueue.maximumCapacity, leafQueue.absoluteMaxCapacity,
leafQueue.userLimit, leafQueue.userLimitFactor,
leafQueue.maxApplications,
@@ -542,8 +551,6 @@ public class LeafQueue implements CSQueue {
leafQueue.getMaximumActiveApplications(),
leafQueue.getMaximumActiveApplicationsPerUser(),
leafQueue.state, leafQueue.acls);
-
- updateResource(clusterResource);
}
@Override
@@ -883,7 +890,8 @@ public class LeafQueue implements CSQueue {
Resource queueMaxCap = // Queue Max-Capacity
Resources.createResource(
- roundDown((int)(absoluteMaxCapacity * clusterResource.getMemory()))
+ CSQueueUtils.roundDown(minimumAllocation,
+ (int)(absoluteMaxCapacity * clusterResource.getMemory()))
);
Resource userConsumed = getUser(user).getConsumedResources();
@@ -904,16 +912,6 @@ public class LeafQueue implements CSQueue {
return userLimit;
}
- private int roundUp(int memory) {
- int minMemory = minimumAllocation.getMemory();
- return divideAndCeil(memory, minMemory) * minMemory;
- }
-
- private int roundDown(int memory) {
- int minMemory = minimumAllocation.getMemory();
- return (memory / minMemory) * minMemory;
- }
-
@Lock(NoLock.class)
private Resource computeUserLimit(SchedulerApp application,
Resource clusterResource, Resource required) {
@@ -927,8 +925,11 @@ public class LeafQueue implements CSQueue {
// Allow progress for queues with miniscule capacity
final int queueCapacity =
Math.max(
- roundUp((int)(absoluteCapacity * clusterResource.getMemory())),
- required.getMemory());
+ CSQueueUtils.roundUp(
+ minimumAllocation,
+ (int)(absoluteCapacity * clusterResource.getMemory())),
+ required.getMemory()
+ );
final int consumed = usedResources.getMemory();
final int currentCapacity =
@@ -943,7 +944,8 @@ public class LeafQueue implements CSQueue {
final int activeUsers = activeUsersManager.getNumActiveUsers();
int limit =
- roundUp(
+ CSQueueUtils.roundUp(
+ minimumAllocation,
Math.min(
Math.max(divideAndCeil(currentCapacity, activeUsers),
divideAndCeil((int)userLimit*currentCapacity, 100)),
@@ -991,7 +993,7 @@ public class LeafQueue implements CSQueue {
return true;
}
- private static int divideAndCeil(int a, int b) {
+ static int divideAndCeil(int a, int b) {
if (b == 0) {
LOG.info("divideAndCeil called with a=" + a + " b=" + b);
return 0;
@@ -1325,7 +1327,8 @@ public class LeafQueue implements CSQueue {
SchedulerApp application, Resource resource) {
// Update queue metrics
Resources.addTo(usedResources, resource);
- updateResource(clusterResource);
+ CSQueueUtils.updateQueueStatistics(
+ this, parent, clusterResource, minimumAllocation);
++numContainers;
// Update user metrics
@@ -1349,7 +1352,8 @@ public class LeafQueue implements CSQueue {
SchedulerApp application, Resource resource) {
// Update queue metrics
Resources.subtractFrom(usedResources, resource);
- updateResource(clusterResource);
+ CSQueueUtils.updateQueueStatistics(
+ this, parent, clusterResource, minimumAllocation);
--numContainers;
// Update user metrics
@@ -1374,6 +1378,10 @@ public class LeafQueue implements CSQueue {
CSQueueUtils.computeMaxActiveApplicationsPerUser(
maxActiveApplications, userLimit, userLimitFactor);
+ // Update metrics
+ CSQueueUtils.updateQueueStatistics(
+ this, parent, clusterResource, minimumAllocation);
+
// Update application properties
for (SchedulerApp application : activeApplications) {
synchronized (application) {
@@ -1383,18 +1391,6 @@ public class LeafQueue implements CSQueue {
}
}
- private synchronized void updateResource(Resource clusterResource) {
- float queueLimit = clusterResource.getMemory() * absoluteCapacity;
- setUtilization(usedResources.getMemory() / queueLimit);
- setUsedCapacity(usedResources.getMemory()
- / (clusterResource.getMemory() * parent.getAbsoluteCapacity()));
-
- Resource resourceLimit =
- Resources.createResource(roundUp((int)queueLimit));
- metrics.setAvailableResourcesToQueue(
- Resources.subtractFrom(resourceLimit, usedResources));
- }
-
@Override
public QueueMetrics getMetrics() {
return metrics;
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index 39aa197f2b0..4010aa0ce0a 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -97,7 +97,8 @@ public class ParentQueue implements CSQueue {
RecordFactoryProvider.getRecordFactory(null);
public ParentQueue(CapacitySchedulerContext cs,
- String queueName, Comparator<CSQueue> comparator, CSQueue parent, CSQueue old) {
+ String queueName, Comparator<CSQueue> comparator,
+ CSQueue parent, CSQueue old) {
minimumAllocation = cs.getMinimumResourceCapability();
this.parent = parent;
@@ -137,7 +138,8 @@ public class ParentQueue implements CSQueue {
this.queueInfo.setQueueName(queueName);
this.queueInfo.setChildQueues(new ArrayList<QueueInfo>());
- setupQueueConfigs(capacity, absoluteCapacity,
+ setupQueueConfigs(cs.getClusterResources(),
+ capacity, absoluteCapacity,
maximumCapacity, absoluteMaxCapacity, state, acls);
this.queueComparator = comparator;
@@ -149,9 +151,10 @@ public class ParentQueue implements CSQueue {
}
private synchronized void setupQueueConfigs(
- float capacity, float absoluteCapacity,
- float maximumCapacity, float absoluteMaxCapacity,
- QueueState state, Map<QueueACL, AccessControlList> acls
+ Resource clusterResource,
+ float capacity, float absoluteCapacity,
+ float maximumCapacity, float absoluteMaxCapacity,
+ QueueState state, Map<QueueACL, AccessControlList> acls
) {
// Sanity check
CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity);
@@ -174,6 +177,10 @@ public class ParentQueue implements CSQueue {
aclsString.append(e.getKey() + ":" + e.getValue().getAclString());
}
+ // Update metrics
+ CSQueueUtils.updateQueueStatistics(
+ this, parent, clusterResource, minimumAllocation);
+
LOG.info(queueName +
", capacity=" + capacity +
", absoluteCapacity=" + absoluteCapacity +
@@ -384,12 +391,10 @@ public class ParentQueue implements CSQueue {
childQueues.addAll(currentChildQueues.values());
// Set new configs
- setupQueueConfigs(parentQueue.capacity, parentQueue.absoluteCapacity,
+ setupQueueConfigs(clusterResource,
+ parentQueue.capacity, parentQueue.absoluteCapacity,
parentQueue.maximumCapacity, parentQueue.absoluteMaxCapacity,
parentQueue.state, parentQueue.acls);
-
- // Update
- updateResource(clusterResource);
}
Map<String, CSQueue> getQueues(Set<String> queues) {
@@ -485,11 +490,11 @@ public class ParentQueue implements CSQueue {
" #applications: " + getNumApplications());
}
- synchronized void setUsedCapacity(float usedCapacity) {
+ public synchronized void setUsedCapacity(float usedCapacity) {
this.usedCapacity = usedCapacity;
}
- synchronized void setUtilization(float utilization) {
+ public synchronized void setUtilization(float utilization) {
this.utilization = utilization;
}
@@ -674,14 +679,16 @@ public class ParentQueue implements CSQueue {
synchronized void allocateResource(Resource clusterResource,
Resource resource) {
Resources.addTo(usedResources, resource);
- updateResource(clusterResource);
+ CSQueueUtils.updateQueueStatistics(
+ this, parent, clusterResource, minimumAllocation);
++numContainers;
}
synchronized void releaseResource(Resource clusterResource,
Resource resource) {
Resources.subtractFrom(usedResources, resource);
- updateResource(clusterResource);
+ CSQueueUtils.updateQueueStatistics(
+ this, parent, clusterResource, minimumAllocation);
--numContainers;
}
@@ -691,22 +698,12 @@ public class ParentQueue implements CSQueue {
for (CSQueue childQueue : childQueues) {
childQueue.updateClusterResource(clusterResource);
}
+
+ // Update metrics
+ CSQueueUtils.updateQueueStatistics(
+ this, parent, clusterResource, minimumAllocation);
}
- private synchronized void updateResource(Resource clusterResource) {
- float queueLimit = clusterResource.getMemory() * absoluteCapacity;
- float parentAbsoluteCapacity =
- (rootQueue) ? 1.0f : parent.getAbsoluteCapacity();
- setUtilization(usedResources.getMemory() / queueLimit);
- setUsedCapacity(usedResources.getMemory()
- / (clusterResource.getMemory() * parentAbsoluteCapacity));
-
- Resource resourceLimit =
- Resources.createResource((int)queueLimit);
- metrics.setAvailableResourcesToQueue(
- Resources.subtractFrom(resourceLimit, usedResources));
- }
-
@Override
public QueueMetrics getMetrics() {
return metrics;
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java
index 9f4d0f94679..27deee34e84 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java
@@ -19,7 +19,6 @@
package org.apache.hadoop.yarn.server.resourcemanager.webapp;
import static org.apache.hadoop.yarn.util.StringHelper.join;
-import static org.apache.hadoop.yarn.util.StringHelper.sjoin;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.APP_STATE;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR_VALUE;
@@ -27,6 +26,7 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR_VALUE;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.util.Times;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
@@ -52,11 +52,12 @@ class AppsBlock extends HtmlBlock {
th(".user", "User").
th(".name", "Name").
th(".queue", "Queue").
+ th(".starttime", "StartTime").
+ th(".finishtime", "FinishTime").
th(".state", "State").
th(".finalstatus", "FinalStatus").
th(".progress", "Progress").
- th(".ui", "Tracking UI").
- th(".note", "Note")._()._().
+ th(".ui", "Tracking UI")._()._().
tbody();
int i = 0;
String reqState = $(APP_STATE);
@@ -67,6 +68,8 @@ class AppsBlock extends HtmlBlock {
}
AppInfo appInfo = new AppInfo(app, true);
String percent = String.format("%.1f", appInfo.getProgress());
+ String startTime = Times.format(appInfo.getStartTime());
+ String finishTime = Times.format(appInfo.getFinishTime());
tbody.
tr().
td().
@@ -75,6 +78,10 @@ class AppsBlock extends HtmlBlock {
td(appInfo.getUser()).
td(appInfo.getName()).
td(appInfo.getQueue()).
+ td().
+ br().$title(startTime)._()._(startTime)._().
+ td().
+ br().$title(finishTime)._()._(finishTime)._().
td(appInfo.getState()).
td(appInfo.getFinalStatus()).
td().
@@ -85,8 +92,7 @@ class AppsBlock extends HtmlBlock {
$style(join("width:", percent, '%'))._()._()._().
td().
a(!appInfo.isTrackingUrlReady()?
- "#" : appInfo.getTrackingUrlPretty(), appInfo.getTrackingUI())._().
- td(appInfo.getNote())._();
+ "#" : appInfo.getTrackingUrlPretty(), appInfo.getTrackingUI())._()._();
if (list.rendering != Render.HTML && ++i >= 20) break;
}
tbody._()._();
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java
index 92a84a244c7..7a8b681200a 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java
@@ -55,15 +55,19 @@ public class MetricsOverviewTable extends HtmlBlock {
//CSS in the correct spot
html.style(".metrics {margin-bottom:5px}");
- ClusterMetricsInfo clusterMetrics = new ClusterMetricsInfo(this.rm, this.rmContext);
-
+ ClusterMetricsInfo clusterMetrics =
+ new ClusterMetricsInfo(this.rm, this.rmContext);
DIV<Hamlet> div = html.div().$class("metrics");
- div.table("#metricsoverview").
+ div.h3("Cluster Metrics").
+ table("#metricsoverview").
thead().$class("ui-widget-header").
tr().
th().$class("ui-state-default")._("Apps Submitted")._().
+ th().$class("ui-state-default")._("Apps Pending")._().
+ th().$class("ui-state-default")._("Apps Running")._().
+ th().$class("ui-state-default")._("Apps Completed")._().
th().$class("ui-state-default")._("Containers Running")._().
th().$class("ui-state-default")._("Memory Used")._().
th().$class("ui-state-default")._("Memory Total")._().
@@ -78,6 +82,14 @@ public class MetricsOverviewTable extends HtmlBlock {
tbody().$class("ui-widget-content").
tr().
td(String.valueOf(clusterMetrics.getAppsSubmitted())).
+ td(String.valueOf(clusterMetrics.getAppsPending())).
+ td(String.valueOf(clusterMetrics.getAppsRunning())).
+ td(
+ String.valueOf(
+ clusterMetrics.getAppsCompleted() +
+ clusterMetrics.getAppsFailed() + clusterMetrics.getAppsKilled()
+ )
+ ).
td(String.valueOf(clusterMetrics.getContainersAllocated())).
td(StringUtils.byteDesc(clusterMetrics.getAllocatedMB() * BYTES_IN_MB)).
td(StringUtils.byteDesc(clusterMetrics.getTotalMB() * BYTES_IN_MB)).
@@ -89,26 +101,38 @@ public class MetricsOverviewTable extends HtmlBlock {
td().a(url("nodes/rebooted"),String.valueOf(clusterMetrics.getRebootedNodes()))._().
_().
_()._();
-
+
String user = request().getRemoteUser();
if (user != null) {
UserMetricsInfo userMetrics = new UserMetricsInfo(this.rm, this.rmContext, user);
if (userMetrics.metricsAvailable()) {
- div.table("#usermetricsoverview").
+ div.h3("User Metrics for " + user).
+ table("#usermetricsoverview").
thead().$class("ui-widget-header").
tr().
- th().$class("ui-state-default")._("Apps Submitted ("+user+")")._().
- th().$class("ui-state-default")._("Containers Running ("+user+")")._().
- th().$class("ui-state-default")._("Containers Pending ("+user+")")._().
- th().$class("ui-state-default")._("Containers Reserved ("+user+")")._().
- th().$class("ui-state-default")._("Memory Used ("+user+")")._().
- th().$class("ui-state-default")._("Memory Pending ("+user+")")._().
- th().$class("ui-state-default")._("Memory Reserved ("+user+")")._().
+ th().$class("ui-state-default")._("Apps Submitted")._().
+ th().$class("ui-state-default")._("Apps Pending")._().
+ th().$class("ui-state-default")._("Apps Running")._().
+ th().$class("ui-state-default")._("Apps Completed")._().
+ th().$class("ui-state-default")._("Containers Running")._().
+ th().$class("ui-state-default")._("Containers Pending")._().
+ th().$class("ui-state-default")._("Containers Reserved")._().
+ th().$class("ui-state-default")._("Memory Used")._().
+ th().$class("ui-state-default")._("Memory Pending")._().
+ th().$class("ui-state-default")._("Memory Reserved")._().
_().
_().
tbody().$class("ui-widget-content").
tr().
td(String.valueOf(userMetrics.getAppsSubmitted())).
+ td(String.valueOf(userMetrics.getAppsPending())).
+ td(String.valueOf(userMetrics.getAppsRunning())).
+ td(
+ String.valueOf(
+ (userMetrics.getAppsCompleted() +
+ userMetrics.getAppsFailed() + userMetrics.getAppsKilled())
+ )
+ ).
td(String.valueOf(userMetrics.getRunningContainers())).
td(String.valueOf(userMetrics.getPendingContainers())).
td(String.valueOf(userMetrics.getReservedContainers())).
@@ -117,6 +141,7 @@ public class MetricsOverviewTable extends HtmlBlock {
td(StringUtils.byteDesc(userMetrics.getReservedMB() * BYTES_IN_MB)).
_().
_()._();
+
}
}
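Note on the table change above: the new "Apps Completed" cell folds failed and killed applications into a single terminal-state count instead of giving each state its own column. A standalone sketch of that aggregation, with sample values that are illustrative rather than taken from a real cluster:

    public class AppsCompletedCell {
      public static void main(String[] args) {
        int appsCompleted = 40, appsFailed = 3, appsKilled = 2; // assumed samples
        // One figure covers all terminal states, matching the td() above:
        System.out.println("Apps Completed: "
            + (appsCompleted + appsFailed + appsKilled)); // Apps Completed: 45
      }
    }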
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java
index 47532c1562d..bc2bfca328a 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java
@@ -63,10 +63,11 @@ public class RmView extends TwoColumnLayout {
private String appsTableInit() {
AppsList list = getInstance(AppsList.class);
- // id, user, name, queue, state, progress, ui, note
+ // id, user, name, queue, starttime, finishtime, state, progress, ui
StringBuilder init = tableInit().
- append(", aoColumns:[{sType:'title-numeric'}, null, null, null, null,").
- append("null,{sType:'title-numeric', bSearchable:false}, null, null]");
+ append(", aoColumns:[{sType:'title-numeric'}, null, null, null, ").
+ append("null, null , null, ").
+ append("null,{sType:'title-numeric', bSearchable:false}, null]");
// Sort by id upon page load
init.append(", aaSorting: [[0, 'asc']]");
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
index d8db4bb99bc..0db26c2e08b 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
@@ -32,10 +32,20 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler
public class ClusterMetricsInfo {
protected int appsSubmitted;
+ protected int appsCompleted;
+ protected int appsPending;
+ protected int appsRunning;
+ protected int appsFailed;
+ protected int appsKilled;
+
protected long reservedMB;
protected long availableMB;
protected long allocatedMB;
+
protected int containersAllocated;
+ protected int containersReserved;
+ protected int containersPending;
+
protected long totalMB;
protected int totalNodes;
protected int lostNodes;
@@ -53,10 +63,20 @@ public class ClusterMetricsInfo {
ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics();
this.appsSubmitted = metrics.getAppsSubmitted();
+ this.appsCompleted = metrics.getAppsCompleted();
+ this.appsPending = metrics.getAppsPending();
+ this.appsRunning = metrics.getAppsRunning();
+ this.appsFailed = metrics.getAppsFailed();
+ this.appsKilled = metrics.getAppsKilled();
+
this.reservedMB = metrics.getReservedMB();
this.availableMB = metrics.getAvailableMB();
this.allocatedMB = metrics.getAllocatedMB();
+
this.containersAllocated = metrics.getAllocatedContainers();
+ this.containersPending = metrics.getPendingContainers();
+ this.containersReserved = metrics.getReservedContainers();
+
this.totalMB = availableMB + reservedMB + allocatedMB;
this.activeNodes = clusterMetrics.getNumActiveNMs();
this.lostNodes = clusterMetrics.getNumLostNMs();
@@ -71,6 +91,26 @@ public class ClusterMetricsInfo {
return this.appsSubmitted;
}
+ public int getAppsCompleted() {
+ return appsCompleted;
+ }
+
+ public int getAppsPending() {
+ return appsPending;
+ }
+
+ public int getAppsRunning() {
+ return appsRunning;
+ }
+
+ public int getAppsFailed() {
+ return appsFailed;
+ }
+
+ public int getAppsKilled() {
+ return appsKilled;
+ }
+
public long getReservedMB() {
return this.reservedMB;
}
@@ -87,6 +127,14 @@ public class ClusterMetricsInfo {
return this.containersAllocated;
}
+ public int getReservedContainers() {
+ return this.containersReserved;
+ }
+
+ public int getPendingContainers() {
+ return this.containersPending;
+ }
+
public long getTotalMB() {
return this.totalMB;
}
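ClusterMetricsInfo is the DAO the RM web services marshal into the clusterMetrics payload, so every protected field added here surfaces as a new JSON/XML element; the TestRMWebServices hunk later in this patch raises the expected element count from 12 to 19 to match. A sketch of the JAXB shape these DAOs follow; the annotations shown are the conventional ones for this package and are an assumption, not quoted from the patch:

    import javax.xml.bind.annotation.XmlAccessType;
    import javax.xml.bind.annotation.XmlAccessorType;
    import javax.xml.bind.annotation.XmlRootElement;

    @XmlRootElement(name = "clusterMetrics")
    @XmlAccessorType(XmlAccessType.FIELD)
    public class ClusterMetricsInfoSketch {
      protected int appsSubmitted;
      protected int appsCompleted; // each new field becomes an element,
      protected int appsPending;   // e.g. <appsPending> in the XML payload
      protected int appsRunning;
      protected int appsFailed;
      protected int appsKilled;
    }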
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/UserMetricsInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/UserMetricsInfo.java
index a6c1fcaac9f..9d4d77ce08b 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/UserMetricsInfo.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/UserMetricsInfo.java
@@ -32,6 +32,11 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler
public class UserMetricsInfo {
protected int appsSubmitted;
+ protected int appsCompleted;
+ protected int appsPending;
+ protected int appsRunning;
+ protected int appsFailed;
+ protected int appsKilled;
protected int runningContainers;
protected int pendingContainers;
protected int reservedContainers;
@@ -54,10 +59,18 @@ public class UserMetricsInfo {
if (userMetrics != null) {
this.userMetricsAvailable = true;
+
this.appsSubmitted = userMetrics.getAppsSubmitted();
+ this.appsCompleted = userMetrics.getAppsCompleted();
+ this.appsPending = userMetrics.getAppsPending();
+ this.appsRunning = userMetrics.getAppsRunning();
+ this.appsFailed = userMetrics.getAppsFailed();
+ this.appsKilled = userMetrics.getAppsKilled();
+
this.runningContainers = userMetrics.getAllocatedContainers();
this.pendingContainers = userMetrics.getPendingContainers();
this.reservedContainers = userMetrics.getReservedContainers();
+
this.reservedMB = userMetrics.getReservedMB();
this.pendingMB = userMetrics.getPendingMB();
this.allocatedMB = userMetrics.getAllocatedMB();
@@ -72,6 +85,26 @@ public class UserMetricsInfo {
return this.appsSubmitted;
}
+ public int getAppsCompleted() {
+ return appsCompleted;
+ }
+
+ public int getAppsPending() {
+ return appsPending;
+ }
+
+ public int getAppsRunning() {
+ return appsRunning;
+ }
+
+ public int getAppsFailed() {
+ return appsFailed;
+ }
+
+ public int getAppsKilled() {
+ return appsKilled;
+ }
+
public long getReservedMB() {
return this.reservedMB;
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
index 7426d2b273d..57e0c69385e 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
@@ -142,7 +142,7 @@ public class TestApplicationLimits {
CapacityScheduler.parseQueue(csContext, csConf, null, "root",
queues, queues,
CapacityScheduler.queueComparator,
- CapacityScheduler.applicationComparator,
+ CapacityScheduler.applicationComparator,
TestUtils.spyHook);
LeafQueue queue = (LeafQueue)queues.get(A);
@@ -163,6 +163,10 @@ public class TestApplicationLimits {
expectedMaxActiveApps * (queue.getUserLimit() / 100.0f) *
queue.getUserLimitFactor()),
queue.getMaximumActiveApplicationsPerUser());
+ assertEquals(
+ (int)(clusterResource.getMemory() * queue.getAbsoluteCapacity()),
+ queue.getMetrics().getAvailableMB()
+ );
// Add some nodes to the cluster & test new limits
clusterResource = Resources.createResource(120 * 16 * GB);
@@ -178,6 +182,10 @@ public class TestApplicationLimits {
(int)Math.ceil(expectedMaxActiveApps *
(queue.getUserLimit() / 100.0f) * queue.getUserLimitFactor()),
queue.getMaximumActiveApplicationsPerUser());
+ assertEquals(
+ (int)(clusterResource.getMemory() * queue.getAbsoluteCapacity()),
+ queue.getMetrics().getAvailableMB()
+ );
}
@Test
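A worked instance of the new availableMB assertion, with an assumed queue capacity (the real value comes from the CapacityScheduler configuration built earlier in the test): available memory should track cluster memory scaled by the queue's absolute capacity.

    public class AvailableMBCheck {
      public static void main(String[] args) {
        final int GB = 1024;               // MB per GB, as in the test's constant
        int clusterMemory = 120 * 16 * GB; // 120 nodes x 16 GB, per the test
        float absoluteCapacity = 0.1f;     // assumed 10% queue capacity
        System.out.println((int) (clusterMemory * absoluteCapacity)); // 196608
      }
    }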
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java
index 5d97adae5d7..4922419c2f1 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java
@@ -48,7 +48,7 @@ public class TestNodesPage {
// Number of Actual Table Headers for NodesPage.NodesBlock might change in
// future. In that case this value should be adjusted to the new value.
- final int numberOfThInMetricsTable = 10;
+ final int numberOfThInMetricsTable = 13;
final int numberOfActualTableHeaders = 10;
private Injector injector;
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
index d09645a97dc..fc73c0251a4 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
@@ -361,6 +361,7 @@ public class TestRMWebServices extends JerseyTest {
verifyClusterMetrics(
WebServicesTestUtils.getXmlInt(element, "appsSubmitted"),
+ WebServicesTestUtils.getXmlInt(element, "appsCompleted"),
WebServicesTestUtils.getXmlInt(element, "reservedMB"),
WebServicesTestUtils.getXmlInt(element, "availableMB"),
WebServicesTestUtils.getXmlInt(element, "allocatedMB"),
@@ -379,8 +380,9 @@ public class TestRMWebServices extends JerseyTest {
Exception {
assertEquals("incorrect number of elements", 1, json.length());
JSONObject clusterinfo = json.getJSONObject("clusterMetrics");
- assertEquals("incorrect number of elements", 12, clusterinfo.length());
- verifyClusterMetrics(clusterinfo.getInt("appsSubmitted"),
+ assertEquals("incorrect number of elements", 19, clusterinfo.length());
+ verifyClusterMetrics(
+ clusterinfo.getInt("appsSubmitted"), clusterinfo.getInt("appsCompleted"),
clusterinfo.getInt("reservedMB"), clusterinfo.getInt("availableMB"),
clusterinfo.getInt("allocatedMB"),
clusterinfo.getInt("containersAllocated"),
@@ -390,7 +392,8 @@ public class TestRMWebServices extends JerseyTest {
clusterinfo.getInt("rebootedNodes"),clusterinfo.getInt("activeNodes"));
}
- public void verifyClusterMetrics(int sub, int reservedMB, int availableMB,
+ public void verifyClusterMetrics(int submittedApps, int completedApps,
+ int reservedMB, int availableMB,
int allocMB, int containersAlloc, int totalMB, int totalNodes,
int lostNodes, int unhealthyNodes, int decommissionedNodes,
int rebootedNodes, int activeNodes) throws JSONException, Exception {
@@ -404,7 +407,9 @@ public class TestRMWebServices extends JerseyTest {
+ metrics.getAllocatedMB();
assertEquals("appsSubmitted doesn't match",
- metrics.getAppsSubmitted(), sub);
+ metrics.getAppsSubmitted(), submittedApps);
+ assertEquals("appsCompleted doesn't match",
+ metrics.getAppsCompleted(), completedApps);
assertEquals("reservedMB doesn't match",
metrics.getReservedMB(), reservedMB);
assertEquals("availableMB doesn't match",
From ce01f2ab1d1967fc86e934b99f4f815abbf7e9bb Mon Sep 17 00:00:00 2001
From: Harsh J
Date: Mon, 6 Feb 2012 05:37:36 +0000
Subject: [PATCH 09/27] HDFS-2868. svn merge -c 1240897 from trunk
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1240899 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +-----
.../org/apache/hadoop/hdfs/server/datanode/DataNode.java | 3 ++-
.../apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java | 7 ++++++-
.../hadoop/hdfs/server/datanode/TestDataNodeMXBean.java | 5 +++++
4 files changed, 14 insertions(+), 7 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7ba0131955f..724d06a27fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -101,11 +101,7 @@ Release 0.23.1 - UNRELEASED
HDFS-2814. NamenodeMXBean does not account for svn revision in the version
information. (Hitesh Shah via jitendra)
- HDFS-2784. Update hftp and hdfs for host-based token support.
- (Kihwal Lee via jitendra)
-
- HDFS-2785. Update webhdfs and httpfs for host-based token support.
- (Robert Joseph Evans via jitendra)
+ HDFS-2868. Expose xceiver counts via the DataNode MXBean. (harsh)
OPTIMIZATIONS
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 007cabace48..abde548346e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1262,7 +1262,8 @@ public class DataNode extends Configured
}
/** Number of concurrent xceivers per node. */
- int getXceiverCount() {
+ @Override // DataNodeMXBean
+ public int getXceiverCount() {
return threadGroup == null ? 0 : threadGroup.activeCount();
}
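The interface javadoc below calls this an estimate because ThreadGroup.activeCount() is itself documented as an estimate. A minimal standalone illustration of that behavior, independent of HDFS:

    public class ActiveCountDemo {
      public static void main(String[] args) throws InterruptedException {
        ThreadGroup tg = new ThreadGroup("xceiver-demo");
        Thread t = new Thread(tg, new Runnable() {
          public void run() {
            try { Thread.sleep(500); } catch (InterruptedException ie) { }
          }
        });
        t.start();
        System.out.println(tg.activeCount()); // typically 1 while the thread sleeps
        t.join();
      }
    }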
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
index b4f98625c30..8e80c587420 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
@@ -70,5 +70,10 @@ public interface DataNodeMXBean {
* @return the cluster id
*/
public String getClusterId();
-
+
+ /**
+ * Returns an estimate of the number of Datanode threads
+ * actively transferring blocks.
+ */
+ public int getXceiverCount();
}
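With the attribute exposed, any JMX client can poll it. A hedged sketch of an in-process read, the same way the test below does it; the ObjectName is assumed to match the DataNode's usual MXBean registration:

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class XceiverCountProbe {
      public static void main(String[] args) throws Exception {
        // Only meaningful in a JVM hosting a running DataNode.
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName name =
            new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
        int xceivers = (Integer) mbs.getAttribute(name, "XceiverCount");
        System.out.println("Active xceivers: " + xceivers);
      }
    }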
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
index eb71d2e65b3..e2954855ea1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
@@ -65,6 +65,11 @@ public class TestDataNodeMXBean {
String volumeInfo = (String)mbs.getAttribute(mxbeanName, "VolumeInfo");
Assert.assertEquals(replaceDigits(datanode.getVolumeInfo()),
replaceDigits(volumeInfo));
+ // Ensure mxbean's XceiverCount is same as the DataNode's
+ // live value.
+ int xceiverCount = (Integer)mbs.getAttribute(mxbeanName,
+ "XceiverCount");
+ Assert.assertEquals(datanode.getXceiverCount(), xceiverCount);
} finally {
if (cluster != null) {cluster.shutdown();}
}
From 04c09247fb626509e6381f68871cd009e5c9492c Mon Sep 17 00:00:00 2001
From: Harsh J
Date: Mon, 6 Feb 2012 05:39:11 +0000
Subject: [PATCH 10/27] Fix CHANGES.txt after 1240899 (HDFS-2868)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1240900 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
1 file changed, 3 insertions(+)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 724d06a27fa..d607f31ebca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -101,6 +101,9 @@ Release 0.23.1 - UNRELEASED
HDFS-2814. NamenodeMXBean does not account for svn revision in the version
information. (Hitesh Shah via jitendra)
+ HDFS-2785. Update webhdfs and httpfs for host-based token support.
+ (Robert Joseph Evans via jitendra)
+
HDFS-2868. Expose xceiver counts via the DataNode MXBean. (harsh)
OPTIMIZATIONS
From 556ad365631d833cc037a9509b68b4ae7c50d91b Mon Sep 17 00:00:00 2001
From: Harsh J
Date: Mon, 6 Feb 2012 07:25:50 +0000
Subject: [PATCH 11/27] Fix CHANGES.txt after 1240899 (HDFS-2868)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1240902 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
1 file changed, 3 insertions(+)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d607f31ebca..1ed1d069798 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -101,6 +101,9 @@ Release 0.23.1 - UNRELEASED
HDFS-2814. NamenodeMXBean does not account for svn revision in the version
information. (Hitesh Shah via jitendra)
+ HDFS-2784. Update hftp and hdfs for host-based token support.
+ (Kihwal Lee via jitendra)
+
HDFS-2785. Update webhdfs and httpfs for host-based token support.
(Robert Joseph Evans via jitendra)
From db4d28d3bbfa5f67be482ec433a308e249a30897 Mon Sep 17 00:00:00 2001
From: Eli Collins
Date: Mon, 6 Feb 2012 08:56:06 +0000
Subject: [PATCH 12/27] HDFS-2893. svn merge -c 1240928 from trunk
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1240930 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../hadoop-hdfs/src/main/bin/start-dfs.sh | 15 +++------------
.../hadoop-hdfs/src/main/bin/stop-dfs.sh | 13 ++-----------
.../content/xdocs/hdfs_user_guide.xml | 4 +---
4 files changed, 9 insertions(+), 26 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1ed1d069798..04acb3aef9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -219,6 +219,9 @@ Release 0.23.1 - UNRELEASED
HDFS-2889. getNumCurrentReplicas is package private but should be public on
0.23 (see HDFS-2408). (Gregory Chanan via atm)
+ HDFS-2893. The start/stop scripts don't start/stop the 2NN when
+ using the default configuration. (eli)
+
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
index d6d03f7f8fb..d267e4cd7c3 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
@@ -59,7 +59,7 @@ echo "Starting namenodes on [$NAMENODES]"
--script "$bin/hdfs" start namenode $nameStartOpt
#---------------------------------------------------------
-# datanodes (using defalut slaves file)
+# datanodes (using default slaves file)
if [ -n "$HADOOP_SECURE_DN_USER" ]; then
echo \
@@ -74,22 +74,13 @@ fi
#---------------------------------------------------------
# secondary namenodes (if any)
-# if there are no secondary namenodes configured it returns
-# 0.0.0.0 or empty string
SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>&-)
-SECONDARY_NAMENODES=${SECONDARY_NAMENODES:='0.0.0.0'}
-if [ "$SECONDARY_NAMENODES" = '0.0.0.0' ] ; then
- echo \
- "Secondary namenodes are not configured. " \
- "Cannot start secondary namenodes."
-else
- echo "Starting secondary namenodes [$SECONDARY_NAMENODES]"
+echo "Starting secondary namenodes [$SECONDARY_NAMENODES]"
- "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
--config "$HADOOP_CONF_DIR" \
--hostnames "$SECONDARY_NAMENODES" \
--script "$bin/hdfs" start secondarynamenode
-fi
# eof
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh
index 11788e24b71..33967513c4c 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh
@@ -50,22 +50,13 @@ fi
#---------------------------------------------------------
# secondary namenodes (if any)
-# if there are no secondary namenodes configured it returns
-# 0.0.0.0 or empty string
SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>&-)
-SECONDARY_NAMENODES=${SECONDARY_NAMENODES:-'0.0.0.0'}
-if [ "$SECONDARY_NAMENODES" = '0.0.0.0' ] ; then
- echo \
- "Secondary namenodes are not configured. " \
- "Cannot stop secondary namenodes."
-else
- echo "Stopping secondary namenodes [$SECONDARY_NAMENODES]"
+echo "Stopping secondary namenodes [$SECONDARY_NAMENODES]"
- "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
--config "$HADOOP_CONF_DIR" \
--hostnames "$SECONDARY_NAMENODES" \
--script "$bin/hdfs" stop secondarynamenode
-fi
# eof
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml
index 4d2c6dd0735..976800e0350 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml
@@ -253,9 +253,7 @@
The secondary NameNode merges the fsimage and the edits log files periodically
and keeps edits log size within a limit. It is usually run on a
different machine than the primary NameNode since its memory requirements
- are on the same order as the primary NameNode. The secondary
- NameNode is started by <code>bin/start-dfs.sh</code> on the nodes
- specified in <code>conf/masters</code> file.
+ are on the same order as the primary NameNode.
The start of the checkpoint process on the secondary NameNode is
From 6325888402585cd98b4d517f6907e2185d5e58a6 Mon Sep 17 00:00:00 2001
From: Vinod Kumar Vavilapalli
Date: Mon, 6 Feb 2012 21:05:10 +0000
Subject: [PATCH 13/27] MAPREDUCE-3814. Fixed MRV1 compilation. (Arun C Murthy
via vinodkv) svn merge --ignore-ancestry -c 1241177 ../../trunk/
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1241181 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 2 ++
hadoop-mapreduce-project/build.xml | 34 ----------------------------
2 files changed, 2 insertions(+), 34 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index b703d60fe4b..9a697c2a38b 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -663,6 +663,8 @@ Release 0.23.1 - Unreleased
MAPREDUCE-3747. Initialize queue metrics upfront and added start/finish
time to RM Web-UI. (acmurthy)
+ MAPREDUCE-3814. Fixed MRV1 compilation. (Arun C Murthy via vinodkv)
+
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/build.xml b/hadoop-mapreduce-project/build.xml
index 40c822baedd..b40de5f2afa 100644
--- a/hadoop-mapreduce-project/build.xml
+++ b/hadoop-mapreduce-project/build.xml
@@ -525,43 +525,11 @@
dest.dir="${test.mapred.build.classes}"
classpath="test.classpath"/>
-
-
-
-
-
-
-
-
-
-
-
-
-
@@ -618,8 +586,6 @@
-
-
From caea6d2e855b8170667de7fb49acdcc2bf33bdb2 Mon Sep 17 00:00:00 2001
From: Arun Murthy
Date: Mon, 6 Feb 2012 22:03:30 +0000
Subject: [PATCH 14/27] Merge -c 1241205 from trunk to branch-0.23 to fix
MAPREDUCE-3810. Performance tweaks - reduced logging in AM and defined
hashCode/equals for ResourceRequest & Priority.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1241208 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +
.../v2/app/job/impl/TaskAttemptImpl.java | 18 ++--
.../mapreduce/v2/app/job/impl/TaskImpl.java | 11 ++-
.../mapreduce/v2/app/rm/RMCommunicator.java | 15 +--
.../v2/app/rm/RMContainerAllocator.java | 97 +++++++++++-------
.../v2/app/rm/RMContainerRequestor.java | 62 +++++++-----
.../apache/hadoop/mapreduce/v2/app/MRApp.java | 2 +-
.../mapreduce/v2/app/MRAppBenchmark.java | 98 ++++++++++++++++++-
.../hadoop/yarn/api/records/Priority.java | 29 +++++-
.../yarn/api/records/ResourceRequest.java | 77 ++++++++++++++-
.../api/records/impl/pb/PriorityPBImpl.java | 13 +--
.../impl/pb/ResourceRequestPBImpl.java | 28 +-----
12 files changed, 329 insertions(+), 124 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 9a697c2a38b..8e3d1019eb7 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -665,6 +665,9 @@ Release 0.23.1 - Unreleased
MAPREDUCE-3814. Fixed MRV1 compilation. (Arun C Murthy via vinodkv)
+ MAPREDUCE-3810. Performance tweaks - reduced logging in AM and defined
+ hashCode/equals for ResourceRequest & Priority. (vinodkv via acmurthy)
+
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
index 7cca98031d5..4f504b381a4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
@@ -127,7 +127,7 @@ import org.apache.hadoop.yarn.util.RackResolver;
/**
* Implementation of TaskAttempt interface.
*/
-@SuppressWarnings({ "rawtypes", "deprecation" })
+@SuppressWarnings({ "rawtypes" })
public abstract class TaskAttemptImpl implements
org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt,
EventHandler<TaskAttemptEvent> {
@@ -910,8 +910,10 @@ public abstract class TaskAttemptImpl implements
@SuppressWarnings("unchecked")
@Override
public void handle(TaskAttemptEvent event) {
- LOG.info("Processing " + event.getTaskAttemptID() +
- " of type " + event.getType());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Processing " + event.getTaskAttemptID() + " of type "
+ + event.getType());
+ }
writeLock.lock();
try {
final TaskAttemptState oldState = getState();
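This hunk introduces the guard pattern the rest of the patch applies throughout: LOG.isDebugEnabled() is a cheap boolean check, while the message argument would otherwise be concatenated on every event even when debug logging is off. A minimal sketch using the same commons-logging API:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class GuardedDebug {
      private static final Log LOG = LogFactory.getLog(GuardedDebug.class);

      static void handle(Object event) {
        if (LOG.isDebugEnabled()) {          // no-op when debug is disabled
          LOG.debug("Processing " + event);  // concatenation only runs if needed
        }
      }

      public static void main(String[] args) {
        handle("sample-event");
      }
    }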
@@ -1278,15 +1280,11 @@ public abstract class TaskAttemptImpl implements
TaskAttemptEvent event) {
//set the finish time
taskAttempt.setFinishTime();
- String taskType =
- TypeConverter.fromYarn(taskAttempt.attemptId.getTaskId().getTaskType()).toString();
- LOG.info("In TaskAttemptImpl taskType: " + taskType);
long slotMillis = computeSlotMillis(taskAttempt);
- JobCounterUpdateEvent jce =
- new JobCounterUpdateEvent(taskAttempt.attemptId.getTaskId()
- .getJobId());
+ TaskId taskId = taskAttempt.attemptId.getTaskId();
+ JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskId.getJobId());
jce.addCounterUpdate(
- taskAttempt.attemptId.getTaskId().getTaskType() == TaskType.MAP ?
+ taskId.getTaskType() == TaskType.MAP ?
JobCounter.SLOTS_MILLIS_MAPS : JobCounter.SLOTS_MILLIS_REDUCES,
slotMillis);
taskAttempt.eventHandler.handle(jce);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
index 24a908112d2..9dc135dc1be 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
@@ -81,7 +81,7 @@ import org.apache.hadoop.yarn.state.StateMachineFactory;
/**
* Implementation of Task interface.
*/
-@SuppressWarnings({ "rawtypes", "unchecked", "deprecation" })
+@SuppressWarnings({ "rawtypes", "unchecked" })
public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
private static final Log LOG = LogFactory.getLog(TaskImpl.class);
@@ -505,7 +505,9 @@ public abstract class TaskImpl implements Task, EventHandler {
// This is always called in the Write Lock
private void addAndScheduleAttempt() {
TaskAttempt attempt = createAttempt();
- LOG.info("Created attempt " + attempt.getID());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Created attempt " + attempt.getID());
+ }
switch (attempts.size()) {
case 0:
attempts = Collections.singletonMap(attempt.getID(), attempt);
@@ -537,7 +539,10 @@ public abstract class TaskImpl implements Task, EventHandler {
@Override
public void handle(TaskEvent event) {
- LOG.debug("Processing " + event.getTaskID() + " of type " + event.getType());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Processing " + event.getTaskID() + " of type "
+ + event.getType());
+ }
try {
writeLock.lock();
TaskState oldState = getState();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
index b138e9a6619..5ff838c5257 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
-import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
@@ -46,9 +45,9 @@ import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -149,7 +148,7 @@ public abstract class RMCommunicator extends AbstractService {
LOG.info("minContainerCapability: " + minContainerCapability.getMemory());
LOG.info("maxContainerCapability: " + maxContainerCapability.getMemory());
} catch (Exception are) {
- LOG.info("Exception while registering", are);
+ LOG.error("Exception while registering", are);
throw new YarnException(are);
}
}
@@ -183,7 +182,7 @@ public abstract class RMCommunicator extends AbstractService {
request.setTrackingUrl(historyUrl);
scheduler.finishApplicationMaster(request);
} catch(Exception are) {
- LOG.info("Exception while unregistering ", are);
+ LOG.error("Exception while unregistering ", are);
}
}
@@ -205,7 +204,7 @@ public abstract class RMCommunicator extends AbstractService {
try {
allocatorThread.join();
} catch (InterruptedException ie) {
- LOG.info("InterruptedException while stopping", ie);
+ LOG.warn("InterruptedException while stopping", ie);
}
unregister();
super.stop();
@@ -228,7 +227,7 @@ public abstract class RMCommunicator extends AbstractService {
// TODO: for other exceptions
}
} catch (InterruptedException e) {
- LOG.info("Allocated thread interrupted. Returning.");
+ LOG.warn("Allocated thread interrupted. Returning.");
return;
}
}
@@ -255,7 +254,9 @@ public abstract class RMCommunicator extends AbstractService {
if (UserGroupInformation.isSecurityEnabled()) {
String tokenURLEncodedStr = System.getenv().get(
ApplicationConstants.APPLICATION_MASTER_TOKEN_ENV_NAME);
- LOG.debug("AppMasterToken is " + tokenURLEncodedStr);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("AppMasterToken is " + tokenURLEncodedStr);
+ }
Token<? extends TokenIdentifier> token = new Token();
try {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
index d188b7c42e9..bcb82230d6a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.mapreduce.JobCounter;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.NormalizedResourceEvent;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
@@ -254,28 +255,30 @@ public class RMContainerAllocator extends RMContainerRequestor
@SuppressWarnings({ "unchecked" })
protected synchronized void handleEvent(ContainerAllocatorEvent event) {
- LOG.info("Processing the event " + event.toString());
recalculateReduceSchedule = true;
if (event.getType() == ContainerAllocator.EventType.CONTAINER_REQ) {
ContainerRequestEvent reqEvent = (ContainerRequestEvent) event;
+ JobId jobId = getJob().getID();
+ int supportedMaxContainerCapability =
+ getMaxContainerCapability().getMemory();
if (reqEvent.getAttemptID().getTaskId().getTaskType().equals(TaskType.MAP)) {
if (mapResourceReqt == 0) {
mapResourceReqt = reqEvent.getCapability().getMemory();
int minSlotMemSize = getMinContainerCapability().getMemory();
mapResourceReqt = (int) Math.ceil((float) mapResourceReqt/minSlotMemSize)
* minSlotMemSize;
- eventHandler.handle(new JobHistoryEvent(getJob().getID(),
+ eventHandler.handle(new JobHistoryEvent(jobId,
new NormalizedResourceEvent(org.apache.hadoop.mapreduce.TaskType.MAP,
mapResourceReqt)));
LOG.info("mapResourceReqt:"+mapResourceReqt);
- if (mapResourceReqt > getMaxContainerCapability().getMemory()) {
+ if (mapResourceReqt > supportedMaxContainerCapability) {
String diagMsg = "MAP capability required is more than the supported " +
"max container capability in the cluster. Killing the Job. mapResourceReqt: " +
- mapResourceReqt + " maxContainerCapability:" + getMaxContainerCapability().getMemory();
+ mapResourceReqt + " maxContainerCapability:" + supportedMaxContainerCapability;
LOG.info(diagMsg);
eventHandler.handle(new JobDiagnosticsUpdateEvent(
- getJob().getID(), diagMsg));
- eventHandler.handle(new JobEvent(getJob().getID(), JobEventType.JOB_KILL));
+ jobId, diagMsg));
+ eventHandler.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
}
}
//set the rounded off memory
@@ -288,20 +291,20 @@ public class RMContainerAllocator extends RMContainerRequestor
//round off on slotsize
reduceResourceReqt = (int) Math.ceil((float)
reduceResourceReqt/minSlotMemSize) * minSlotMemSize;
- eventHandler.handle(new JobHistoryEvent(getJob().getID(),
+ eventHandler.handle(new JobHistoryEvent(jobId,
new NormalizedResourceEvent(
org.apache.hadoop.mapreduce.TaskType.REDUCE,
reduceResourceReqt)));
LOG.info("reduceResourceReqt:"+reduceResourceReqt);
- if (reduceResourceReqt > getMaxContainerCapability().getMemory()) {
+ if (reduceResourceReqt > supportedMaxContainerCapability) {
String diagMsg = "REDUCE capability required is more than the " +
"supported max container capability in the cluster. Killing the " +
"Job. reduceResourceReqt: " + reduceResourceReqt +
- " maxContainerCapability:" + getMaxContainerCapability().getMemory();
+ " maxContainerCapability:" + supportedMaxContainerCapability;
LOG.info(diagMsg);
eventHandler.handle(new JobDiagnosticsUpdateEvent(
- getJob().getID(), diagMsg));
- eventHandler.handle(new JobEvent(getJob().getID(), JobEventType.JOB_KILL));
+ jobId, diagMsg));
+ eventHandler.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
}
}
//set the rounded off memory
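Both the map and reduce branches round the requested memory up to a multiple of the minimum slot size before comparing it against the cluster maximum. A worked instance with assumed numbers:

    public class SlotRounding {
      public static void main(String[] args) {
        int requestedMB = 1500;    // assumed task ask
        int minSlotMemSize = 1024; // assumed minimum container capability
        int rounded =
            (int) Math.ceil((float) requestedMB / minSlotMemSize) * minSlotMemSize;
        System.out.println(rounded); // 2048
      }
    }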
@@ -317,6 +320,9 @@ public class RMContainerAllocator extends RMContainerRequestor
} else if (
event.getType() == ContainerAllocator.EventType.CONTAINER_DEALLOCATE) {
+
+ LOG.info("Processing the event " + event.toString());
+
TaskAttemptId aId = event.getAttemptID();
boolean removed = scheduledRequests.remove(aId);
@@ -579,7 +585,7 @@ public class RMContainerAllocator extends RMContainerRequestor
computeIgnoreBlacklisting();
for (ContainerStatus cont : finishedContainers) {
- LOG.info("Received completed container " + cont);
+ LOG.info("Received completed container " + cont.getContainerId());
TaskAttemptId attemptID = assignedRequests.get(cont.getContainerId());
if (attemptID == null) {
LOG.error("Container complete event for unknown container id "
@@ -664,7 +670,9 @@ public class RMContainerAllocator extends RMContainerRequestor
mapsHostMapping.put(host, list);
}
list.add(event.getAttemptID());
- LOG.info("Added attempt req to host " + host);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Added attempt req to host " + host);
+ }
}
for (String rack: event.getRacks()) {
LinkedList<TaskAttemptId> list = mapsRackMapping.get(rack);
@@ -673,7 +681,9 @@ public class RMContainerAllocator extends RMContainerRequestor
mapsRackMapping.put(rack, list);
}
list.add(event.getAttemptID());
- LOG.info("Added attempt req to rack " + rack);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Added attempt req to rack " + rack);
+ }
}
request = new ContainerRequest(event, PRIORITY_MAP);
}
@@ -694,18 +704,21 @@ public class RMContainerAllocator extends RMContainerRequestor
containersAllocated += allocatedContainers.size();
while (it.hasNext()) {
Container allocated = it.next();
- LOG.info("Assigning container " + allocated.getId() +
- " with priority " + allocated.getPriority() +
- " to NM " + allocated.getNodeId());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Assigning container " + allocated.getId()
+ + " with priority " + allocated.getPriority() + " to NM "
+ + allocated.getNodeId());
+ }
// check if allocated container meets memory requirements
// and whether we have any scheduled tasks that need
// a container to be assigned
boolean isAssignable = true;
Priority priority = allocated.getPriority();
+ int allocatedMemory = allocated.getResource().getMemory();
if (PRIORITY_FAST_FAIL_MAP.equals(priority)
|| PRIORITY_MAP.equals(priority)) {
- if (allocated.getResource().getMemory() < mapResourceReqt
+ if (allocatedMemory < mapResourceReqt
|| maps.isEmpty()) {
LOG.info("Cannot assign container " + allocated
+ " for a map as either "
@@ -716,7 +729,7 @@ public class RMContainerAllocator extends RMContainerRequestor
}
}
else if (PRIORITY_REDUCE.equals(priority)) {
- if (allocated.getResource().getMemory() < reduceResourceReqt
+ if (allocatedMemory < reduceResourceReqt
|| reduces.isEmpty()) {
LOG.info("Cannot assign container " + allocated
+ " for a reduce as either "
@@ -730,15 +743,17 @@ public class RMContainerAllocator extends RMContainerRequestor
boolean blackListed = false;
ContainerRequest assigned = null;
+ ContainerId allocatedContainerId = allocated.getId();
if (isAssignable) {
// do not assign if allocated container is on a
// blacklisted host
- blackListed = isNodeBlacklisted(allocated.getNodeId().getHost());
+ String allocatedHost = allocated.getNodeId().getHost();
+ blackListed = isNodeBlacklisted(allocatedHost);
if (blackListed) {
// we need to request for a new container
// and release the current one
LOG.info("Got allocated container on a blacklisted "
- + " host "+allocated.getNodeId().getHost()
+ + " host "+allocatedHost
+". Releasing container " + allocated);
// find the request matching this allocated container
@@ -775,11 +790,13 @@ public class RMContainerAllocator extends RMContainerRequestor
eventHandler.handle(new TaskAttemptContainerAssignedEvent(
assigned.attemptID, allocated, applicationACLs));
- assignedRequests.add(allocated.getId(), assigned.attemptID);
+ assignedRequests.add(allocatedContainerId, assigned.attemptID);
- LOG.info("Assigned container (" + allocated + ") " +
- " to task " + assigned.attemptID +
- " on node " + allocated.getNodeId().toString());
+ if (LOG.isDebugEnabled()) {
+ LOG.info("Assigned container (" + allocated + ") "
+ + " to task " + assigned.attemptID + " on node "
+ + allocated.getNodeId().toString());
+ }
}
else {
//not assigned to any request, release the container
@@ -794,7 +811,7 @@ public class RMContainerAllocator extends RMContainerRequestor
// or if we could not assign it
if (blackListed || assigned == null) {
containersReleased++;
- release(allocated.getId());
+ release(allocatedContainerId);
}
}
}
@@ -807,10 +824,14 @@ public class RMContainerAllocator extends RMContainerRequestor
LOG.info("Assigning container " + allocated + " to fast fail map");
assigned = assignToFailedMap(allocated);
} else if (PRIORITY_REDUCE.equals(priority)) {
- LOG.info("Assigning container " + allocated + " to reduce");
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Assigning container " + allocated + " to reduce");
+ }
assigned = assignToReduce(allocated);
} else if (PRIORITY_MAP.equals(priority)) {
- LOG.info("Assigning container " + allocated + " to map");
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Assigning container " + allocated + " to map");
+ }
assigned = assignToMap(allocated);
} else {
LOG.warn("Container allocated at unwanted priority: " + priority +
@@ -897,7 +918,9 @@ public class RMContainerAllocator extends RMContainerRequestor
String host = allocated.getNodeId().getHost();
LinkedList<TaskAttemptId> list = mapsHostMapping.get(host);
while (list != null && list.size() > 0) {
- LOG.info("Host matched to the request list " + host);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Host matched to the request list " + host);
+ }
TaskAttemptId tId = list.removeFirst();
if (maps.containsKey(tId)) {
assigned = maps.remove(tId);
@@ -906,7 +929,9 @@ public class RMContainerAllocator extends RMContainerRequestor
jce.addCounterUpdate(JobCounter.DATA_LOCAL_MAPS, 1);
eventHandler.handle(jce);
hostLocalAssigned++;
- LOG.info("Assigned based on host match " + host);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Assigned based on host match " + host);
+ }
break;
}
}
@@ -922,7 +947,9 @@ public class RMContainerAllocator extends RMContainerRequestor
jce.addCounterUpdate(JobCounter.RACK_LOCAL_MAPS, 1);
eventHandler.handle(jce);
rackLocalAssigned++;
- LOG.info("Assigned based on rack match " + rack);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Assigned based on rack match " + rack);
+ }
break;
}
}
@@ -933,7 +960,9 @@ public class RMContainerAllocator extends RMContainerRequestor
new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
eventHandler.handle(jce);
- LOG.info("Assigned based on * match");
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Assigned based on * match");
+ }
break;
}
}
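The surrounding method walks a fixed locality cascade for each allocated map container: a host-local request first, then a rack-local one, then anything queued under the "*" key, incrementing DATA_LOCAL_MAPS, RACK_LOCAL_MAPS, or OTHER_LOCAL_MAPS accordingly. A simplified stand-in for that control flow (the real tables map hosts and racks to lists of task attempt ids):

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.HashMap;
    import java.util.Map;

    public class LocalityCascade {
      static String assign(String host, String rack,
          Map<String, Deque<String>> byHost,
          Map<String, Deque<String>> byRack,
          Deque<String> any) {
        Deque<String> h = byHost.get(host);
        if (h != null && !h.isEmpty()) return h.removeFirst(); // DATA_LOCAL_MAPS
        Deque<String> r = byRack.get(rack);
        if (r != null && !r.isEmpty()) return r.removeFirst(); // RACK_LOCAL_MAPS
        return any.isEmpty() ? null : any.removeFirst();       // OTHER_LOCAL_MAPS
      }

      public static void main(String[] args) {
        Deque<String> any = new ArrayDeque<String>();
        any.add("attempt_0");
        // No host or rack match here, so the "*" queue is drained:
        System.out.println(assign("h1", "/rack1",
            new HashMap<String, Deque<String>>(),
            new HashMap<String, Deque<String>>(), any)); // attempt_0
      }
    }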
@@ -953,8 +982,7 @@ public class RMContainerAllocator extends RMContainerRequestor
new HashSet<TaskAttemptId>();
void add(ContainerId containerId, TaskAttemptId tId) {
- LOG.info("Assigned container " + containerId.toString()
- + " to " + tId);
+ LOG.info("Assigned container " + containerId.toString() + " to " + tId);
containerToAttemptMap.put(containerId, tId);
if (tId.getTaskId().getTaskType().equals(TaskType.MAP)) {
maps.put(tId, containerId);
@@ -963,6 +991,7 @@ public class RMContainerAllocator extends RMContainerRequestor
}
}
+ @SuppressWarnings("unchecked")
void preemptReduce(int toPreempt) {
List<TaskAttemptId> reduceList = new ArrayList<TaskAttemptId>
(reduces.keySet());
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
index 7aa638afe6b..ea3101a68d5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
@@ -155,13 +155,14 @@ public abstract class RMContainerRequestor extends RMCommunicator {
lastClusterNmCount = clusterNmCount;
clusterNmCount = allocateResponse.getNumClusterNodes();
- LOG.info("getResources() for " + applicationId + ":" + " ask="
- + ask.size() + " release= " + release.size() +
- " newContainers=" + response.getAllocatedContainers().size() +
- " finishedContainers=" +
- response.getCompletedContainersStatuses().size() +
- " resourcelimit=" + availableResources +
- " knownNMs=" + clusterNmCount);
+ if (ask.size() > 0 || release.size() > 0) {
+ LOG.info("getResources() for " + applicationId + ":" + " ask="
+ + ask.size() + " release= " + release.size() + " newContainers="
+ + response.getAllocatedContainers().size() + " finishedContainers="
+ + response.getCompletedContainersStatuses().size()
+ + " resourcelimit=" + availableResources + " knownNMs="
+ + clusterNmCount);
+ }
ask.clear();
release.clear();
@@ -172,6 +173,9 @@ public abstract class RMContainerRequestor extends RMCommunicator {
// knownNodeCount is based on node managers, not hosts. blacklisting is
// currently based on hosts.
protected void computeIgnoreBlacklisting() {
+ if (!nodeBlacklistingEnabled) {
+ return;
+ }
if (blacklistDisablePercent != -1
&& (blacklistedNodeCount != blacklistedNodes.size() ||
clusterNmCount != lastClusterNmCount)) {
@@ -200,7 +204,9 @@ public abstract class RMContainerRequestor extends RMCommunicator {
return;
}
if (blacklistedNodes.contains(hostName)) {
- LOG.info("Host " + hostName + " is already blacklisted.");
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Host " + hostName + " is already blacklisted.");
+ }
return; //already blacklisted
}
Integer failures = nodeFailures.remove(hostName);
@@ -293,7 +299,9 @@ public abstract class RMContainerRequestor extends RMCommunicator {
if (remoteRequests == null) {
remoteRequests = new HashMap<String, Map<Resource, ResourceRequest>>();
this.remoteRequestsTable.put(priority, remoteRequests);
- LOG.info("Added priority=" + priority);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Added priority=" + priority);
+ }
}
Map<Resource, ResourceRequest> reqMap = remoteRequests.get(resourceName);
if (reqMap == null) {
@@ -313,10 +321,12 @@ public abstract class RMContainerRequestor extends RMCommunicator {
// Note this down for next interaction with ResourceManager
ask.add(remoteRequest);
- LOG.info("addResourceRequest:" + " applicationId=" + applicationId.getId()
- + " priority=" + priority.getPriority() + " resourceName=" + resourceName
- + " numContainers=" + remoteRequest.getNumContainers() + " #asks="
- + ask.size());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("addResourceRequest:" + " applicationId="
+ + applicationId.getId() + " priority=" + priority.getPriority()
+ + " resourceName=" + resourceName + " numContainers="
+ + remoteRequest.getNumContainers() + " #asks=" + ask.size());
+ }
}
private void decResourceRequest(Priority priority, String resourceName,
@@ -328,16 +338,20 @@ public abstract class RMContainerRequestor extends RMCommunicator {
// as we modify the resource requests by filtering out blacklisted hosts
// when they are added, this value may be null when being
// decremented
- LOG.debug("Not decrementing resource as " + resourceName
- + " is not present in request table");
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Not decrementing resource as " + resourceName
+ + " is not present in request table");
+ }
return;
}
ResourceRequest remoteRequest = reqMap.get(capability);
- LOG.info("BEFORE decResourceRequest:" + " applicationId=" + applicationId.getId()
- + " priority=" + priority.getPriority() + " resourceName=" + resourceName
- + " numContainers=" + remoteRequest.getNumContainers() + " #asks="
- + ask.size());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("BEFORE decResourceRequest:" + " applicationId="
+ + applicationId.getId() + " priority=" + priority.getPriority()
+ + " resourceName=" + resourceName + " numContainers="
+ + remoteRequest.getNumContainers() + " #asks=" + ask.size());
+ }
remoteRequest.setNumContainers(remoteRequest.getNumContainers() -1);
if (remoteRequest.getNumContainers() == 0) {
@@ -355,10 +369,12 @@ public abstract class RMContainerRequestor extends RMCommunicator {
//already have it.
}
- LOG.info("AFTER decResourceRequest:" + " applicationId="
- + applicationId.getId() + " priority=" + priority.getPriority()
- + " resourceName=" + resourceName + " numContainers="
- + remoteRequest.getNumContainers() + " #asks=" + ask.size());
+ if (LOG.isDebugEnabled()) {
+ LOG.info("AFTER decResourceRequest:" + " applicationId="
+ + applicationId.getId() + " priority=" + priority.getPriority()
+ + " resourceName=" + resourceName + " numContainers="
+ + remoteRequest.getNumContainers() + " #asks=" + ask.size());
+ }
}
protected void release(ContainerId containerId) {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
index 3eb214d79c9..00bdaebfe80 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
@@ -436,7 +436,7 @@ public class MRApp extends MRAppMaster {
return new ClientService(){
@Override
public InetSocketAddress getBindAddress() {
- return null;
+ return NetUtils.createSocketAddr("localhost:9876");
}
@Override
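Replacing the null bind address with a fixed localhost endpoint keeps the benchmark's RM communication path working; the motivation is inferred from the surrounding code rather than stated in the patch, since registration reads the client service's bind address. NetUtils.createSocketAddr simply parses a host:port string:

    import java.net.InetSocketAddress;
    import org.apache.hadoop.net.NetUtils;

    public class BindAddressStub {
      public static void main(String[] args) {
        InetSocketAddress addr = NetUtils.createSocketAddr("localhost:9876");
        System.out.println(addr.getHostName() + ":" + addr.getPort()); // localhost:9876
      }
    }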
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
index 279b81199ae..ebb20b06565 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.mapreduce.v2.app;
+import java.util.ArrayList;
+import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
@@ -29,16 +31,30 @@ import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
+import org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator;
import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.AMRMProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.AMResponse;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.service.AbstractService;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+import org.apache.hadoop.yarn.util.Records;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
+import org.junit.Test;
public class MRAppBenchmark {
@@ -167,17 +183,89 @@ public class MRAppBenchmark {
}
}
+ @Test
public void benchmark1() throws Exception {
- int maps = 100000;
- int reduces = 100;
+ int maps = 100; // Adjust for benchmarking. Start with thousands.
+ int reduces = 0;
System.out.println("Running benchmark with maps:"+maps +
" reduces:"+reduces);
- run(new MRApp(maps, reduces, true, this.getClass().getName(), true));
+ run(new MRApp(maps, reduces, true, this.getClass().getName(), true) {
+
+ @Override
+ protected ContainerAllocator createContainerAllocator(
+ ClientService clientService, AppContext context) {
+ return new RMContainerAllocator(clientService, context) {
+ @Override
+ protected AMRMProtocol createSchedulerProxy() {
+ return new AMRMProtocol() {
+
+ @Override
+ public RegisterApplicationMasterResponse
+ registerApplicationMaster(
+ RegisterApplicationMasterRequest request)
+ throws YarnRemoteException {
+ RegisterApplicationMasterResponse response =
+ Records.newRecord(RegisterApplicationMasterResponse.class);
+ response.setMinimumResourceCapability(BuilderUtils
+ .newResource(1024));
+ response.setMaximumResourceCapability(BuilderUtils
+ .newResource(10240));
+ return response;
+ }
+
+ @Override
+ public FinishApplicationMasterResponse finishApplicationMaster(
+ FinishApplicationMasterRequest request)
+ throws YarnRemoteException {
+ FinishApplicationMasterResponse response =
+ Records.newRecord(FinishApplicationMasterResponse.class);
+ return response;
+ }
+
+ @Override
+ public AllocateResponse allocate(AllocateRequest request)
+ throws YarnRemoteException {
+
+ AllocateResponse response =
+ Records.newRecord(AllocateResponse.class);
+ List<ResourceRequest> askList = request.getAskList();
+ List<Container> containers = new ArrayList<Container>();
+ for (ResourceRequest req : askList) {
+ if (!"*".equals(req.getHostName())) {
+ continue;
+ }
+ int numContainers = req.getNumContainers();
+ for (int i = 0; i < numContainers; i++) {
+ ContainerId containerId =
+ BuilderUtils.newContainerId(
+ request.getApplicationAttemptId(),
+ request.getResponseId() + i);
+ containers.add(BuilderUtils
+ .newContainer(containerId, BuilderUtils.newNodeId("host"
+ + containerId.getId(), 2345),
+ "host" + containerId.getId() + ":5678", req
+ .getCapability(), req.getPriority(), null));
+ }
+ }
+
+ AMResponse amResponse = Records.newRecord(AMResponse.class);
+ amResponse.setAllocatedContainers(containers);
+ amResponse.setResponseId(request.getResponseId() + 1);
+ response.setAMResponse(amResponse);
+ response.setNumClusterNodes(350);
+ return response;
+ }
+ };
+ }
+ };
+ }
+ });
}
+ @Test
public void benchmark2() throws Exception {
- int maps = 4000;
- int reduces = 1000;
+ int maps = 100; // Adjust for benchmarking; start with a couple of thousand.
+ int reduces = 50;
int maxConcurrentRunningTasks = 500;
System.out.println("Running benchmark with throttled running tasks with " +
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Priority.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Priority.java
index bed0788365e..fea1f48aea3 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Priority.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Priority.java
@@ -23,7 +23,7 @@ package org.apache.hadoop.yarn.api.records;
* allocation
*
*/
-public interface Priority extends Comparable<Priority> {
+public abstract class Priority implements Comparable<Priority> {
/**
* Get the assigned priority
@@ -37,4 +37,31 @@ public interface Priority extends Comparable<Priority> {
*/
public abstract void setPriority(int priority);
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + getPriority();
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (obj == null)
+ return false;
+ if (getClass() != obj.getClass())
+ return false;
+ Priority other = (Priority) obj;
+ if (getPriority() != other.getPriority())
+ return false;
+ return true;
+ }
+
+ @Override
+ public int compareTo(Priority other) {
+ return this.getPriority() - other.getPriority();
+ }
+
}
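As an aside on what the new value semantics buy: two Priority records carrying the same integer now compare equal, hash identically, and order by value, so they can safely serve as map keys. A minimal sketch, not part of the patch: SimplePriority below is a hypothetical subclass used only to make the abstract class instantiable (real instances are typically obtained via Records.newRecord). Note the subtraction-based compareTo is safe only because priorities are small non-negative ints; it would overflow for arbitrary values.

    // Hypothetical concrete subclass, for illustration only.
    class SimplePriority extends Priority {
      private int value;
      @Override public int getPriority() { return value; }
      @Override public void setPriority(int priority) { this.value = priority; }
    }

    public class PriorityContractDemo {
      public static void main(String[] args) {
        Priority a = new SimplePriority(); a.setPriority(5);
        Priority b = new SimplePriority(); b.setPriority(5);
        // Distinct records with the same value are now interchangeable:
        System.out.println(a.equals(b));                  // true
        System.out.println(a.hashCode() == b.hashCode()); // true
        System.out.println(a.compareTo(b) == 0);          // true, consistent with equals
      }
    }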
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
index 4072da1b613..72d703eeaf7 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
@@ -48,7 +48,7 @@ import org.apache.hadoop.yarn.api.AMRMProtocol;
*/
@Public
@Stable
-public interface ResourceRequest extends Comparable<ResourceRequest> {
+public abstract class ResourceRequest implements Comparable<ResourceRequest> {
/**
* Get the <code>Priority</code> of the request.
* @return <code>Priority</code> of the request
@@ -121,4 +121,79 @@ public interface ResourceRequest extends Comparable<ResourceRequest> {
@Public
@Stable
public abstract void setNumContainers(int numContainers);
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ Resource capability = getCapability();
+ String hostName = getHostName();
+ Priority priority = getPriority();
+ result =
+ prime * result + ((capability == null) ? 0 : capability.hashCode());
+ result = prime * result + ((hostName == null) ? 0 : hostName.hashCode());
+ result = prime * result + getNumContainers();
+ result = prime * result + ((priority == null) ? 0 : priority.hashCode());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (obj == null)
+ return false;
+ if (getClass() != obj.getClass())
+ return false;
+ ResourceRequest other = (ResourceRequest) obj;
+ Resource capability = getCapability();
+ if (capability == null) {
+ if (other.getCapability() != null)
+ return false;
+ } else if (!capability.equals(other.getCapability()))
+ return false;
+ String hostName = getHostName();
+ if (hostName == null) {
+ if (other.getHostName() != null)
+ return false;
+ } else if (!hostName.equals(other.getHostName()))
+ return false;
+ if (getNumContainers() != other.getNumContainers())
+ return false;
+ Priority priority = getPriority();
+ if (priority == null) {
+ if (other.getPriority() != null)
+ return false;
+ } else if (!priority.equals(other.getPriority()))
+ return false;
+ return true;
+ }
+
+ @Override
+ public int compareTo(ResourceRequest other) {
+ int priorityComparison = this.getPriority().compareTo(other.getPriority());
+ if (priorityComparison == 0) {
+ int hostNameComparison =
+ this.getHostName().compareTo(other.getHostName());
+ if (hostNameComparison == 0) {
+ int capabilityComparison =
+ this.getCapability().compareTo(other.getCapability());
+ if (capabilityComparison == 0) {
+ int numContainersComparison =
+ this.getNumContainers() - other.getNumContainers();
+ if (numContainersComparison == 0) {
+ return 0;
+ } else {
+ return numContainersComparison;
+ }
+ } else {
+ return capabilityComparison;
+ }
+ } else {
+ return hostNameComparison;
+ }
+ } else {
+ return priorityComparison;
+ }
+ }
}
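The nested compareTo above is a four-key lexicographic order: priority, then host name, then capability, then number of containers. Purely as an illustration (not part of the patch), the same ordering written as a Java 8 comparator chain; this assumes, as the patch's own compareTo already does, that Priority and Resource implement Comparable and that all four fields are non-null:

    import java.util.Comparator;
    import org.apache.hadoop.yarn.api.records.ResourceRequest;

    final class ResourceRequestOrdering {
      // Same four-key order as ResourceRequest.compareTo.
      static final Comparator<ResourceRequest> ORDER = Comparator
          .comparing(ResourceRequest::getPriority)              // 1. priority
          .thenComparing(ResourceRequest::getHostName)          // 2. host name
          .thenComparing(ResourceRequest::getCapability)        // 3. capability
          .thenComparingInt(ResourceRequest::getNumContainers); // 4. container count
    }

Note the asymmetry with equals: equals tolerates null fields, while compareTo dereferences them unconditionally, so requests must be fully populated before they go into a sorted collection.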
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PriorityPBImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PriorityPBImpl.java
index 11bf6d0dd3d..9a3f9bb2c95 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PriorityPBImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PriorityPBImpl.java
@@ -18,15 +18,11 @@
package org.apache.hadoop.yarn.api.records.impl.pb;
-
import org.apache.hadoop.yarn.api.records.Priority;
-import org.apache.hadoop.yarn.api.records.ProtoBase;
import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto;
import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProtoOrBuilder;
-
-
-public class PriorityPBImpl extends ProtoBase<PriorityProto> implements Priority {
+public class PriorityPBImpl extends Priority {
PriorityProto proto = PriorityProto.getDefaultInstance();
PriorityProto.Builder builder = null;
boolean viaProto = false;
@@ -66,11 +62,4 @@ public class PriorityPBImpl extends ProtoBase<PriorityProto> implements Priority
builder.setPriority((priority));
}
-
- @Override
- public int compareTo(Priority other) {
- return this.getPriority() - other.getPriority();
- }
-
-
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
index 33dba0d6c02..f3b8ffa89f9 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
@@ -20,19 +20,14 @@ package org.apache.hadoop.yarn.api.records.impl.pb;
import org.apache.hadoop.yarn.api.records.Priority;
-import org.apache.hadoop.yarn.api.records.ProtoBase;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.apache.hadoop.yarn.api.records.impl.pb.PriorityPBImpl;
-import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProtoOrBuilder;
-
-
-public class ResourceRequestPBImpl extends ProtoBase<ResourceRequestProto> implements ResourceRequest {
+public class ResourceRequestPBImpl extends ResourceRequest {
ResourceRequestProto proto = ResourceRequestProto.getDefaultInstance();
ResourceRequestProto.Builder builder = null;
boolean viaProto = false;
@@ -168,25 +163,4 @@ public class ResourceRequestPBImpl extends ProtoBase<ResourceRequestProto> imple
return ((ResourcePBImpl)t).getProto();
}
- @Override
- public int compareTo(ResourceRequest other) {
- if (this.getPriority().compareTo(other.getPriority()) == 0) {
- if (this.getHostName().equals(other.getHostName())) {
- if (this.getCapability().equals(other.getCapability())) {
- if (this.getNumContainers() == other.getNumContainers()) {
- return 0;
- } else {
- return this.getNumContainers() - other.getNumContainers();
- }
- } else {
- return this.getCapability().compareTo(other.getCapability());
- }
- } else {
- return this.getHostName().compareTo(other.getHostName());
- }
- } else {
- return this.getPriority().compareTo(other.getPriority());
- }
- }
-
}
From 287501b2bc82b9d5f458910d9f0bfbd969dcffb1 Mon Sep 17 00:00:00 2001
From: Arun Murthy
Date: Mon, 6 Feb 2012 22:07:51 +0000
Subject: [PATCH 15/27] Merge -c 1241209 from trunk to branch-0.23 to fix
MAPREDUCE-3813. Added a cache for resolved racks.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1241210 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 2 +
.../apache/hadoop/yarn/util/RackResolver.java | 10 ++-
.../hadoop/yarn/util/TestRackResolver.java | 71 +++++++++++++++++++
3 files changed, 82 insertions(+), 1 deletion(-)
create mode 100644 hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestRackResolver.java
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 8e3d1019eb7..8e2f587f5a3 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -668,6 +668,8 @@ Release 0.23.1 - Unreleased
MAPREDUCE-3810. Performance tweaks - reduced logging in AM and defined
hashcode/equals for ResourceRequest & Priority. (vinodkv via acmurthy)
+ MAPREDUCE-3813. Added a cache for resolved racks. (vinodkv via acmurthy)
+
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java
index 4b70afe74e7..efbc90e2bb9 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java
@@ -26,6 +26,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.net.CachedDNSToSwitchMapping;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;
@@ -50,7 +51,14 @@ public class RackResolver {
try {
Constructor<? extends DNSToSwitchMapping> dnsToSwitchMappingConstructor
= dnsToSwitchMappingClass.getConstructor();
- dnsToSwitchMapping = dnsToSwitchMappingConstructor.newInstance();
+ DNSToSwitchMapping newInstance =
+ dnsToSwitchMappingConstructor.newInstance();
+ // Wrap around the configured class with the Cached implementation so as
+ // to save on repetitive lookups.
+ // Check if the impl is already caching, to avoid double caching.
+ dnsToSwitchMapping =
+ ((newInstance instanceof CachedDNSToSwitchMapping) ? newInstance
+ : new CachedDNSToSwitchMapping(newInstance));
} catch (Exception e) {
throw new RuntimeException(e);
}
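The wrap in this hunk is a decorate-unless-already-decorated idiom. A standalone sketch of the same logic, using ScriptBasedMapping as a stand-in for whatever class the net.topology.node.switch.mapping.impl key names; ScriptBasedMapping already extends CachedDNSToSwitchMapping, so the instanceof guard keeps it from being wrapped twice:

    import org.apache.hadoop.net.CachedDNSToSwitchMapping;
    import org.apache.hadoop.net.DNSToSwitchMapping;
    import org.apache.hadoop.net.ScriptBasedMapping;

    public class CachingWrapDemo {
      public static void main(String[] args) {
        DNSToSwitchMapping configured = new ScriptBasedMapping();
        // Cache repeated lookups, but never stack one cache on another:
        DNSToSwitchMapping mapping =
            (configured instanceof CachedDNSToSwitchMapping)
                ? configured
                : new CachedDNSToSwitchMapping(configured);
        // mapping.resolve(hosts) now serves repeat hosts from the cache.
      }
    }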
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestRackResolver.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestRackResolver.java
new file mode 100644
index 00000000000..a82f129d517
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestRackResolver.java
@@ -0,0 +1,71 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.util;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.net.DNSToSwitchMapping;
+import org.apache.hadoop.net.Node;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestRackResolver {
+
+ public static final class MyResolver implements DNSToSwitchMapping {
+
+ int numHost1 = 0;
+
+ @Override
+ public List<String> resolve(List<String> hostList) {
+ // Only one host at a time
+ Assert.assertTrue("hostList size is " + hostList.size(),
+ hostList.size() <= 1);
+ List<String> returnList = new ArrayList<String>();
+ if (hostList.isEmpty()) {
+ return returnList;
+ }
+ if (hostList.get(0).equals("host1")) {
+ numHost1++;
+ returnList.add("/rack1");
+ }
+ // This resolver must not be invoked again for the same host, as
+ // RackResolver is expected to serve repeat lookups from its cache.
+ Assert.assertTrue(numHost1 <= 1);
+ return returnList;
+ }
+
+ }
+
+ @Test
+ public void testCaching() {
+ Configuration conf = new Configuration();
+ conf.setClass(
+ CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
+ MyResolver.class, DNSToSwitchMapping.class);
+ RackResolver.init(conf);
+ Node node = RackResolver.resolve("host1");
+ Assert.assertEquals("/rack1", node.getNetworkLocation());
+ node = RackResolver.resolve("host1");
+ Assert.assertEquals("/rack1", node.getNetworkLocation());
+ }
+
+}
From 15c308d948b5ca34ab27634ad9ed7db4f9de4bbb Mon Sep 17 00:00:00 2001
From: Vinod Kumar Vavilapalli
Date: Mon, 6 Feb 2012 22:17:18 +0000
Subject: [PATCH 16/27] MAPREDUCE-3808. Fixed an NPE in FileOutputCommitter for
jobs with maps but no reduces. Contributed by Robert Joseph Evans. svn merge
--ignore-ancestry -c 1241217 ../../trunk/
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1241218 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +
.../hadoop/mapred/FileOutputCommitter.java | 21 ++++---
.../lib/output/FileOutputCommitter.java | 58 ++++++++++---------
.../mapred/TestFileOutputCommitter.java | 34 ++++++++++-
4 files changed, 79 insertions(+), 37 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 8e2f587f5a3..67582531bee 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -670,6 +670,9 @@ Release 0.23.1 - Unreleased
MAPREDUCE-3813. Added a cache for resolved racks. (vinodkv via acmurthy)
+ MAPREDUCE-3808. Fixed an NPE in FileOutputCommitter for jobs with maps
+ but no reduces. (Robert Joseph Evans via vinodkv)
+
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputCommitter.java
index c23e9a93789..a6190d2060d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputCommitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputCommitter.java
@@ -85,18 +85,21 @@ public class FileOutputCommitter extends OutputCommitter {
*/
@Private
Path getJobAttemptPath(JobContext context) {
- return org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter
- .getJobAttemptPath(context, getOutputPath(context));
+ Path out = getOutputPath(context);
+ return out == null ? null :
+ org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter
+ .getJobAttemptPath(context, out);
}
@Private
Path getTaskAttemptPath(TaskAttemptContext context) throws IOException {
- return getTaskAttemptPath(context, getOutputPath(context));
+ Path out = getOutputPath(context);
+ return out == null ? null : getTaskAttemptPath(context, out);
}
private Path getTaskAttemptPath(TaskAttemptContext context, Path out) throws IOException {
Path workPath = FileOutputFormat.getWorkOutputPath(context.getJobConf());
- if(workPath == null) {
+ if(workPath == null && out != null) {
return org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter
.getTaskAttemptPath(context, out);
}
@@ -110,14 +113,17 @@ public class FileOutputCommitter extends OutputCommitter {
* @return the path where the output of a committed task is stored until
* the entire job is committed.
*/
+ @Private
Path getCommittedTaskPath(TaskAttemptContext context) {
- return org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter
- .getCommittedTaskPath(context, getOutputPath(context));
+ Path out = getOutputPath(context);
+ return out == null ? null :
+ org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter
+ .getCommittedTaskPath(context, out);
}
public Path getWorkPath(TaskAttemptContext context, Path outputPath)
throws IOException {
- return getTaskAttemptPath(context, outputPath);
+ return outputPath == null ? null : getTaskAttemptPath(context, outputPath);
}
@Override
@@ -156,6 +162,7 @@ public class FileOutputCommitter extends OutputCommitter {
getWrapped(context).abortJob(context, state);
}
+ @Override
public void setupTask(TaskAttemptContext context) throws IOException {
getWrapped(context).setupTask(context);
}
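The guards above all defend against the same condition: FileOutputFormat.getOutputPath() returns null whenever a job never called setOutputPath(), which is legitimate for map-only jobs that write to an external store. A minimal sketch of that condition (the demo class itself is hypothetical):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.FileOutputFormat;
    import org.apache.hadoop.mapred.JobConf;

    public class NoOutputPathDemo {
      public static void main(String[] args) {
        JobConf conf = new JobConf();
        // setOutputPath(conf, ...) deliberately never called, as in a
        // map-only job that writes to HBase or another external store.
        Path out = FileOutputFormat.getOutputPath(conf);
        System.out.println(out); // null: every derived path must tolerate this
      }
    }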
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
index 0845f153504..7bad09f3039 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
@@ -495,36 +495,40 @@ public class FileOutputCommitter extends OutputCommitter {
@Override
public void recoverTask(TaskAttemptContext context)
throws IOException {
- context.progress();
- TaskAttemptID attemptId = context.getTaskAttemptID();
- int previousAttempt = getAppAttemptId(context) - 1;
- if (previousAttempt < 0) {
- throw new IOException ("Cannot recover task output for first attempt...");
- }
-
- Path committedTaskPath = getCommittedTaskPath(context);
- Path previousCommittedTaskPath = getCommittedTaskPath(
- previousAttempt, context);
- FileSystem fs = committedTaskPath.getFileSystem(context.getConfiguration());
-
- LOG.debug("Trying to recover task from " + previousCommittedTaskPath
- + " into " + committedTaskPath);
- if (fs.exists(previousCommittedTaskPath)) {
- if(fs.exists(committedTaskPath)) {
- if(!fs.delete(committedTaskPath, true)) {
- throw new IOException("Could not delete "+committedTaskPath);
+ if(hasOutputPath()) {
+ context.progress();
+ TaskAttemptID attemptId = context.getTaskAttemptID();
+ int previousAttempt = getAppAttemptId(context) - 1;
+ if (previousAttempt < 0) {
+ throw new IOException ("Cannot recover task output for first attempt...");
+ }
+
+ Path committedTaskPath = getCommittedTaskPath(context);
+ Path previousCommittedTaskPath = getCommittedTaskPath(
+ previousAttempt, context);
+ FileSystem fs = committedTaskPath.getFileSystem(context.getConfiguration());
+
+ LOG.debug("Trying to recover task from " + previousCommittedTaskPath
+ + " into " + committedTaskPath);
+ if (fs.exists(previousCommittedTaskPath)) {
+ if(fs.exists(committedTaskPath)) {
+ if(!fs.delete(committedTaskPath, true)) {
+ throw new IOException("Could not delete "+committedTaskPath);
+ }
}
+ //Rename can fail if the parent directory does not yet exist.
+ Path committedParent = committedTaskPath.getParent();
+ fs.mkdirs(committedParent);
+ if(!fs.rename(previousCommittedTaskPath, committedTaskPath)) {
+ throw new IOException("Could not rename " + previousCommittedTaskPath +
+ " to " + committedTaskPath);
+ }
+ LOG.info("Saved output of " + attemptId + " to " + committedTaskPath);
+ } else {
+ LOG.warn(attemptId+" had no output to recover.");
}
- //Rename can fail if the parent directory does not yet exist.
- Path committedParent = committedTaskPath.getParent();
- fs.mkdirs(committedParent);
- if(!fs.rename(previousCommittedTaskPath, committedTaskPath)) {
- throw new IOException("Could not rename " + previousCommittedTaskPath +
- " to " + committedTaskPath);
- }
- LOG.info("Saved output of " + attemptId + " to " + committedTaskPath);
} else {
- LOG.warn(attemptId+" had no output to recover.");
+ LOG.warn("Output Path is null in recoverTask()");
}
}
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileOutputCommitter.java
index e8a67cd848f..0859571d1f2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileOutputCommitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileOutputCommitter.java
@@ -104,7 +104,9 @@ public class TestFileOutputCommitter extends TestCase {
writeOutput(theRecordWriter, tContext);
// do commit
- committer.commitTask(tContext);
+ if(committer.needsTaskCommit(tContext)) {
+ committer.commitTask(tContext);
+ }
Path jobTempDir1 = committer.getCommittedTaskPath(tContext);
File jtd1 = new File(jobTempDir1.toUri().getPath());
assertTrue(jtd1.exists());
@@ -188,7 +190,9 @@ public class TestFileOutputCommitter extends TestCase {
writeOutput(theRecordWriter, tContext);
// do commit
- committer.commitTask(tContext);
+ if(committer.needsTaskCommit(tContext)) {
+ committer.commitTask(tContext);
+ }
committer.commitJob(jContext);
// validate output
@@ -214,7 +218,9 @@ public class TestFileOutputCommitter extends TestCase {
writeMapFileOutput(theRecordWriter, tContext);
// do commit
- committer.commitTask(tContext);
+ if(committer.needsTaskCommit(tContext)) {
+ committer.commitTask(tContext);
+ }
committer.commitJob(jContext);
// validate output
@@ -222,6 +228,28 @@ public class TestFileOutputCommitter extends TestCase {
FileUtil.fullyDelete(new File(outDir.toString()));
}
+ public void testMapOnlyNoOutput() throws Exception {
+ JobConf conf = new JobConf();
+ //This is not set on purpose. FileOutputFormat.setOutputPath(conf, outDir);
+ conf.set(JobContext.TASK_ATTEMPT_ID, attempt);
+ JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
+ TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
+ FileOutputCommitter committer = new FileOutputCommitter();
+
+ // setup
+ committer.setupJob(jContext);
+ committer.setupTask(tContext);
+
+ if(committer.needsTaskCommit(tContext)) {
+ // do commit
+ committer.commitTask(tContext);
+ }
+ committer.commitJob(jContext);
+
+ // validate output
+ FileUtil.fullyDelete(new File(outDir.toString()));
+ }
+
public void testAbort() throws IOException, InterruptedException {
JobConf conf = new JobConf();
FileOutputFormat.setOutputPath(conf, outDir);
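The test changes converge on one defensive sequence: ask needsTaskCommit() before commitTask(), so the commit path is only entered when there is task output to promote. Presumably (the patch does not spell it out) needsTaskCommit() answers false when no output path is configured, which is what keeps testMapOnlyNoOutput from tripping the old NPE. A sketch of the sequence as a helper, with names borrowed from the tests:

    import java.io.IOException;
    import org.apache.hadoop.mapred.FileOutputCommitter;
    import org.apache.hadoop.mapred.JobContext;
    import org.apache.hadoop.mapred.TaskAttemptContext;

    final class SafeCommit {
      // Defensive commit sequence used by the updated tests.
      static void commitAll(FileOutputCommitter committer,
          JobContext jContext, TaskAttemptContext tContext) throws IOException {
        if (committer.needsTaskCommit(tContext)) {
          committer.commitTask(tContext); // only when there is output to promote
        }
        committer.commitJob(jContext);    // safe even with no output path set
      }
    }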
From 5a34d782828148faae54098e85d51b215d9255cc Mon Sep 17 00:00:00 2001
From: Robert Joseph Evans
Date: Mon, 6 Feb 2012 22:41:31 +0000
Subject: [PATCH 17/27] svn merge -c 1241225 fixes MAPREDUCE-3804
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1241230 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-mapreduce-project/CHANGES.txt | 3 +++
.../main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java | 4 +++-
.../java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java | 2 +-
3 files changed, 7 insertions(+), 2 deletions(-)
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 67582531bee..8a7ccb4987f 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -195,6 +195,9 @@ Release 0.23.1 - Unreleased
acmurthy)
BUG FIXES
+ MAPREDUCE-3804. yarn webapp interface vulnerable to cross-site scripting attacks
+ (Dave Thompson via bobby)
+
MAPREDUCE-2784. [Gridmix] Bug fixes in ExecutionSummarizer and
ResourceUsageMatcher. (amarrk)
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
index e404fe5a723..e6df3460950 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
@@ -36,6 +36,7 @@ import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.yarn.webapp.Controller.RequestContext;
import org.apache.hadoop.yarn.webapp.Router.Dest;
import org.apache.hadoop.yarn.webapp.view.ErrorPage;
+import org.apache.hadoop.http.HtmlQuoting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -73,7 +74,8 @@ public class Dispatcher extends HttpServlet {
public void service(HttpServletRequest req, HttpServletResponse res)
throws ServletException, IOException {
res.setCharacterEncoding("UTF-8");
- String uri = req.getRequestURI();
+ String uri = HtmlQuoting.quoteHtmlChars(req.getRequestURI());
+
if (uri == null) {
uri = "/";
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java
index 126841b8607..d792d31c38c 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java
@@ -307,7 +307,7 @@ public class HamletImpl extends HamletSpec {
sb.setLength(0);
sb.append(' ').append(name);
if (value != null) {
- sb.append("=\"").append(value).append("\"");
+ sb.append("=\"").append(escapeHtml(value)).append("\"");
}
out.print(sb.toString());
}
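Both hunks apply the same rule: HTML-escape anything attacker-influenced before echoing it into markup, the request URI in Dispatcher and attribute values in HamletImpl. HtmlQuoting.quoteHtmlChars replaces the HTML metacharacters with entities; a small sketch of the effect:

    import org.apache.hadoop.http.HtmlQuoting;

    public class QuoteDemo {
      public static void main(String[] args) {
        String uri = "/app?q=<script>alert('x')</script>";
        // &, <, >, ' and " become character entities, neutralizing the payload:
        System.out.println(HtmlQuoting.quoteHtmlChars(uri));
        // /app?q=&lt;script&gt;alert(&apos;x&apos;)&lt;/script&gt;
      }
    }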
From c6e7c24bde2e9f25f3731ae1c6d552e29ca010fb Mon Sep 17 00:00:00 2001
From: Arun Murthy
Date: Mon, 6 Feb 2012 23:13:16 +0000
Subject: [PATCH 18/27] Merge -c 1241250 from trunk to branch-0.23 to fix
MAPREDUCE-3354. Changed scripts so that jobhistory server is started by
bin/mapred instead of bin/yarn. Contributed by Jonathan Eagles.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1241252 13f79535-47bb-0310-9956-ffa450edef68
---
.../assemblies/hadoop-mapreduce-dist.xml | 8 +
hadoop-mapreduce-project/CHANGES.txt | 3 +
hadoop-mapreduce-project/INSTALL | 4 +-
hadoop-mapreduce-project/bin/mapred | 18 ++-
.../bin/mr-jobhistory-daemon.sh | 144 ++++++++++++++++++
.../hadoop-yarn/bin/start-yarn.sh | 2 -
.../hadoop-yarn/bin/stop-yarn.sh | 2 -
hadoop-mapreduce-project/hadoop-yarn/bin/yarn | 6 -
.../hadoop-yarn/bin/yarn-daemon.sh | 1 -
.../src/site/apt/ClusterSetup.apt.vm | 8 +-
10 files changed, 173 insertions(+), 23 deletions(-)
create mode 100644 hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml
index 281ce0ddcdd..eed27a88570 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml
@@ -71,6 +71,14 @@
       </includes>
       <fileMode>0755</fileMode>
     </fileSet>
+    <fileSet>
+      <directory>bin</directory>
+      <outputDirectory>sbin</outputDirectory>
+      <includes>
+        <include>mr-jobhistory-daemon.sh</include>
+      </includes>
+      <fileMode>0755</fileMode>
+    </fileSet>
     <fileSet>
       <directory>hadoop-yarn/conf</directory>
       <outputDirectory>etc/hadoop</outputDirectory>
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 8a7ccb4987f..3c3dadedf71 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -676,6 +676,9 @@ Release 0.23.1 - Unreleased
MAPREDUCE-3808. Fixed an NPE in FileOutputCommitter for jobs with maps
but no reduces. (Robert Joseph Evans via vinodkv)
+ MAPREDUCE-3354. Changed scripts so that jobhistory server is started by
+ bin/mapred instead of bin/yarn. (Jonathan Eagles via acmurthy)
+
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/INSTALL b/hadoop-mapreduce-project/INSTALL
index 5bc063194b5..cac914bad77 100644
--- a/hadoop-mapreduce-project/INSTALL
+++ b/hadoop-mapreduce-project/INSTALL
@@ -59,12 +59,12 @@ Step 10) sbin/yarn-daemon.sh start resourcemanager
Step 11) sbin/yarn-daemon.sh start nodemanager
-Step 12) sbin/yarn-daemon.sh start historyserver
+Step 12) sbin/mr-jobhistory-daemon.sh start historyserver
Step 13) You are all set, an example on how to run a mapreduce job is:
cd $HADOOP_MAPRED_HOME
ant examples -Dresolvers=internal
-$HADOOP_COMMON_HOME/bin/hadoop jar $HADOOP_MAPRED_HOME/build/hadoop-mapreduce-examples-0.23.0-SNAPSHOT.jar randomwriter -Dmapreduce.job.user.name=$USER -Dmapreduce.clientfactory.class.name=org.apache.hadoop.mapred.YarnClientFactory -Dmapreduce.randomwriter.bytespermap=10000 -Ddfs.blocksize=536870912 -Ddfs.block.size=536870912 -libjars $YARN_HOME/modules/hadoop-mapreduce-client-jobclient-0.23.0-SNAPSHOT.jar output
+$HADOOP_COMMON_HOME/bin/hadoop jar $HADOOP_MAPRED_HOME/build/hadoop-mapreduce-examples-*.jar randomwriter -Dmapreduce.job.user.name=$USER -Dmapreduce.clientfactory.class.name=org.apache.hadoop.mapred.YarnClientFactory -Dmapreduce.randomwriter.bytespermap=10000 -Ddfs.blocksize=536870912 -Ddfs.block.size=536870912 -libjars $YARN_HOME/modules/hadoop-mapreduce-client-jobclient-*.jar output
The output on the command line should be almost similar to what you see in the JT/TT setup (Hadoop 0.20/0.21)
diff --git a/hadoop-mapreduce-project/bin/mapred b/hadoop-mapreduce-project/bin/mapred
index ff1ebbc67df..5ade3dabf43 100755
--- a/hadoop-mapreduce-project/bin/mapred
+++ b/hadoop-mapreduce-project/bin/mapred
@@ -36,6 +36,7 @@ function print_usage(){
echo " classpath prints the class path needed for running"
echo " mapreduce subcommands"
echo " groups get the groups which users belong to"
+ echo " historyserver run job history servers as a standalone daemon"
echo ""
echo "Most commands print help when invoked w/o parameters."
}
@@ -48,6 +49,8 @@ fi
COMMAND=$1
shift
+HADOOP_JOB_HISTORYSERVER_OPTS="-Dmapred.jobsummary.logger=${HADOOP_JHS_LOGGER:-INFO,console}"
+
if [ "$COMMAND" = "job" ] ; then
CLASS=org.apache.hadoop.mapred.JobClient
elif [ "$COMMAND" = "queue" ] ; then
@@ -63,6 +66,9 @@ elif [ "$COMMAND" = "classpath" ] ; then
elif [ "$COMMAND" = "groups" ] ; then
CLASS=org.apache.hadoop.mapred.tools.GetGroups
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "historyserver" ] ; then
+ CLASS=org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_JOB_HISTORYSERVER_OPTS"
elif [ "$COMMAND" = "mradmin" ] \
|| [ "$COMMAND" = "jobtracker" ] \
|| [ "$COMMAND" = "tasktracker" ] ; then
@@ -103,6 +109,11 @@ for f in $HADOOP_MAPRED_HOME/${MAPRED_LIB_JARS_DIR}/*.jar; do
CLASSPATH=${CLASSPATH}:$f;
done
+# add modules to CLASSPATH
+for f in $HADOOP_MAPRED_HOME/modules/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
if $cygwin; then
CLASSPATH=`cygpath -p -w "$CLASSPATH"`
fi
@@ -112,12 +123,7 @@ if [ "$COMMAND" = "classpath" ] ; then
exit
fi
-#turn security logger on the jobtracker
-if [ $COMMAND = "jobtracker" ]; then
- HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,DRFAS}"
-else
- HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
-fi
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
export CLASSPATH
exec "$JAVA" $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@"
diff --git a/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh b/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh
new file mode 100644
index 00000000000..6fc3ee7e863
--- /dev/null
+++ b/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh
@@ -0,0 +1,144 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Runs a mapred command as a daemon.
+#
+# Environment Variables
+#
+# YARN_CONF_DIR Alternate conf dir. Default is ${YARN_HOME}/conf.
+# YARN_LOG_DIR Where log files are stored. PWD by default.
+# YARN_MASTER host:path where hadoop code should be rsync'd from
+# YARN_PID_DIR Where the pid files are stored. /tmp by default.
+# YARN_IDENT_STRING A string representing this instance of hadoop. $USER by default
+# YARN_NICENESS The scheduling priority for daemons. Defaults to 0.
+##
+
+usage="Usage: mr-jobhistory-daemon.sh [--config <conf-dir>] [--hosts hostlistfile] (start|stop) <mapred-command>"
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/yarn-config.sh
+
+# get arguments
+startStop=$1
+shift
+command=$1
+shift
+
+hadoop_rotate_log ()
+{
+ log=$1;
+ num=5;
+ if [ -n "$2" ]; then
+ num=$2
+ fi
+ if [ -f "$log" ]; then # rotate logs
+ while [ $num -gt 1 ]; do
+ prev=`expr $num - 1`
+ [ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num"
+ num=$prev
+ done
+ mv "$log" "$log.$num";
+ fi
+}
+
+if [ -f "${YARN_CONF_DIR}/yarn-env.sh" ]; then
+ . "${YARN_CONF_DIR}/yarn-env.sh"
+fi
+
+if [ "$YARN_IDENT_STRING" = "" ]; then
+ export YARN_IDENT_STRING="$USER"
+fi
+
+# get log directory
+if [ "$YARN_LOG_DIR" = "" ]; then
+ export YARN_LOG_DIR="$YARN_HOME/logs"
+fi
+mkdir -p "$YARN_LOG_DIR"
+chown $YARN_IDENT_STRING $YARN_LOG_DIR
+
+if [ "$YARN_PID_DIR" = "" ]; then
+ YARN_PID_DIR=/tmp
+fi
+
+# some variables
+export YARN_LOGFILE=yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.log
+export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,DRFA}
+log=$YARN_LOG_DIR/yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.out
+pid=$YARN_PID_DIR/yarn-$YARN_IDENT_STRING-$command.pid
+
+# Set default scheduling priority
+if [ "$YARN_NICENESS" = "" ]; then
+ export YARN_NICENESS=0
+fi
+
+case $startStop in
+
+ (start)
+
+ mkdir -p "$YARN_PID_DIR"
+
+ if [ -f $pid ]; then
+ if kill -0 `cat $pid` > /dev/null 2>&1; then
+ echo $command running as process `cat $pid`. Stop it first.
+ exit 1
+ fi
+ fi
+
+ if [ "$YARN_MASTER" != "" ]; then
+ echo rsync from $YARN_MASTER
+ rsync -a -e ssh --delete --exclude=.svn --exclude='logs/*' --exclude='contrib/hod/logs/*' $YARN_MASTER/ "$YARN_HOME"
+ fi
+
+ hadoop_rotate_log $log
+ echo starting $command, logging to $log
+ cd "$YARN_HOME"
+ nohup nice -n $YARN_NICENESS "$YARN_HOME"/bin/mapred --config $YARN_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
+ echo $! > $pid
+ sleep 1; head "$log"
+ ;;
+
+ (stop)
+
+ if [ -f $pid ]; then
+ if kill -0 `cat $pid` > /dev/null 2>&1; then
+ echo stopping $command
+ kill `cat $pid`
+ else
+ echo no $command to stop
+ fi
+ else
+ echo no $command to stop
+ fi
+ ;;
+
+ (*)
+ echo $usage
+ exit 1
+ ;;
+
+esac
diff --git a/hadoop-mapreduce-project/hadoop-yarn/bin/start-yarn.sh b/hadoop-mapreduce-project/hadoop-yarn/bin/start-yarn.sh
index ccd63a44789..40b77fb35ab 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/bin/start-yarn.sh
+++ b/hadoop-mapreduce-project/hadoop-yarn/bin/start-yarn.sh
@@ -31,7 +31,5 @@ HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager
# start nodeManager
"$bin"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager
-# start historyserver
-#"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start historyserver
# start proxyserver
#"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver
diff --git a/hadoop-mapreduce-project/hadoop-yarn/bin/stop-yarn.sh b/hadoop-mapreduce-project/hadoop-yarn/bin/stop-yarn.sh
index c10d1ce7d18..a8498ef3ffd 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/bin/stop-yarn.sh
+++ b/hadoop-mapreduce-project/hadoop-yarn/bin/stop-yarn.sh
@@ -31,7 +31,5 @@ HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR stop resourcemanager
# stop nodeManager
"$bin"/yarn-daemons.sh --config $YARN_CONF_DIR stop nodemanager
-# stop historyServer
-"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR stop historyserver
# stop proxy server
"$bin"/yarn-daemon.sh --config $YARN_CONF_DIR stop proxyserver
diff --git a/hadoop-mapreduce-project/hadoop-yarn/bin/yarn b/hadoop-mapreduce-project/hadoop-yarn/bin/yarn
index 7ceac4feae8..d7dae8b8d86 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/bin/yarn
+++ b/hadoop-mapreduce-project/hadoop-yarn/bin/yarn
@@ -59,7 +59,6 @@ if [ $# = 0 ]; then
echo "where COMMAND is one of:"
echo " resourcemanager run the ResourceManager"
echo " nodemanager run a nodemanager on each slave"
- echo " historyserver run job history servers as a standalone daemon"
echo " rmadmin admin tools"
echo " version print the version"
echo " jar run a jar file"
@@ -154,8 +153,6 @@ if [ "$YARN_LOGFILE" = "" ]; then
YARN_LOGFILE='yarn.log'
fi
-YARN_JOB_HISTORYSERVER_OPTS="-Dmapred.jobsummary.logger=${YARN_JHS_LOGGER:-INFO,console}"
-
# restore ordinary behaviour
unset IFS
@@ -181,9 +178,6 @@ elif [ "$COMMAND" = "nodemanager" ] ; then
else
YARN_OPTS="$YARN_OPTS -server $YARN_NODEMANAGER_OPTS"
fi
-elif [ "$COMMAND" = "historyserver" ] ; then
- CLASS=org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer
- YARN_OPTS="$YARN_OPTS $YARN_JOB_HISTORYSERVER_OPTS"
elif [ "$COMMAND" = "proxyserver" ] ; then
CLASS='org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer'
YARN_OPTS="$YARN_OPTS $YARN_PROXYSERVER_OPTS"
diff --git a/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-daemon.sh b/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-daemon.sh
index 1fa43d8b1b2..c36e99cac70 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-daemon.sh
+++ b/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-daemon.sh
@@ -91,7 +91,6 @@ fi
# some variables
export YARN_LOGFILE=yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.log
export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,DRFA}
-export YARN_JHS_LOGGER=${YARN_JHS_LOGGER:-INFO,JSA}
log=$YARN_LOG_DIR/yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.out
pid=$YARN_PID_DIR/yarn-$YARN_IDENT_STRING-$command.pid
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm
index 0290c23b8ae..67db4b13aea 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm
@@ -476,7 +476,7 @@ Hadoop MapReduce Next Generation - Cluster Setup
designated server:
----
- $ $YARN_HOME/bin/yarn start historyserver --config $HADOOP_CONF_DIR
+ $ $YARN_HOME/bin/mapred start historyserver --config $YARN_CONF_DIR
----
* Hadoop Shutdown
@@ -519,7 +519,7 @@ Hadoop MapReduce Next Generation - Cluster Setup
designated server:
----
- $ $YARN_HOME/bin/yarn stop historyserver --config $HADOOP_CONF_DIR
+ $ $YARN_HOME/bin/mapred stop historyserver --config $YARN_CONF_DIR
----
@@ -1020,7 +1020,7 @@ KVNO Timestamp Principal
designated server as :
----
-[mapred]$ $YARN_HOME/bin/yarn start historyserver --config $HADOOP_CONF_DIR
+[mapred]$ $YARN_HOME/bin/mapred start historyserver --config $YARN_CONF_DIR
----
* Hadoop Shutdown
@@ -1063,7 +1063,7 @@ KVNO Timestamp Principal
designated server as :
----
-[mapred]$ $YARN_HOME/bin/yarn stop historyserver --config $HADOOP_CONF_DIR
+[mapred]$ $YARN_HOME/bin/mapred stop historyserver --config $YARN_CONF_DIR
----
* {Web Interfaces}
From a5d09c0892473fe2c12ab820cd741a6b41443264 Mon Sep 17 00:00:00 2001
From: Eli Collins
Date: Mon, 6 Feb 2012 23:43:50 +0000
Subject: [PATCH 19/27] HADOOP-7761. svn merge -c 1189613 from trunk
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1241266 13f79535-47bb-0310-9956-ffa450edef68
---
.../hadoop-common/CHANGES.txt | 2 +
.../apache/hadoop/io/FastByteComparisons.java | 237 ++++++++++++++++++
.../apache/hadoop/io/WritableComparator.java | 11 +-
3 files changed, 240 insertions(+), 10 deletions(-)
create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index b9534c88e27..314f370d015 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -97,6 +97,8 @@ Release 0.23.1 - Unreleased
OPTIMIZATIONS
+ HADOOP-7761. Improve the performance of raw comparisons. (todd)
+
BUG FIXES
HADOOP-8013. ViewFileSystem does not honor setVerifyChecksum
(Daryn Sharp via bobby)
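Per the diffstat, the consumer of the new class is WritableComparator: its static compareBytes() now delegates to FastByteComparisons, so every raw-key comparison during a sort picks up the Unsafe eight-bytes-at-a-time fast path when it is available. Callers are unchanged; a usage sketch of the public entry point (the demo class is hypothetical):

    import java.io.UnsupportedEncodingException;
    import org.apache.hadoop.io.WritableComparator;

    public class RawCompareDemo {
      public static void main(String[] args) throws UnsupportedEncodingException {
        byte[] a = "apple".getBytes("UTF-8");
        byte[] b = "apricot".getBytes("UTF-8");
        // Unsigned lexicographic compare over (buffer, offset, length) slices.
        int cmp = WritableComparator.compareBytes(a, 0, a.length, b, 0, b.length);
        System.out.println(cmp < 0); // true: "apple" sorts before "apricot"
      }
    }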
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
new file mode 100644
index 00000000000..3f5881b2dd6
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
@@ -0,0 +1,237 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io;
+
+import java.lang.reflect.Field;
+import java.nio.ByteOrder;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+
+import sun.misc.Unsafe;
+
+import com.google.common.primitives.Longs;
+import com.google.common.primitives.UnsignedBytes;
+
+/**
+ * Utility code to do optimized byte-array comparison.
+ * This is borrowed and slightly modified from Guava's {@link UnsignedBytes}
+ * class to be able to compare arrays that start at non-zero offsets.
+ */
+abstract class FastByteComparisons {
+
+ /**
+ * Lexicographically compare two byte arrays.
+ */
+ public static int compareTo(byte[] b1, int s1, int l1, byte[] b2, int s2,
+ int l2) {
+ return LexicographicalComparerHolder.BEST_COMPARER.compareTo(
+ b1, s1, l1, b2, s2, l2);
+ }
+
+
+ private interface Comparer<T> {
+ abstract public int compareTo(T buffer1, int offset1, int length1,
+ T buffer2, int offset2, int length2);
+ }
+
+ private static Comparer<byte[]> lexicographicalComparerJavaImpl() {
+ return LexicographicalComparerHolder.PureJavaComparer.INSTANCE;
+ }
+
+
+ /**
+ * Provides a lexicographical comparer implementation; either a Java
+ * implementation or a faster implementation based on {@link Unsafe}.
+ *
+ * Uses reflection to gracefully fall back to the Java implementation if
+ * {@code Unsafe} isn't available.
+ */
+ private static class LexicographicalComparerHolder {
+ static final String UNSAFE_COMPARER_NAME =
+ LexicographicalComparerHolder.class.getName() + "$UnsafeComparer";
+
+ static final Comparer<byte[]> BEST_COMPARER = getBestComparer();
+ /**
+ * Returns the Unsafe-using Comparer, or falls back to the pure-Java
+ * implementation if unable to do so.
+ */
+ static Comparer<byte[]> getBestComparer() {
+ try {
+ Class> theClass = Class.forName(UNSAFE_COMPARER_NAME);
+
+ // yes, UnsafeComparer does implement Comparer<byte[]>
+ @SuppressWarnings("unchecked")
+ Comparer<byte[]> comparer =
+ (Comparer<byte[]>) theClass.getEnumConstants()[0];
+ return comparer;
+ } catch (Throwable t) { // ensure we really catch *everything*
+ return lexicographicalComparerJavaImpl();
+ }
+ }
+
+ private enum PureJavaComparer implements Comparer<byte[]> {
+ INSTANCE;
+
+ @Override
+ public int compareTo(byte[] buffer1, int offset1, int length1,
+ byte[] buffer2, int offset2, int length2) {
+ // Short circuit equal case
+ if (buffer1 == buffer2 &&
+ offset1 == offset2 &&
+ length1 == length2) {
+ return 0;
+ }
+ // Bring WritableComparator code local
+ int end1 = offset1 + length1;
+ int end2 = offset2 + length2;
+ for (int i = offset1, j = offset2; i < end1 && j < end2; i++, j++) {
+ int a = (buffer1[i] & 0xff);
+ int b = (buffer2[j] & 0xff);
+ if (a != b) {
+ return a - b;
+ }
+ }
+ return length1 - length2;
+ }
+ }
+
+ @SuppressWarnings("unused") // used via reflection
+ private enum UnsafeComparer implements Comparer<byte[]> {
+ INSTANCE;
+
+ static final Unsafe theUnsafe;
+
+ /** The offset to the first element in a byte array. */
+ static final int BYTE_ARRAY_BASE_OFFSET;
+
+ static {
+ theUnsafe = (Unsafe) AccessController.doPrivileged(
+ new PrivilegedAction<Object>() {