MAPREDUCE-3265. Moved debug logs during job submission to LOG.debug to cut down noise.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1205628 13f79535-47bb-0310-9956-ffa450edef68
Author: Arun Murthy
Date:   2011-11-23 22:04:33 +00:00
Commit: f17ed541c7 (parent d3a51478e3)

13 changed files with 65 additions and 44 deletions
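The recurring pattern in this patch is demoting per-submission chatter from LOG.info to LOG.debug. A minimal sketch of that pattern follows, using the same commons-logging API the touched classes already import; the class and method names here are hypothetical, and the isDebugEnabled() guard is an optional extra this patch does not itself add:

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class SubmissionLogging {
  private static final Log LOG = LogFactory.getLog(SubmissionLogging.class);

  void connect(String serviceAddr) {
    // Before this commit: LOG.info("Connecting to ... " + serviceAddr);
    // After: the message is debug-level; the guard additionally skips the
    // string concatenation entirely when debug logging is disabled.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connecting to HistoryServer at: " + serviceAddr);
    }
  }
}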

CHANGES.txt
@@ -175,6 +175,9 @@ Release 0.23.1 - Unreleased
     MAPREDUCE-3464. mapreduce jsp pages missing DOCTYPE. (Dave Vronay via mattf)
 
+    MAPREDUCE-3265. Moved debug logs during job submission to LOG.debug to
+    cut down noise. (acmurthy)
+
 Release 0.23.0 - 2011-11-01
 
   INCOMPATIBLE CHANGES
 

TaskLog.java
@@ -72,7 +72,7 @@ public class TaskLog {
     if (!LOG_DIR.exists()) {
       boolean b = LOG_DIR.mkdirs();
       if (!b) {
-        LOG.warn("mkdirs failed. Ignoring.");
+        LOG.debug("mkdirs failed. Ignoring.");
       }
     }
   }

Cluster.java
@@ -108,7 +108,7 @@ public class Cluster {
         break;
       }
       else {
-        LOG.info("Cannot pick " + provider.getClass().getName()
+        LOG.debug("Cannot pick " + provider.getClass().getName()
             + " as the ClientProtocolProvider - returned null protocol");
       }
     }

ClientCache.java
@@ -30,12 +30,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
-import org.apache.hadoop.mapreduce.v2.security.client.ClientHSSecurityInfo;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.SecurityInfo;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.YarnException;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 
 public class ClientCache {
@@ -79,9 +76,9 @@ public class ClientCache {
     if (StringUtils.isEmpty(serviceAddr)) {
       return null;
     }
-    LOG.info("Connecting to HistoryServer at: " + serviceAddr);
+    LOG.debug("Connecting to HistoryServer at: " + serviceAddr);
     final YarnRPC rpc = YarnRPC.create(conf);
-    LOG.info("Connected to HistoryServer at: " + serviceAddr);
+    LOG.debug("Connected to HistoryServer at: " + serviceAddr);
     UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
     return currentUser.doAs(new PrivilegedAction<MRClientProtocol>() {
       @Override

ClientServiceDelegate.java
@@ -143,7 +143,7 @@ public class ClientServiceDelegate {
           || YarnApplicationState.RUNNING == application
               .getYarnApplicationState()) {
         if (application == null) {
-          LOG.info("Could not get Job info from RM for job " + jobId
+          LOG.debug("Could not get Job info from RM for job " + jobId
               + ". Redirecting to job history server.");
           return checkAndGetHSProxy(null, JobState.NEW);
         }
@@ -169,8 +169,8 @@
               + ":" + addr.getPort()));
           UserGroupInformation.getCurrentUser().addToken(clientToken);
         }
-        LOG.info("Tracking Url of JOB is " + application.getTrackingUrl());
-        LOG.info("Connecting to " + serviceAddr);
+        LOG.info("The url to track the job: " + application.getTrackingUrl());
+        LOG.debug("Connecting to " + serviceAddr);
         realProxy = instantiateAMProxy(serviceAddr);
         return realProxy;
       } catch (IOException e) {
@@ -187,7 +187,7 @@
       }
       application = rm.getApplicationReport(appId);
       if (application == null) {
-        LOG.info("Could not get Job info from RM for job " + jobId
+        LOG.debug("Could not get Job info from RM for job " + jobId
            + ". Redirecting to job history server.");
        return checkAndGetHSProxy(null, JobState.RUNNING);
      }
@@ -281,16 +281,13 @@
         LOG.debug("Tracing remote error ", e.getTargetException());
         throw (YarnRemoteException) e.getTargetException();
       }
-      LOG.info("Failed to contact AM/History for job " + jobId +
-          " retrying..");
-      LOG.debug("Failed exception on AM/History contact",
-          e.getTargetException());
+      LOG.debug("Failed to contact AM/History for job " + jobId +
+          " retrying..", e.getTargetException());
       // Force reconnection by setting the proxy to null.
       realProxy = null;
     } catch (Exception e) {
-      LOG.info("Failed to contact AM/History for job " + jobId
-          + " Will retry..");
-      LOG.debug("Failing to contact application master", e);
+      LOG.debug("Failed to contact AM/History for job " + jobId
+          + " Will retry..", e);
       // Force reconnection by setting the proxy to null.
       realProxy = null;
     }
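The last hunk above also folds a paired info/debug call into a single debug() call that carries the Throwable, using the two-argument Log.debug(Object, Throwable) overload of commons-logging. A hypothetical condensed example of that pattern (class and method names are placeholders, not code from this commit):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class RetryLogging {
  private static final Log LOG = LogFactory.getLog(RetryLogging.class);

  void onContactFailure(String jobId, Exception e) {
    // One debug-level record with the exception attached replaces an
    // info() message plus a separate debug() stack-trace line, so the
    // trace is emitted only when debug logging is enabled.
    LOG.debug("Failed to contact AM/History for job " + jobId
        + ". Will retry..", e);
  }
}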

ResourceMgrDelegate.java
@@ -25,7 +25,6 @@ import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
@@ -40,11 +39,9 @@ import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.SecurityInfo;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.api.ClientRMProtocol;
-import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
@@ -56,6 +53,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
@@ -67,13 +65,13 @@ import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
-import org.apache.hadoop.yarn.security.client.ClientRMSecurityInfo;
 
 // TODO: This should be part of something like yarn-client.
 public class ResourceMgrDelegate {
   private static final Log LOG = LogFactory.getLog(ResourceMgrDelegate.class);
 
+  private final String rmAddress;
   private YarnConfiguration conf;
   ClientRMProtocol applicationsManager;
   private ApplicationId applicationId;
@@ -92,21 +90,25 @@
         YarnConfiguration.DEFAULT_RM_ADDRESS),
         YarnConfiguration.DEFAULT_RM_PORT,
         YarnConfiguration.RM_ADDRESS);
-    LOG.info("Connecting to ResourceManager at " + rmAddress);
+    this.rmAddress = rmAddress.toString();
+    LOG.debug("Connecting to ResourceManager at " + rmAddress);
     applicationsManager =
         (ClientRMProtocol) rpc.getProxy(ClientRMProtocol.class,
             rmAddress, this.conf);
-    LOG.info("Connected to ResourceManager at " + rmAddress);
+    LOG.debug("Connected to ResourceManager at " + rmAddress);
   }
 
   /**
    * Used for injecting applicationsManager, mostly for testing.
    * @param conf the configuration object
-   * @param applicationsManager the handle to talk the resource managers {@link ClientRMProtocol}.
+   * @param applicationsManager the handle to talk the resource managers
+   * {@link ClientRMProtocol}.
    */
-  public ResourceMgrDelegate(YarnConfiguration conf, ClientRMProtocol applicationsManager) {
+  public ResourceMgrDelegate(YarnConfiguration conf,
+      ClientRMProtocol applicationsManager) {
     this.conf = conf;
     this.applicationsManager = applicationsManager;
+    this.rmAddress = applicationsManager.toString();
   }
@@ -295,18 +297,22 @@
   }
 
-  public ApplicationId submitApplication(ApplicationSubmissionContext appContext)
+  public ApplicationId submitApplication(
+      ApplicationSubmissionContext appContext)
   throws IOException {
     appContext.setApplicationId(applicationId);
-    SubmitApplicationRequest request = recordFactory.newRecordInstance(SubmitApplicationRequest.class);
+    SubmitApplicationRequest request =
+        recordFactory.newRecordInstance(SubmitApplicationRequest.class);
     request.setApplicationSubmissionContext(appContext);
     applicationsManager.submitApplication(request);
-    LOG.info("Submitted application " + applicationId + " to ResourceManager");
+    LOG.info("Submitted application " + applicationId + " to ResourceManager" +
+        " at " + rmAddress);
     return applicationId;
   }
 
   public void killApplication(ApplicationId applicationId) throws IOException {
-    KillApplicationRequest request = recordFactory.newRecordInstance(KillApplicationRequest.class);
+    KillApplicationRequest request =
+        recordFactory.newRecordInstance(KillApplicationRequest.class);
     request.setApplicationId(applicationId);
     applicationsManager.forceKillApplication(request);
     LOG.info("Killing application " + applicationId);

YARNRunner.java
@@ -276,7 +276,7 @@ public class YARNRunner implements ClientProtocol {
     Resource capability = recordFactory.newRecordInstance(Resource.class);
     capability.setMemory(conf.getInt(MRJobConfig.MR_AM_VMEM_MB,
         MRJobConfig.DEFAULT_MR_AM_VMEM_MB));
-    LOG.info("AppMaster capability = " + capability);
+    LOG.debug("AppMaster capability = " + capability);
 
     // Setup LocalResources
     Map<String, LocalResource> localResources =
@@ -352,7 +352,7 @@
     }
 
     vargsFinal.add(mergedCommand.toString());
-    LOG.info("Command to launch container for ApplicationMaster is : "
+    LOG.debug("Command to launch container for ApplicationMaster is : "
         + mergedCommand);
 
     // Setup the CLASSPATH in environment

HadoopYarnProtoRPC.java
@@ -37,12 +37,12 @@ import org.apache.hadoop.yarn.factory.providers.RpcFactoryProvider;
  */
 public class HadoopYarnProtoRPC extends YarnRPC {
 
-  private static final Log LOG = LogFactory.getLog(HadoopYarnRPC.class);
+  private static final Log LOG = LogFactory.getLog(HadoopYarnProtoRPC.class);
 
   @Override
   public Object getProxy(Class protocol, InetSocketAddress addr,
       Configuration conf) {
-    LOG.info("Creating a HadoopYarnProtoRpc proxy for protocol " + protocol);
+    LOG.debug("Creating a HadoopYarnProtoRpc proxy for protocol " + protocol);
     return RpcFactoryProvider.getClientFactory(conf).getClient(protocol, 1,
         addr, conf);
   }
@@ -57,11 +57,11 @@ public class HadoopYarnProtoRPC extends YarnRPC {
       InetSocketAddress addr, Configuration conf,
       SecretManager<? extends TokenIdentifier> secretManager,
       int numHandlers) {
-    LOG.info("Creating a HadoopYarnProtoRpc server for protocol " + protocol +
+    LOG.debug("Creating a HadoopYarnProtoRpc server for protocol " + protocol +
         " with " + numHandlers + " handlers");
-    return RpcFactoryProvider.getServerFactory(conf).getServer(protocol, instance,
-        addr, conf, secretManager, numHandlers);
+    return RpcFactoryProvider.getServerFactory(conf).getServer(protocol,
+        instance, addr, conf, secretManager, numHandlers);
   }

HadoopYarnRPC.java
@@ -45,7 +45,7 @@ public class HadoopYarnRPC extends YarnRPC {
   @Override
   public Object getProxy(Class protocol, InetSocketAddress addr,
       Configuration conf) {
-    LOG.info("Creating a HadoopYarnRpc proxy for protocol " + protocol);
+    LOG.debug("Creating a HadoopYarnRpc proxy for protocol " + protocol);
     RPC.setProtocolEngine(conf, protocol, AvroSpecificRpcEngine.class);
     try {
       return RPC.getProxy(protocol, 1, addr, conf);
@@ -64,7 +64,7 @@ public class HadoopYarnRPC extends YarnRPC {
       InetSocketAddress addr, Configuration conf,
       SecretManager<? extends TokenIdentifier> secretManager,
       int numHandlers) {
-    LOG.info("Creating a HadoopYarnRpc server for protocol " + protocol +
+    LOG.debug("Creating a HadoopYarnRpc server for protocol " + protocol +
         " with " + numHandlers + " handlers");
     RPC.setProtocolEngine(conf, protocol, AvroSpecificRpcEngine.class);
     final RPC.Server hadoopServer;

YarnRPC.java
@@ -46,7 +46,8 @@ public abstract class YarnRPC {
       int numHandlers);
 
   public static YarnRPC create(Configuration conf) {
-    LOG.info("Creating YarnRPC for " + conf.get(YarnConfiguration.IPC_RPC_IMPL));
+    LOG.debug("Creating YarnRPC for " +
+        conf.get(YarnConfiguration.IPC_RPC_IMPL));
     String clazzName = conf.get(YarnConfiguration.IPC_RPC_IMPL);
     if (clazzName == null) {
       clazzName = YarnConfiguration.DEFAULT_IPC_RPC_IMPL;

ApplicationTokenSelector.java
@@ -39,9 +39,9 @@ public class ApplicationTokenSelector implements
     if (service == null) {
       return null;
     }
-    LOG.info("Looking for a token with service " + service.toString());
+    LOG.debug("Looking for a token with service " + service.toString());
     for (Token<? extends TokenIdentifier> token : tokens) {
-      LOG.info("Token kind is " + token.getKind().toString()
+      LOG.debug("Token kind is " + token.getKind().toString()
           + " and the token's service name is " + token.getService());
       if (ApplicationTokenIdentifier.KIND_NAME.equals(token.getKind())
           && service.equals(token.getService())) {

ClientTokenSelector.java
@@ -39,9 +39,9 @@ public class ClientTokenSelector implements
     if (service == null) {
       return null;
     }
-    LOG.info("Looking for a token with service " + service.toString());
+    LOG.debug("Looking for a token with service " + service.toString());
     for (Token<? extends TokenIdentifier> token : tokens) {
-      LOG.info("Token kind is " + token.getKind().toString()
+      LOG.debug("Token kind is " + token.getKind().toString()
          + " and the token's service name is " + token.getService());
       if (ClientTokenIdentifier.KIND_NAME.equals(token.getKind())
           && service.equals(token.getService())) {

CSQueueUtils.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
 class CSQueueUtils {