Merge r1446505 through r1448504 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1448505 13f79535-47bb-0310-9956-ffa450edef68
commit 89d05b53d9
Tsz-wo Sze, 2013-02-21 03:30:10 +00:00
12 changed files with 85 additions and 28 deletions

dev-support/test-patch.sh

@@ -323,7 +323,7 @@ checkAuthor () {
 }

 ###############################################################################
-### Check for tests in the patch
+### Check for tests and their timeout in the patch
 checkTests () {
   echo ""
   echo ""
@@ -357,6 +357,24 @@ checkTests () {
   JIRA_COMMENT="$JIRA_COMMENT

     {color:green}+1 tests included{color}. The patch appears to include $testReferences new or modified test files."
+  echo ""
+  echo "======================================================================"
+  echo "======================================================================"
+  echo "    Checking if the tests have timeout assigned in this patch."
+  echo "======================================================================"
+  echo "======================================================================"
+  nontimeoutTests=`cat $PATCH_DIR/patch | $AWK '{ printf "%s ", $0 }' | $GREP --extended-regex --count '[ ]*\+[ ]*((@Test[\+ ]*[A-Za-z]+)|([\+ ]*@Test[ \+]*\([ \+]*\)[\ ]*\+?[ ]*[A-Za-z]+)|([\+ ]*@Test[\+ ]*\(exception[ \+]*=[ \+]*[A-Z.a-z0-9A-Z ]*\)))'`
+  if [[ $nontimeoutTests == 0 ]] ; then
+    JIRA_COMMENT="$JIRA_COMMENT
+
+    {color:green}+1 tests included appear to have a timeout.{color}"
+    return 1
+  fi
+  JIRA_COMMENT="$JIRA_COMMENT
+
+  {color:red}-1 one of tests included doesn't have a timeout.{color}"
   return 0
 }
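For context, the added check greps the patch for JUnit @Test annotations that carry no timeout attribute and votes -1 when it finds one. A minimal sketch of the two forms, assuming JUnit 4 (class and method names are illustrative, not part of the patch):

import static org.junit.Assert.assertEquals;

import org.junit.Test;

// Illustrative only: which @Test forms the check above flags or accepts.
public class TestTimeoutForms {

  // Flagged: a bare @Test with no timeout attribute.
  @Test
  public void testWithoutTimeout() {
    assertEquals(4, 2 + 2);
  }

  // Accepted: JUnit kills the test if it runs longer than 5 seconds,
  // so a hang fails fast instead of stalling the whole precommit build.
  @Test(timeout = 5000)
  public void testWithTimeout() {
    assertEquals(4, 2 + 2);
  }
}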

hadoop-common-project/hadoop-common/CHANGES.txt

@@ -150,6 +150,9 @@ Trunk (Unreleased)
     HADOOP-9218 Document the Rpc-wrappers used internally (sanjay Radia)

+    HADOOP-9112. test-patch should -1 for @Tests without a timeout
+    (Surenkumar Nihalani via bobby)
+
   BUG FIXES

     HADOOP-8419. Fixed GzipCode NPE reset for IBM JDK. (Yu Li via eyang)

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -299,6 +299,9 @@ Trunk (Unreleased)
     HDFS-4340. Update addBlock() to inculde inode id as additional argument.
     (Brandon Li via suresh)

+    HDFS-4502. JsonUtil.toFileStatus(..) should check if the fileId property
+    exists. (Brandon Li via suresh)
+
 Release 2.0.4-beta - UNRELEASED

   INCOMPATIBLE CHANGES

JsonUtil.java (org.apache.hadoop.hdfs.web)

@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
@@ -244,7 +245,8 @@ public class JsonUtil {
     final long mTime = (Long) m.get("modificationTime");
     final long blockSize = (Long) m.get("blockSize");
     final short replication = (short) (long) (Long) m.get("replication");
-    final long fileId = (Long) m.get("fileId");
+    final long fileId = m.containsKey("fileId") ? (Long) m.get("fileId")
+        : INodeId.GRANDFATHER_INODE_ID;
     return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
         blockSize, mTime, aTime, permission, owner, group,
         symlink, DFSUtil.string2Bytes(localName), fileId);
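The guarded lookup keeps toFileStatus(..) compatible with servers whose JSON responses predate inode IDs: m.get("fileId") returns null when the property is absent, and unboxing that null into a long throws a NullPointerException. A minimal self-contained sketch of the same defensive pattern, with LEGACY_ID standing in for INodeId.GRANDFATHER_INODE_ID (the constant's value here is illustrative):

import java.util.HashMap;
import java.util.Map;

// Illustrative only: the containsKey guard vs. a blind unboxing cast.
public class OptionalJsonField {
  // Stand-in for INodeId.GRANDFATHER_INODE_ID, the placeholder used for
  // responses produced before inode IDs existed.
  static final long LEGACY_ID = 0;

  public static void main(String[] args) {
    Map<String, Object> m = new HashMap<>();     // response with no "fileId"
    // final long fileId = (Long) m.get("fileId");      // NPE: get() is null
    final long fileId = m.containsKey("fileId")
        ? (Long) m.get("fileId") : LEGACY_ID;           // safe default
    System.out.println(fileId);                         // prints 0
  }
}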

hadoop-mapreduce-project/CHANGES.txt

@@ -152,6 +152,9 @@ Trunk (Unreleased)
     MAPREDUCE-4884. Streaming tests fail to start MiniMRCluster due to missing
     queue configuration. (Chris Nauroth via suresh)

+    MAPREDUCE-5012. Typo in javadoc for IdentityMapper class. (Adam Monsen
+    via suresh)
+
 Release 2.0.4-beta - UNRELEASED

   INCOMPATIBLE CHANGES
@@ -720,6 +723,10 @@ Release 0.23.7 - UNRELEASED
     MAPREDUCE-4992. AM hangs in RecoveryService when recovering tasks with
     speculative attempts (Robert Parker via jlowe)

+    MAPREDUCE-5009. Killing the Task Attempt slated for commit does not clear
+    the value from the Task commitAttempt member (Robert Parker via jeagles)
+
 Release 0.23.6 - UNRELEASED

   INCOMPATIBLE CHANGES

TaskImpl.java (org.apache.hadoop.mapreduce.v2.app.job.impl)

@@ -857,6 +857,9 @@ public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
       if (task.successfulAttempt == null) {
         task.addAndScheduleAttempt(Avataar.VIRGIN);
       }
+      if ((task.commitAttempt != null) && (task.commitAttempt == taskAttemptId)) {
+        task.commitAttempt = null;
+      }
     }
   }
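The new guard is the MAPREDUCE-5009 fix: killing the attempt that currently holds the commit grant must also clear task.commitAttempt, otherwise the stale attempt id keeps canCommit() returning false for every later attempt and the task can never commit. A rough, simplified sketch of that relationship (not the real TaskImpl, which routes this through state-machine transitions):

// Illustrative only: why a stale commitAttempt must be cleared on kill.
class MiniTask {
  private Object commitAttempt;   // attempt currently allowed to commit

  // Only the attempt that was granted the commit may actually commit.
  boolean canCommit(Object attemptId) {
    return attemptId.equals(commitAttempt);
  }

  void onAttemptKilled(Object attemptId) {
    // The fix: forget the grant if the killed attempt held it, so a
    // rescheduled attempt can be granted the commit later.
    if (commitAttempt != null && commitAttempt == attemptId) {
      commitAttempt = null;
    }
  }
}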

TestTaskImpl.java (org.apache.hadoop.mapreduce.v2.app.job.impl)

@@ -492,6 +492,24 @@ public class TestTaskImpl {
   }

+  @Test
+  public void testKillDuringTaskAttemptCommit() {
+    mockTask = createMockTask(TaskType.REDUCE);
+
+    TaskId taskId = getNewTaskID();
+    scheduleTaskAttempt(taskId);
+    launchTaskAttempt(getLastAttempt().getAttemptId());
+    updateLastAttemptState(TaskAttemptState.COMMIT_PENDING);
+    commitTaskAttempt(getLastAttempt().getAttemptId());
+
+    TaskAttemptId commitAttempt = getLastAttempt().getAttemptId();
+    updateLastAttemptState(TaskAttemptState.KILLED);
+    killRunningTaskAttempt(commitAttempt);
+
+    assertFalse(mockTask.canCommit(commitAttempt));
+  }
+
   @Test
   public void testFailureDuringTaskAttemptCommit() {
     mockTask = createMockTask(TaskType.MAP);

IdentityMapper.java (org.apache.hadoop.mapred.lib)

@@ -35,7 +35,7 @@ import org.apache.hadoop.mapred.MapReduceBase;
 public class IdentityMapper<K, V>
     extends MapReduceBase implements Mapper<K, V, K, V> {

-  /** The identify function. Input key/value pair is written directly to
+  /** The identity function. Input key/value pair is written directly to
    * output.*/
   public void map(K key, V val,
                   OutputCollector<K, V> output, Reporter reporter)

hadoop-yarn-project/CHANGES.txt

@@ -316,6 +316,9 @@ Release 0.23.7 - UNRELEASED
     YARN-249. Capacity Scheduler web page should show list of active users per
     queue like it used to (in 1.x) (Ravi Prakash via tgraves)

+    YARN-236. RM should point tracking URL to RM web page when app fails to
+    start (Jason Lowe via jeagles)
+
   OPTIMIZATIONS

     YARN-357. App submission should not be synchronized (daryn)
@@ -331,6 +334,9 @@ Release 0.23.7 - UNRELEASED
     YARN-362. Unexpected extra results when using webUI table search (Ravi
     Prakash via jlowe)

+    YARN-400. RM can return null application resource usage report leading to
+    NPE in client (Jason Lowe via tgraves)
+
 Release 0.23.6 - UNRELEASED

   INCOMPATIBLE CHANGES

RMAppImpl.java (org.apache.hadoop.yarn.server.resourcemanager.rmapp)

@@ -406,7 +406,8 @@ public class RMAppImpl implements RMApp, Recoverable {
     String host = UNAVAILABLE;
     String origTrackingUrl = UNAVAILABLE;
     int rpcPort = -1;
-    ApplicationResourceUsageReport appUsageReport = null;
+    ApplicationResourceUsageReport appUsageReport =
+        DUMMY_APPLICATION_RESOURCE_USAGE_REPORT;
     FinalApplicationStatus finishState = getFinalApplicationStatus();
     String diags = UNAVAILABLE;
     if (allowAccess) {
@@ -418,18 +419,17 @@ public class RMAppImpl implements RMApp, Recoverable {
         host = this.currentAttempt.getHost();
         rpcPort = this.currentAttempt.getRpcPort();
         appUsageReport = currentAttempt.getApplicationResourceUsageReport();
-      } else {
-        currentApplicationAttemptId =
-            BuilderUtils.newApplicationAttemptId(this.applicationId,
-                DUMMY_APPLICATION_ATTEMPT_NUMBER);
       }
       diags = this.diagnostics.toString();
-    } else {
-      appUsageReport = DUMMY_APPLICATION_RESOURCE_USAGE_REPORT;
     }
+    if (currentApplicationAttemptId == null) {
+      currentApplicationAttemptId =
+          BuilderUtils.newApplicationAttemptId(this.applicationId,
+              DUMMY_APPLICATION_ATTEMPT_NUMBER);
+    }
     return BuilderUtils.newApplicationReport(this.applicationId,
         currentApplicationAttemptId, this.user, this.queue,
         this.name, host, rpcPort, clientToken,

TestRMAppTransitions.java (org.apache.hadoop.yarn.server.resourcemanager.rmapp)

@@ -29,6 +29,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.MockApps;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl;
@@ -616,4 +617,12 @@ public class TestRMAppTransitions {
     assertTimesAtFinish(application);
     assertAppState(RMAppState.KILLED, application);
   }
+
+  @Test
+  public void testGetAppReport() {
+    RMApp app = createNewTestApp(null);
+    assertAppState(RMAppState.NEW, app);
+    ApplicationReport report = app.createAndGetApplicationReport(true);
+    Assert.assertNotNull(report.getApplicationResourceUsageReport());
+  }
 }
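Taken with the RMAppImpl change above, this test pins down the YARN-400 fix: createAndGetApplicationReport(true) now defaults to DUMMY_APPLICATION_RESOURCE_USAGE_REPORT instead of leaving the usage report null for an app with no running attempt, so clients that dereference the report no longer hit an NPE. A minimal sketch of the null-object default being applied (names are illustrative, not the real YARN API):

// Illustrative only: the "dummy object" default that replaces null.
public class NullObjectDefault {
  interface UsageReport { int numUsedContainers(); }

  // Stand-in for DUMMY_APPLICATION_RESOURCE_USAGE_REPORT: all fields zeroed,
  // so callers can read it without null checks.
  static final UsageReport DUMMY = () -> 0;

  static UsageReport currentAttemptReport() { return null; } // app not running

  public static void main(String[] args) {
    UsageReport report = currentAttemptReport();
    if (report == null) {
      report = DUMMY;               // the fix: never hand out null
    }
    System.out.println(report.numUsedContainers());  // prints 0, no NPE
  }
}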

WebAppProxyServlet.java (org.apache.hadoop.yarn.server.webproxy)

@@ -66,6 +66,7 @@ public class WebAppProxyServlet extends HttpServlet {
   public static final String PROXY_USER_COOKIE_NAME = "proxy-user";

   private final List<TrackingUriPlugin> trackingUriPlugins;
+  private final String rmAppPageUrlBase;

   private static class _ implements Hamlet._ {
     //Empty
@@ -91,6 +92,8 @@ public class WebAppProxyServlet extends HttpServlet {
     this.trackingUriPlugins =
         conf.getInstances(YarnConfiguration.YARN_TRACKING_URL_GENERATOR,
             TrackingUriPlugin.class);
+    this.rmAppPageUrlBase = StringHelper.pjoin(
+        YarnConfiguration.getRMWebAppURL(conf), "cluster", "app");
   }

   /**
@@ -291,25 +294,10 @@ public class WebAppProxyServlet extends HttpServlet {
       if (original != null) {
         trackingUri = ProxyUriUtils.getUriFromAMUrl(original);
       }
+      // fallback to ResourceManager's app page if no tracking URI provided
       if(original == null || original.equals("N/A")) {
-        String message;
-        switch(applicationReport.getFinalApplicationStatus()) {
-          case FAILED:
-          case KILLED:
-          case SUCCEEDED:
-            message =
-                "The requested application exited before setting a tracking URL.";
-            break;
-          case UNDEFINED:
-            message = "The requested application does not appear to be running "
-                + "yet, and has not set a tracking URL.";
-            break;
-          default:
-            //This should never happen, but just to be safe
-            message = "The requested application has not set a tracking URL.";
-            break;
-        }
-        notFound(resp, message);
+        resp.sendRedirect(resp.encodeRedirectURL(
+            StringHelper.pjoin(rmAppPageUrlBase, id.toString())));
         return;
       }
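The behavioral change (YARN-236): when an application has no tracking URL ("N/A" or unset), the proxy now redirects the browser to the ResourceManager's own application page instead of serving a static not-found message, so the user still gets per-application status and diagnostics. With rmAppPageUrlBase built above as <RM web URL>/cluster/app, the redirect target is <RM web URL>/cluster/app/<appId>. A small sketch of that URL composition (the join helper mirrors what StringHelper.pjoin does; host and application id are made up):

// Illustrative only: composing the RM app-page URL the servlet redirects to.
public class RmAppPageUrl {
  // Joins path parts with '/', in the spirit of StringHelper.pjoin.
  static String pjoin(String... parts) {
    return String.join("/", parts);
  }

  public static void main(String[] args) {
    String rmWebAppUrl = "http://rm.example.com:8088";    // made-up host
    String rmAppPageUrlBase = pjoin(rmWebAppUrl, "cluster", "app");
    String appId = "application_1361417656212_0001";      // made-up id
    System.out.println(pjoin(rmAppPageUrlBase, appId));
    // http://rm.example.com:8088/cluster/app/application_1361417656212_0001
  }
}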