MAPREDUCE-5280. Bring back removed constructor and a method in mapreduce ClusterMetrics for binary compatibility with 1.x APIs. Contributed by Mayank Bansal.

svn merge --ignore-ancestry -c 1488458 ../../trunk/


git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1488459 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Vinod Kumar Vavilapalli 2013-06-01 03:59:23 +00:00
parent 089b3edd0e
commit 4e0ac70cea
5 changed files with 70 additions and 19 deletions

View File

@ -126,6 +126,10 @@ Release 2.0.5-beta - UNRELEASED
mapreduce CombineFileRecordReader for binary compatibility with 1.x APIs.
(Mayank Bansal via vinodkv)
MAPREDUCE-5280. Bring back removed constructor and a method in mapreduce
ClusterMetrics for binary compatibility with 1.x APIs. (Mayank Bansal via
vinodkv)
OPTIMIZATIONS
MAPREDUCE-4974. Optimising the LineRecordReader initialize() method

View File

@ -188,6 +188,7 @@ public String toString() {
private JobTrackerStatus status;
private Collection<BlackListInfo> blacklistedTrackersInfo =
new ArrayList<BlackListInfo>();
private int grayListedTrackers;
// Package-private no-arg constructor; presumably required so the framework can
// instantiate the object before populating it via readFields() — TODO confirm.
ClusterStatus() {}
@ -223,9 +224,30 @@ public String toString() {
* @param status the {@link JobTrackerStatus} of the <code>JobTracker</code>
* @param numDecommissionedNodes number of decommissioned trackers
*/
ClusterStatus(int trackers, int blacklists, long ttExpiryInterval,
int maps, int reduces, int maxMaps, int maxReduces,
JobTrackerStatus status, int numDecommissionedNodes) {
ClusterStatus(int trackers, int blacklists, long ttExpiryInterval, int maps,
int reduces, int maxMaps, int maxReduces, JobTrackerStatus status,
int numDecommissionedNodes) {
this(trackers, blacklists, ttExpiryInterval, maps, reduces, maxMaps,
maxReduces, status, numDecommissionedNodes, 0);
}
/**
* Construct a new cluster status.
*
* @param trackers no. of tasktrackers in the cluster
* @param blacklists no. of blacklisted tasktrackers in the cluster
* @param ttExpiryInterval the tasktracker expiry interval
* @param maps no. of currently running map-tasks in the cluster
* @param reduces no. of currently running reduce-tasks in the cluster
* @param maxMaps the maximum no. of map tasks in the cluster
* @param maxReduces the maximum no. of reduce tasks in the cluster
* @param status the {@link JobTrackerStatus} of the <code>JobTracker</code>
* @param numDecommissionedNodes number of decommissioned trackers
* @param numGrayListedTrackers number of graylisted trackers
*/
ClusterStatus(int trackers, int blacklists, long ttExpiryInterval, int maps,
int reduces, int maxMaps, int maxReduces, JobTrackerStatus status,
int numDecommissionedNodes, int numGrayListedTrackers) {
numActiveTrackers = trackers;
numBlacklistedTrackers = blacklists;
this.numExcludedNodes = numDecommissionedNodes;
@ -235,6 +257,7 @@ public String toString() {
max_map_tasks = maxMaps;
max_reduce_tasks = maxReduces;
this.status = status;
this.grayListedTrackers = numGrayListedTrackers;
}
/**
@ -339,7 +362,7 @@ public Collection<String> getGraylistedTrackerNames() {
*/
@Deprecated
public int getGraylistedTrackers() {
return 0;
return grayListedTrackers;
}
/**
@ -480,6 +503,7 @@ public void write(DataOutput out) throws IOException {
out.writeInt(max_map_tasks);
out.writeInt(max_reduce_tasks);
WritableUtils.writeEnum(out, status);
out.writeInt(grayListedTrackers);
}
public void readFields(DataInput in) throws IOException {
@ -507,5 +531,6 @@ public void readFields(DataInput in) throws IOException {
max_map_tasks = in.readInt();
max_reduce_tasks = in.readInt();
status = WritableUtils.readEnum(in, JobTrackerStatus.class);
grayListedTrackers = in.readInt();
}
}

View File

@ -709,19 +709,18 @@ public void displayTasks(final JobID jobId, String type, String state)
public ClusterStatus getClusterStatus() throws IOException {
try {
return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
public ClusterStatus run() throws IOException, InterruptedException {
public ClusterStatus run() throws IOException, InterruptedException {
ClusterMetrics metrics = cluster.getClusterStatus();
return new ClusterStatus(metrics.getTaskTrackerCount(),
metrics.getBlackListedTaskTrackerCount(), cluster.getTaskTrackerExpiryInterval(),
metrics.getOccupiedMapSlots(),
metrics.getOccupiedReduceSlots(), metrics.getMapSlotCapacity(),
metrics.getReduceSlotCapacity(),
cluster.getJobTrackerStatus(),
metrics.getDecommissionedTaskTrackerCount());
return new ClusterStatus(metrics.getTaskTrackerCount(), metrics
.getBlackListedTaskTrackerCount(), cluster
.getTaskTrackerExpiryInterval(), metrics.getOccupiedMapSlots(),
metrics.getOccupiedReduceSlots(), metrics.getMapSlotCapacity(),
metrics.getReduceSlotCapacity(), cluster.getJobTrackerStatus(),
metrics.getDecommissionedTaskTrackerCount(), metrics
.getGrayListedTaskTrackerCount());
}
});
}
catch (InterruptedException ie) {
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}

View File

@ -69,18 +69,28 @@ public class ClusterMetrics implements Writable {
private int totalJobSubmissions;
private int numTrackers;
private int numBlacklistedTrackers;
private int numGraylistedTrackers;
private int numDecommissionedTrackers;
/** No-arg constructor; needed so an empty instance can be populated via {@link #readFields}. */
public ClusterMetrics() {
}
public ClusterMetrics(int runningMaps, int runningReduces,
int occupiedMapSlots, int occupiedReduceSlots,
int reservedMapSlots, int reservedReduceSlots,
int mapSlots, int reduceSlots,
int totalJobSubmissions,
int numTrackers, int numBlacklistedTrackers,
int occupiedMapSlots, int occupiedReduceSlots, int reservedMapSlots,
int reservedReduceSlots, int mapSlots, int reduceSlots,
int totalJobSubmissions, int numTrackers, int numBlacklistedTrackers,
int numDecommissionedNodes) {
this(runningMaps, runningReduces, occupiedMapSlots, occupiedReduceSlots,
reservedMapSlots, reservedReduceSlots, mapSlots, reduceSlots,
totalJobSubmissions, numTrackers, numBlacklistedTrackers, 0,
numDecommissionedNodes);
}
public ClusterMetrics(int runningMaps, int runningReduces,
int occupiedMapSlots, int occupiedReduceSlots, int reservedMapSlots,
int reservedReduceSlots, int mapSlots, int reduceSlots,
int totalJobSubmissions, int numTrackers, int numBlacklistedTrackers,
int numGraylistedTrackers, int numDecommissionedNodes) {
this.runningMaps = runningMaps;
this.runningReduces = runningReduces;
this.occupiedMapSlots = occupiedMapSlots;
@ -92,6 +102,7 @@ public ClusterMetrics(int runningMaps, int runningReduces,
this.totalJobSubmissions = totalJobSubmissions;
this.numTrackers = numTrackers;
this.numBlacklistedTrackers = numBlacklistedTrackers;
this.numGraylistedTrackers = numGraylistedTrackers;
this.numDecommissionedTrackers = numDecommissionedNodes;
}
@ -194,6 +205,15 @@ public int getBlackListedTaskTrackerCount() {
return numBlacklistedTrackers;
}
/**
 * Get the number of graylisted trackers in the cluster.
 * Graylisted trackers are counted separately from blacklisted ones; the
 * count is serialized alongside the other tracker counts in
 * {@code write()}/{@code readFields()}.
 *
 * @return graylisted tracker count
 */
public int getGrayListedTaskTrackerCount() {
return numGraylistedTrackers;
}
/**
* Get the number of decommissioned trackers in the cluster.
*
@ -216,6 +236,7 @@ public void readFields(DataInput in) throws IOException {
totalJobSubmissions = in.readInt();
numTrackers = in.readInt();
numBlacklistedTrackers = in.readInt();
numGraylistedTrackers = in.readInt();
numDecommissionedTrackers = in.readInt();
}
@ -232,6 +253,7 @@ public void write(DataOutput out) throws IOException {
out.writeInt(totalJobSubmissions);
out.writeInt(numTrackers);
out.writeInt(numBlacklistedTrackers);
out.writeInt(numGraylistedTrackers);
out.writeInt(numDecommissionedTrackers);
}

View File

@ -206,6 +206,7 @@ public void testNetworkedJob() throws Exception {
assertEquals(status.getTaskTrackers(), 2);
assertEquals(status.getTTExpiryInterval(), 0);
assertEquals(status.getJobTrackerStatus(), JobTrackerStatus.RUNNING);
assertEquals(status.getGraylistedTrackers(), 0);
// test read and write
ByteArrayOutputStream dataOut = new ByteArrayOutputStream();