MAPREDUCE-2733. [Gridmix] Gridmix3 cpu emulation system tests. (Vinay Thota via amarrk)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1199678 13f79535-47bb-0310-9956-ffa450edef68
Amar Kamat 2011-11-09 09:47:50 +00:00
parent 94c631af1f
commit cec9f82344
10 changed files with 571 additions and 2 deletions

View File

@@ -10,6 +10,9 @@ Trunk (unreleased changes)
(Plamen Jeliazkov via shv)
IMPROVEMENTS
MAPREDUCE-2733. [Gridmix] Gridmix3 cpu emulation system tests.
(Vinay Thota via amarrk)
MAPREDUCE-3008. Improvements to cumulative CPU emulation for short running
tasks in Gridmix. (amarrk)

View File

@@ -123,7 +123,7 @@ public class Gridmix extends Configured implements Tool {
summarizer = new Summarizer(args);
}
-  Gridmix() {
+  public Gridmix() {
summarizer = new Summarizer();
}
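Note: making the no-argument constructor public lets code outside the org.apache.hadoop.mapred.gridmix package (for example, the system tests added in this patch) instantiate Gridmix directly. A minimal, hypothetical sketch of such an invocation using the standard Tool/ToolRunner pattern (the argument layout below is an assumption for illustration, not taken from this patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;

Configuration conf = new Configuration();
Gridmix gridmix = new Gridmix();  // now accessible outside the package
// Placeholder arguments: generate input data, then run jobs from a trace.
String[] args = {"-generate", "1024m", "<iopath>", "<trace>"};
int exitCode = ToolRunner.run(conf, gridmix, args);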

View File

@@ -0,0 +1,108 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.Test;
import org.junit.Assert;
/**
* Test cpu emulation with a custom progress interval for gridmix jobs
* against different input data, submission policies and user resolvers.
* Verify the cpu resource metrics of both the map and reduce phases of
* Gridmix jobs against their corresponding original jobs in the input trace.
*/
public class TestCPUEmulationForMapsAndReducesWithCustomInterval
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog("TestCPUEmulationWithUncompressedInput.class");
int execMode = GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue();
/**
* Generate compressed input and run {@link Gridmix} by turning on the
* cpu emulation feature with a custom emulation interval. The {@link Gridmix}
* should use the following runtime parameters.
* Submission Policy : STRESS, UserResolver: RoundRobinUserResolver.
* Once the {@link Gridmix} run is complete, verify the cpu resource metrics of
* {@link Gridmix} jobs against their corresponding original jobs in the trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testCPUEmulationForMapsAndReducesWithCompressedInputCase7()
throws Exception {
final long inputSizeInMB = 1024 * 7;
String tracePath = getTraceFile("cpu_emul_case2");
Assert.assertNotNull("Trace file not found!", tracePath);
String [] runtimeValues =
{ "LOADJOB",
RoundRobinUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
"-D", GridMixConfig.GRIDMIX_CPU_CUSTOM_INTERVAL + "=0.35F",
"-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" +
GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode);
}
/**
* Generate uncompressed input and run {@link Gridmix} by turning on the
* cpu emulation feature with a custom emulation interval. The {@link Gridmix}
* should use the following runtime parameters.
* Submission Policy : SERIAL, UserResolver: SubmitterUserResolver.
* Once the {@link Gridmix} run is complete, verify the cpu resource metrics of
* {@link Gridmix} jobs against their corresponding original jobs in the trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testCPUEmulatonForMapsAndReducesWithUncompressedInputCase8()
throws Exception {
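// cSize is assumed here to be the size (number of nodes) of the test cluster,
// so the generated input scales with the cluster (assumption, not stated in this patch).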
final long inputSizeInMB = cSize * 300;
String tracePath = getTraceFile("cpu_emul_case2");
Assert.assertNotNull("Trace file not found.", tracePath);
String [] runtimeValues =
{ "LOADJOB",
SubmitterUserResolver.class.getName(),
"SERIAL",
inputSizeInMB + "m",
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
"-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
"-D", GridMixConfig.GRIDMIX_CPU_CUSTOM_INTERVAL + "=0.4F",
"-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" +
GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN };
runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode);
}
}
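Aside: a minimal sketch (not part of this patch) of how the "-D" arguments used in these tests map onto Hadoop Configuration properties through the GridMixConfig keys added later in this commit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;

Configuration gridmixConf = new Configuration();
// Enable the cumulative cpu usage emulation plugin.
gridmixConf.set(GridMixConfig.GRIDMIX_CPU_EMULATON,
                GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN);
// Set a custom emulation progress interval (35% here).
gridmixConf.setFloat(GridMixConfig.GRIDMIX_CPU_CUSTOM_INTERVAL, 0.35F);
// Disable distributed cache emulation for the run.
gridmixConf.setBoolean(GridMixConfig.GRIDMIX_DISTCACHE_ENABLE, false);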

View File

@@ -0,0 +1,105 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.apache.hadoop.mapreduce.JobContext;
import org.junit.Test;
import org.junit.Assert;
/**
* Test cpu emulation with default interval for gridmix jobs
* against different input data, submission policies and user resolvers.
* Verify the cpu resource metrics for both maps and reduces of
* Gridmix jobs with their corresponding original job in the input trace.
*/
public class TestCPUEmulationForMapsAndReducesWithDefaultInterval
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog(
"TestCPUEmulationForMapsAndReducesWithDefaultInterval.class");
int execMode = GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue();
/**
* Generate compressed input and run {@link Gridmix} by turning on the
* cpu emulation feature with default setting. The {@link Gridmix}
* should use the following runtime parameters.
* Submission Policy : REPLAY, UserResolver: RoundRobinUserResolver.
* Once the {@link Gridmix} run is complete, verify cpu resource metrics of
* {@link Gridmix} jobs with their corresponding original jobs in the trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testCPUEmulationForMapsAndReducesWithCompressedInputCase5()
throws Exception {
final long inputSizeInMB = 7168;
String tracePath = getTraceFile("cpu_emul_case2");
Assert.assertNotNull("Trace file not found!", tracePath);
String [] runtimeValues =
{ "LOADJOB",
RoundRobinUserResolver.class.getName(),
"REPLAY",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", JobContext.JOB_CANCEL_DELEGATION_TOKEN + "=false",
"-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" +
GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode);
}
/**
* Generate uncompressed input and run {@link Gridmix} by turning on the
* cpu emulation feature with default settings. The {@link Gridmix}
* should use the following runtime parameters.
* Submission Policy : STRESS, UserResolver: SubmitterUserResolver.
* Once the Gridmix run is complete, verify cpu resource metrics of
* {@link Gridmix} jobs with their corresponding original jobs in the trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testCPUEmulatonForMapsAndReducesWithUncompressedInputCase6()
throws Exception {
final long inputSizeInMB = cSize * 400;
String tracePath = getTraceFile("cpu_emul_case2");
Assert.assertNotNull("Trace file not found!", tracePath);
String [] runtimeValues =
{ "LOADJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
"-D", JobContext.JOB_CANCEL_DELEGATION_TOKEN + "=false",
"-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" +
GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN };
runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode);
}
}

View File

@@ -0,0 +1,105 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.junit.Test;
import org.junit.Assert;
/**
* Test the {@link Gridmix} cpu emulation with custom interval for
* gridmix jobs against different input data, submission policies and
* user resolvers. Verify the map phase cpu metrics of gridmix jobs
* against their original jobs in the trace.
*/
public class TestCPUEmulationForMapsWithCustomInterval
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog("TestCPUEmulationForMapsWithCustomInterval.class");
int execMode = GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue();
/**
* Generate compressed input and run {@link Gridmix} by turning on the
* cpu emulation feature with a custom setting. The {@link Gridmix} should
* use the following runtime parameters while running gridmix jobs.
* Submission Policy : STRESS, User Resolver Mode : SubmitterUserResolver.
* Once the {@link Gridmix} run is complete, verify the map phase cpu resource
* metrics of {@link Gridmix} jobs against their corresponding original jobs
* in the trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testCPUEmulatonForMapsWithCompressedInputCase3()
throws Exception {
final long inputSizeInMB = 1024 * 7;
String tracePath = getTraceFile("cpu_emul_case1");
Assert.assertNotNull("Trace file not found!", tracePath);
String [] runtimeValues = {"LOADJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" +
GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN,
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_CPU_CUSTOM_INTERVAL + "=0.25F"};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode);
}
/**
* Generate uncompressed input and run {@link Gridmix} by turning on the
* cpu emulation feature with custom settings. The {@link Gridmix}
* should use the following runtime parameters while running gridmix jobs.
* Submission Policy: REPLAY, User Resolver Mode: RoundRobinUserResolver.
* Once the {@link Gridmix} run is complete, verify the map phase cpu resource
* metrics of {@link Gridmix} jobs against their corresponding jobs
* in the original trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testCPUEmulatonForMapsUnCompressedInputCase4()
throws Exception {
final long inputSizeInMB = cSize * 200;
String tracePath = getTraceFile("cpu_emul_case1");
Assert.assertNotNull("Trace file not found!", tracePath);
String [] runtimeValues =
{"LOADJOB",
RoundRobinUserResolver.class.getName(),
"REPLAY",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" +
GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN,
"-D", GridMixConfig.GRIDMIX_CPU_CUSTOM_INTERVAL + "=0.35F"};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode);
}
}

View File

@@ -0,0 +1,103 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.junit.Test;
import org.junit.Assert;
/**
* Test the {@link Gridmix} cpu emulation with default settings for
* gridmix jobs against different input data, submission policies and
* user resolvers. Verify the map phase cpu metrics of gridmix jobs
* against their original jobs in the trace.
*/
public class TestCPUEmulationForMapsWithDefaultInterval
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog("TestCPUEmulationForMapsWithDefaultInterval.class");
int execMode = GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue();
/**
* Generate compressed input and run {@link Gridmix} by turning on cpu
* emulation feature with default settings. The {@link Gridmix} should
* use the following runtime parameters while running the gridmix jobs.
* Submission Policy: STRESS, UserResolver: SubmitterUserResolver.
* Once the {@link Gridmix} run is complete, verify map phase cpu metrics of
* {@link Gridmix} jobs with their corresponding original job in a trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testCPUEmulatonForMapsWithCompressedInputCase1()
throws Exception {
final long inputSizeInMB = 1024 * 6;
String tracePath = getTraceFile("cpu_emul_case1");
Assert.assertNotNull("Trace file not found!", tracePath);
String [] runtimeValues = { "LOADJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" +
GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN,
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_HIGH_RAM_JOB_ENABLE + "=false"};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode);
}
/**
* Generate uncompressed input and run {@link Gridmix} by turning on
* cpu emulation feature with default settings. The {@link Gridmix}
* should use the following runtime parameters while running Gridmix jobs.
* Submission Policy: REPLAY, UserResolver: RoundRobinUserResolver
* Once the Gridmix run is complete, verify cpu resource metrics of
* {@link Gridmix} jobs with their corresponding original job in a trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testCPUEmulatonForMapsWithUnCompressedInputCase2()
throws Exception {
final long inputSizeInMB = cSize * 200;
String tracePath = getTraceFile("cpu_emul_case1");
Assert.assertNotNull("Trace file not found!", tracePath);
String [] runtimeValues =
{ "LOADJOB",
RoundRobinUserResolver.class.getName(),
"REPLAY",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_HIGH_RAM_JOB_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" +
GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
}

View File

@@ -23,6 +23,7 @@ import org.apache.hadoop.mapred.gridmix.SleepJob;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.*;
/**
* Gridmix system tests configurations.
@@ -218,4 +219,33 @@ public class GridMixConfig {
*/
public static final String CLUSTER_MAX_REDUCE_MEMORY =
JTConfig.JT_MAX_REDUCEMEMORY_MB;
/**
* Configuration key for the Gridmix resource usage emulation plugins, used here to enable cpu emulation.
*/
public static final String GRIDMIX_CPU_EMULATON =
ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS;
/**
* Gridmix cpu usage emulation plugin.
*/
public static final String GRIDMIX_CPU_USAGE_PLUGIN =
CumulativeCpuUsageEmulatorPlugin.class.getName();
/**
* Gridmix cpu emulation custom interval.
*/
public static final String GRIDMIX_CPU_CUSTOM_INTERVAL =
CumulativeCpuUsageEmulatorPlugin.CPU_EMULATION_PROGRESS_INTERVAL;
/**
* Lower bound (in percent) on the acceptable cpu emulation accuracy.
*/
public static int GRIDMIX_CPU_EMULATION_LOWER_LIMIT = 55;
/**
* Upper bound (in percent) on the acceptable cpu emulation accuracy.
*/
public static int GRIDMIX_CPU_EMULATION_UPPER_LIMIT = 130;
}
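These two limits bound the ratio of simulated to original cumulative CPU time that the verification code below accepts. A small worked illustration (the numbers are made up; the check mirrors verifyCPUEmulationOfJobs further down):

// If the original job's maps consumed 40,000 ms of CPU and the simulated job's
// maps consumed 30,000 ms, the emulation accuracy is
//   round((30000 * 100) / 40000) = 75%,
// which lies inside [GRIDMIX_CPU_EMULATION_LOWER_LIMIT, GRIDMIX_CPU_EMULATION_UPPER_LIMIT] = [55, 130].
long accuracy = Math.round((30000d * 100) / 40000);
boolean withinBounds =
    accuracy >= GridMixConfig.GRIDMIX_CPU_EMULATION_LOWER_LIMIT
        && accuracy <= GridMixConfig.GRIDMIX_CPU_EMULATION_UPPER_LIMIT;  // true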

View File

@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskCounter;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.CounterGroup;
@@ -105,7 +106,7 @@ public class GridmixJobVerification {
* @throws ParseException - if a parse error occurs.
*/
public void verifyGridmixJobsWithJobStories(List<JobID> jobids)
-    throws IOException, ParseException {
+    throws Exception {
SortedMap <Long, String> origSubmissionTime = new TreeMap <Long, String>();
SortedMap <Long, String> simuSubmissionTime = new TreeMap<Long, String>();
@@ -147,6 +148,7 @@ public class GridmixJobVerification {
setJobDistributedCacheInfo(simuJobId.toString(), simuJobConf,
zombieJob.getJobConf());
verifyHighRamMemoryJobs(zombieJob, simuJobConf);
verifyCPUEmulationOfJobs(zombieJob, jhInfo, simuJobConf);
LOG.info("Done.");
}
verifyDistributedCacheBetweenJobs(simuAndOrigJobsInfo);
@@ -353,6 +355,119 @@ public class GridmixJobVerification {
fs.close();
}
}
/**
* Verifies the cpu resource usage of a gridmix job against
* its corresponding original job.
* @param origJobHistory - Original job history.
* @param simuJobHistoryInfo - Simulated job history.
* @param simuJobConf - Simulated job configuration.
* @throws Exception - if an error occurs.
*/
public void verifyCPUEmulationOfJobs(ZombieJob origJobHistory,
JobHistoryParser.JobInfo simuJobHistoryInfo,
JobConf simuJobConf) throws Exception {
if (simuJobConf.get(GridMixConfig.GRIDMIX_CPU_EMULATON) != null) {
Map<String,Long> origJobMetrics =
getOriginalJobCPUMetrics(origJobHistory);
Map<String,Long> simuJobMetrics =
getSimulatedJobCPUMetrics(simuJobHistoryInfo);
long origMapUsage = origJobMetrics.get("MAP");
LOG.info("Maps cpu usage of original job:" + origMapUsage);
long origReduceUsage = origJobMetrics.get("REDUCE");
LOG.info("Reduces cpu usage of original job:" + origReduceUsage);
long simuMapUsage = simuJobMetrics.get("MAP");
LOG.info("Maps cpu usage of simulated job:" + simuMapUsage);
long simuReduceUsage = simuJobMetrics.get("REDUCE");
LOG.info("Reduces cpu usage of simulated job:"+ simuReduceUsage);
long mapCount = simuJobHistoryInfo.getTotalMaps();
long reduceCount = simuJobHistoryInfo.getTotalReduces();
if (mapCount > 0) {
double mapEmulFactor = (simuMapUsage * 100) / origMapUsage;
long mapEmulAccuracy = Math.round(mapEmulFactor);
LOG.info("CPU emulation accuracy for maps in job " +
simuJobHistoryInfo.getJobId() +
":"+ mapEmulAccuracy + "%");
Assert.assertTrue("Map-side cpu emulaiton inaccurate!" +
" Actual cpu usage: " + simuMapUsage +
" Expected cpu usage: " + origMapUsage, mapEmulAccuracy
>= GridMixConfig.GRIDMIX_CPU_EMULATION_LOWER_LIMIT
&& mapEmulAccuracy
<= GridMixConfig.GRIDMIX_CPU_EMULATION_UPPER_LIMIT);
}
if (reduceCount > 0) {
double reduceEmulFactor = (simuReduceUsage * 100) / origReduceUsage;
long reduceEmulAccuracy = Math.round(reduceEmulFactor);
LOG.info("CPU emulation accuracy for reduces in job " +
simuJobHistoryInfo.getJobId() +
": " + reduceEmulAccuracy + "%");
Assert.assertTrue("Reduce side cpu emulaiton inaccurate!" +
" Actual cpu usage:" + simuReduceUsage +
"Expected cpu usage: " + origReduceUsage,
reduceEmulAccuracy
>= GridMixConfig.GRIDMIX_CPU_EMULATION_LOWER_LIMIT
&& reduceEmulAccuracy
<= GridMixConfig.GRIDMIX_CPU_EMULATION_UPPER_LIMIT);
}
}
}
/**
* Get the simulated job cpu metrics.
* @param jhInfo - Simulated job history
* @return - cpu metrics as a map.
* @throws Exception - if an error occurs.
*/
private Map<String,Long> getSimulatedJobCPUMetrics(
JobHistoryParser.JobInfo jhInfo) throws Exception {
Map<String, Long> resourceMetrics = new HashMap<String, Long>();
long mapCPUUsage =
getCounterValue(jhInfo.getMapCounters(),
TaskCounter.CPU_MILLISECONDS.toString());
resourceMetrics.put("MAP", mapCPUUsage);
long reduceCPUUsage =
getCounterValue(jhInfo.getReduceCounters(),
TaskCounter.CPU_MILLISECONDS.toString());
resourceMetrics.put("REDUCE", reduceCPUUsage);
return resourceMetrics;
}
/**
* Get the original job cpu metrics.
* @param zombieJob - original job history.
* @return - cpu metrics as a map.
*/
private Map<String, Long> getOriginalJobCPUMetrics(ZombieJob zombieJob) {
long mapTotalCPUUsage = 0;
long reduceTotalCPUUsage = 0;
Map<String,Long> resourceMetrics = new HashMap<String,Long>();
for (int index = 0; index < zombieJob.getNumberMaps(); index ++) {
TaskInfo mapTask = zombieJob.getTaskInfo(TaskType.MAP, index);
if (mapTask.getResourceUsageMetrics().getCumulativeCpuUsage() > 0) {
mapTotalCPUUsage +=
mapTask.getResourceUsageMetrics().getCumulativeCpuUsage();
}
}
resourceMetrics.put("MAP", mapTotalCPUUsage);
for (int index = 0; index < zombieJob.getNumberReduces(); index ++) {
TaskInfo reduceTask = zombieJob.getTaskInfo(TaskType.REDUCE, index);
if (reduceTask.getResourceUsageMetrics().getCumulativeCpuUsage() > 0) {
reduceTotalCPUUsage +=
reduceTask.getResourceUsageMetrics().getCumulativeCpuUsage();
}
}
resourceMetrics.put("REDUCE", reduceTotalCPUUsage);
return resourceMetrics;
}
/**
* Get the user resolver of a job.