MAPREDUCE-2563. [Gridmix] Add High-Ram emulation system tests to Gridmix. (Vinay Kumar Thota via amarrk)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1135462 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Amar Kamat 2011-06-14 10:18:27 +00:00
parent 3fd40ae8d0
commit 7f0064e81b
13 changed files with 593 additions and 1 deletions

View File

@ -30,6 +30,9 @@ Trunk (unreleased changes)
IMPROVEMENTS
MAPREDUCE-2563. [Gridmix] Add High-Ram emulation system tests to
Gridmix. (Vinay Kumar Thota via amarrk)
MAPREDUCE-2104. [Rumen] Add Cpu, Memory and Heap usages to
TraceBuilder's output. (amarrk)

View File

@ -24,11 +24,14 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.test.system.MRCluster;
import org.apache.hadoop.mapreduce.test.system.JTProtocol;
import org.apache.hadoop.mapreduce.test.system.JTClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.gridmix.test.system.GridmixJobSubmission;
import org.apache.hadoop.mapred.gridmix.test.system.GridmixJobVerification;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.apache.hadoop.mapred.gridmix.test.system.GridmixJobStory;
import org.apache.hadoop.tools.rumen.ZombieJob;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.mapreduce.JobID;
import org.junit.AfterClass;
@ -37,7 +40,9 @@ import org.junit.BeforeClass;
import java.util.Iterator;
import java.util.Map;
import java.util.List;
import java.util.Set;
import java.io.IOException;
import org.junit.Assert;
/**
* Run and verify the Gridmix jobs for given a trace.
@ -80,7 +85,8 @@ public class GridmixSystemTestCase {
/* Clean up the proxy user directories if gridmix run with
RoundRobinUserResolver mode.*/
if (gridmixJV.getJobUserResolver().contains("RoundRobin")) {
if (gridmixJV != null
&& gridmixJV.getJobUserResolver().contains("RoundRobin")) {
List<String> proxyUsers =
UtilsForGridmix.listProxyUsers(gridmixJS.getJobConf(),
UserGroupInformation.getLoginUser().getShortUserName());
@ -163,6 +169,59 @@ public class GridmixSystemTestCase {
return null;
}
/**
 * Validate the task memory parameters of the high ram jobs in a trace
 * against the cluster's configured per-task memory limits.
 * @param tracePath - path of the trace file.
 * @param isTraceHasHighRamJobs - true if the trace has high ram job(s),
 *                                otherwise false.
 * @throws IOException - if reading the trace or the cluster
 *                       configuration fails.
 */
@SuppressWarnings("deprecation")
public static void validateTaskMemoryParamters(String tracePath,
    boolean isTraceHasHighRamJobs) throws IOException {
  if (isTraceHasHighRamJobs) {
    GridmixJobStory gjs = new GridmixJobStory(new Path(tracePath),
                                              rtClient.getDaemonConf());
    Set<JobID> jobids = gjs.getZombieJobs().keySet();
    boolean isHighRamFlag = false;
    for (JobID jobid : jobids) {
      ZombieJob zombieJob = gjs.getZombieJobs().get(jobid);
      JobConf origJobConf = zombieJob.getJobConf();
      int origMapFactor =
          GridmixJobVerification.getMapFactor(origJobConf);
      int origReduceFactor =
          GridmixJobVerification.getReduceFactor(origJobConf);
      // A job counts as high ram when either its maps or its reduces
      // occupy two or more slots.
      if (origMapFactor >= 2 || origReduceFactor >= 2) {
        isHighRamFlag = true;
        long taskMapMemInMB =
            GridmixJobVerification.getScaledTaskMemInMB(
                GridMixConfig.JOB_MAP_MEMORY_MB,
                GridMixConfig.CLUSTER_MAP_MEMORY,
                origJobConf, rtClient.getDaemonConf());
        long taskReduceMemInMB =
            GridmixJobVerification.getScaledTaskMemInMB(
                GridMixConfig.JOB_REDUCE_MEMORY_MB,
                GridMixConfig.CLUSTER_REDUCE_MEMORY,
                origJobConf, rtClient.getDaemonConf());
        long taskMapLimitInMB =
            conf.getLong(GridMixConfig.CLUSTER_MAX_MAP_MEMORY,
                         JobConf.DISABLED_MEMORY_LIMIT);
        long taskReduceLimitInMB =
            conf.getLong(GridMixConfig.CLUSTER_MAX_REDUCE_MEMORY,
                         JobConf.DISABLED_MEMORY_LIMIT);
        // The scaled task memory must not exceed the cluster's upper
        // limit for the corresponding task type.
        GridmixJobVerification.verifyMemoryLimits(taskMapMemInMB,
                                                  taskMapLimitInMB);
        GridmixJobVerification.verifyMemoryLimits(taskReduceMemInMB,
                                                  taskReduceLimitInMB);
      }
    }
    Assert.assertTrue("Trace doesn't have at least one high ram job.",
                      isHighRamFlag);
  }
}
public static boolean isLocalDistCache(String fileName, String userName,
boolean visibility) {
return DistributedCacheEmulator.isLocalDistCacheFile(fileName,

View File

@ -0,0 +1,65 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.GridmixJob;
import org.junit.Test;
import org.junit.Assert;
/**
 * Run the {@link Gridmix} with a high ram jobs trace by disabling the
 * emulation of high ram and verify each {@link Gridmix} job
 * whether it honors the high ram or not. In disable mode it
 * should not honor the high ram and run it as a normal job.
 */
public class TestDisableGridmixEmulationOfHighRam
    extends GridmixSystemTestCase {
  // Use the class literal (not a string) so the logger is named after the
  // class, consistent with the other high ram emulation tests.
  private static final Log LOG =
      LogFactory.getLog(TestDisableGridmixEmulationOfHighRam.class);

  /**
   * Generate input data and run {@link Gridmix} with a high ram jobs trace
   * as a load job and SERIAL submission policy in a SubmitterUserResolver
   * mode. Verify each {@link Gridmix} job whether it honors the
   * high ram or not after completion of execution. In disable mode the
   * jobs should not honor the high ram.
   * @throws Exception - if an error occurs.
   */
  @Test
  public void testEmulationOfHighRamForReducersOfMRJobs()
      throws Exception {
    final long inputSizeInMB = cSize * 250;
    String tracePath = getTraceFile("highram_mr_jobs_case3");
    Assert.assertNotNull("Trace file has not found.", tracePath);
    String [] runtimeValues = {"LOADJOB",
                               SubmitterUserResolver.class.getName(),
                               "SERIAL",
                               inputSizeInMB + "m",
                               tracePath};

    String [] otherArgs = {
        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false"};
    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
  }
}

View File

@ -0,0 +1,64 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.junit.Test;
import org.junit.Assert;
/**
 * Run the {@link Gridmix} with a combination of high ram and normal jobs
 * in the trace and verify whether the high ram {@link Gridmix} jobs honor
 * the high ram emulation. Normal MR jobs should not honor the high ram
 * emulation.
 */
public class TestEmulationOfHighRamAndNormalMRJobs
    extends GridmixSystemTestCase {
  // Use the class literal (not a string) so the logger is named after the
  // class, consistent with the other high ram emulation tests.
  private static final Log LOG =
      LogFactory.getLog(TestEmulationOfHighRamAndNormalMRJobs.class);

  /**
   * Generate input data and run the combination of normal and high ram
   * {@link Gridmix} jobs as load jobs with SERIAL submission policy
   * in a SubmitterUserResolver mode. Verify whether each {@link Gridmix}
   * job honors the high ram or not after completion of execution.
   * @throws Exception - if an error occurs.
   */
  @Test
  public void testEmulationOfHighRamForReducersOfMRJobs()
      throws Exception {
    final long inputSizeInMB = cSize * 250;
    String tracePath = getTraceFile("highram_mr_jobs_case4");
    Assert.assertNotNull("Trace file has not found.", tracePath);
    String [] runtimeArgs = {"LOADJOB",
                             SubmitterUserResolver.class.getName(),
                             "SERIAL",
                             inputSizeInMB + "m",
                             tracePath};

    String [] otherArgs = {
        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
        "-D", GridMixConfig.GRIDMIX_HIGH_RAM_JOB_ENABLE + "=true"};

    validateTaskMemoryParamters(tracePath, true);
    runGridmixAndVerify(runtimeArgs, otherArgs, tracePath);
  }
}

View File

@ -0,0 +1,64 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.GridmixJob;
import org.junit.Test;
import org.junit.Assert;
/**
 * Run the {@link Gridmix} with a high ram jobs trace and
 * verify each {@link Gridmix} job whether it honors the high ram or not.
 * In the trace the jobs should use the high ram for both maps and reduces.
 */
public class TestGridmixEmulationOfHighRamJobsCase1
    extends GridmixSystemTestCase {
  // Use the class literal (not a string) so the logger is named after the
  // class, consistent with the other high ram emulation tests.
  private static final Log LOG =
      LogFactory.getLog(TestGridmixEmulationOfHighRamJobsCase1.class);

  /**
   * Generate input data and run {@link Gridmix} with a high ram jobs trace
   * as a load job and STRESS submission policy in a SubmitterUserResolver
   * mode. Verify each {@link Gridmix} job whether it honors the high ram
   * or not after completion of execution.
   * @throws Exception - if an error occurs.
   */
  @Test
  public void testEmulationOfHighRamForMapsAndReducesOfMRJobs()
      throws Exception {
    final long inputSizeInMB = cSize * 400;
    String tracePath = getTraceFile("highram_mr_jobs_case1");
    Assert.assertNotNull("Trace file has not found.", tracePath);
    String [] runtimeValues = {"LOADJOB",
                               SubmitterUserResolver.class.getName(),
                               "STRESS",
                               inputSizeInMB + "m",
                               tracePath};

    String [] otherArgs = {
        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=true"};

    validateTaskMemoryParamters(tracePath, true);
    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
  }
}

View File

@ -0,0 +1,67 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.GridmixJob;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.junit.Test;
import org.junit.Assert;
/**
 * Run the {@link Gridmix} with a high ram jobs trace and
 * verify each {@link Gridmix} job whether it honors the high ram or not.
 * In the trace the jobs should use the high ram only for maps.
 */
public class TestGridmixEmulationOfHighRamJobsCase2
    extends GridmixSystemTestCase {
  // Use the class literal (not a string) so the logger is named after the
  // class, consistent with the other high ram emulation tests.
  private static final Log LOG =
      LogFactory.getLog(TestGridmixEmulationOfHighRamJobsCase2.class);

  /**
   * Generate input data and run {@link Gridmix} with a high ram jobs trace
   * as a load job and REPLAY submission policy in a RoundRobinUserResolver
   * mode. Verify each {@link Gridmix} job whether it honors the high ram
   * or not after completion of execution.
   * @throws Exception - if an error occurs.
   */
  @Test
  public void testEmulationOfHighRamForMapsOfMRJobs()
      throws Exception {
    final long inputSizeInMB = cSize * 300;
    String tracePath = getTraceFile("highram_mr_jobs_case2");
    Assert.assertNotNull("Trace file has not found.", tracePath);
    String [] runtimeValues =
        {"LOADJOB",
         RoundRobinUserResolver.class.getName(),
         "REPLAY",
         inputSizeInMB + "m",
         "file://" + UtilsForGridmix.getProxyUsersFile(conf),
         tracePath};

    String [] otherArgs = {
        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=true"};

    validateTaskMemoryParamters(tracePath, true);
    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
  }
}

View File

@ -0,0 +1,64 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.GridmixJob;
import org.junit.Test;
import org.junit.Assert;
/**
 * Run the {@link Gridmix} with a high ram jobs trace and
 * verify each {@link Gridmix} job whether it honors the high ram or not.
 * In the trace the jobs should use the high ram only for reducers.
 */
public class TestGridmixEmulationOfHighRamJobsCase3
    extends GridmixSystemTestCase {
  private static final Log LOG =
      LogFactory.getLog(TestGridmixEmulationOfHighRamJobsCase3.class);

  /**
   * Generate input data and run {@link Gridmix} with a high ram jobs trace
   * as a load job and SERIAL submission policy in a SubmitterUserResolver
   * mode. Verify each {@link Gridmix} job whether it honors the
   * high ram or not after completion of execution.
   * @throws Exception - if an error occurs.
   */
  @Test
  public void testEmulationOfHighRamForReducersOfMRJobs()
      throws Exception {
    final long inputSizeInMB = 250 * cSize;
    final String tracePath = getTraceFile("highram_mr_jobs_case3");
    Assert.assertNotNull("Trace file has not found.", tracePath);

    // Gridmix runtime arguments: job type, user resolver, submission
    // policy, input data size and the trace location.
    final String[] gridmixArgs = {
        "LOADJOB",
        SubmitterUserResolver.class.getName(),
        "SERIAL",
        inputSizeInMB + "m",
        tracePath};

    // Extra configuration: distcache and compression emulation off,
    // high ram emulation on.
    final String[] configArgs = {
        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=true"};

    validateTaskMemoryParamters(tracePath, true);
    runGridmixAndVerify(gridmixArgs, configArgs, tracePath);
  }
}

View File

@ -20,8 +20,13 @@ package org.apache.hadoop.mapred.gridmix.test.system;
import org.apache.hadoop.mapred.gridmix.Gridmix;
import org.apache.hadoop.mapred.gridmix.JobCreator;
import org.apache.hadoop.mapred.gridmix.SleepJob;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
/**
* Gridmix system tests configurations.
*/
public class GridMixConfig {
/**
@ -171,4 +176,46 @@ public class GridMixConfig {
*/
public static final String GRIDMIX_SLEEP_REDUCE_MAX_TIME =
SleepJob.GRIDMIX_SLEEP_MAX_REDUCE_TIME;
/**
 * Configuration key that enables/disables high ram job emulation in Gridmix.
 */
public static final String GRIDMIX_HIGH_RAM_JOB_ENABLE =
"gridmix.highram-emulation.enable";
/**
 * Per-job map task memory, in MB.
 */
public static final String JOB_MAP_MEMORY_MB =
MRJobConfig.MAP_MEMORY_MB;
/**
 * Per-job reduce task memory, in MB.
 */
public static final String JOB_REDUCE_MEMORY_MB =
MRJobConfig.REDUCE_MEMORY_MB;
/**
 * Cluster-wide map slot memory, in MB.
 */
public static final String CLUSTER_MAP_MEMORY =
MRConfig.MAPMEMORY_MB;
/**
 * Cluster-wide reduce slot memory, in MB.
 */
public static final String CLUSTER_REDUCE_MEMORY =
MRConfig.REDUCEMEMORY_MB;
/**
 * Cluster upper limit for map task memory, in MB.
 */
public static final String CLUSTER_MAX_MAP_MEMORY =
JTConfig.JT_MAX_MAPMEMORY_MB;
/**
 * Cluster upper limit for reduce task memory, in MB.
 */
public static final String CLUSTER_MAX_REDUCE_MEMORY =
JTConfig.JT_MAX_REDUCEMEMORY_MB;
}

View File

@ -146,6 +146,7 @@ public class GridmixJobVerification {
verifyDistributeCache(zombieJob,simuJobConf);
setJobDistributedCacheInfo(simuJobId.toString(), simuJobConf,
zombieJob.getJobConf());
verifyHighRamMemoryJobs(zombieJob, simuJobConf);
LOG.info("Done.");
}
verifyDistributedCacheBetweenJobs(simuAndOrigJobsInfo);
@ -740,6 +741,164 @@ public class GridmixJobVerification {
return occursList;
}
/**
 * It verifies the high ram gridmix jobs.
 * @param zombieJob - Original job story.
 * @param simuJobConf - Simulated job configuration.
 */
@SuppressWarnings("deprecation")
public void verifyHighRamMemoryJobs(ZombieJob zombieJob,
    JobConf simuJobConf) {
  JobConf origJobConf = zombieJob.getJobConf();
  int origMapFactor = getMapFactor(origJobConf);
  int origReduceFactor = getReduceFactor(origJobConf);
  boolean isHighRamEnable =
      simuJobConf.getBoolean(GridMixConfig.GRIDMIX_HIGH_RAM_JOB_ENABLE,
                             false);
  // Map the (map factor, reduce factor, emulation state) combination to
  // the verification option understood by assertGridMixHighRamJob:
  // 1-3 when emulation is enabled, 4-6 when it is disabled.
  int option = 0;
  if (origMapFactor >= 2 && origReduceFactor >= 2) {
    option = isHighRamEnable ? 1 : 4;
  } else if (origMapFactor >= 2) {
    option = isHighRamEnable ? 2 : 5;
  } else if (origReduceFactor >= 2) {
    option = isHighRamEnable ? 3 : 6;
  }
  // Nothing to verify when the original job is not a high ram job.
  if (option != 0) {
    assertGridMixHighRamJob(simuJobConf, origJobConf, option);
  }
}
/**
 * Get the value for identifying the slots used by the map.
 * @param jobConf - job configuration
 * @return - map factor value.
 */
public static int getMapFactor(Configuration jobConf) {
  // Number of map slots a task occupies: the job's map memory divided by
  // the cluster's per-slot map memory, rounded up.
  double jobMapMemInMB =
      Long.parseLong(jobConf.get(GridMixConfig.JOB_MAP_MEMORY_MB));
  long slotMapMemInMB =
      Long.parseLong(jobConf.get(GridMixConfig.CLUSTER_MAP_MEMORY));
  return (int) Math.ceil(jobMapMemInMB / slotMapMemInMB);
}
/**
 * Get the value for identifying the slots used by the reduce.
 * @param jobConf - job configuration.
 * @return - reduce factor value.
 */
public static int getReduceFactor(Configuration jobConf) {
  // Number of reduce slots a task occupies: the job's reduce memory
  // divided by the cluster's per-slot reduce memory, rounded up.
  double jobReduceMemInMB =
      Long.parseLong(jobConf.get(GridMixConfig.JOB_REDUCE_MEMORY_MB));
  long slotReduceMemInMB =
      Long.parseLong(jobConf.get(GridMixConfig.CLUSTER_REDUCE_MEMORY));
  return (int) Math.ceil(jobReduceMemInMB / slotReduceMemInMB);
}
/**
 * Asserts whether the simulated job honored (or ignored) the high ram
 * settings of the original job, depending on the requested option.
 * option 1 : both map and reduce honor the high ram.
 * option 2 : map only honors the high ram.
 * option 3 : reduce only honors the high ram.
 * option 4 : neither map nor reduce should honor the high ram
 *            (emulation disabled).
 * option 5 : map should not honor the high ram (emulation disabled).
 * option 6 : reduce should not honor the high ram (emulation disabled).
 * @param simuJobConf - simulated job configuration.
 * @param origConf - original job configuration.
 * @param option - verification mode, see above.
 */
@SuppressWarnings("deprecation")
private void assertGridMixHighRamJob(JobConf simuJobConf,
    Configuration origConf, int option) {
  int simuMapFactor = getMapFactor(simuJobConf);
  int simuReduceFactor = getReduceFactor(simuJobConf);
  switch (option) {
    case 1 :
      assertMapHonorsHighRam(simuMapFactor, origConf);
      assertReduceHonorsHighRam(simuReduceFactor, origConf);
      break;
    case 2 :
      assertMapHonorsHighRam(simuMapFactor, origConf);
      break;
    case 3 :
      assertReduceHonorsHighRam(simuReduceFactor, origConf);
      break;
    case 4 :
      assertMapIgnoresHighRam(simuMapFactor, origConf);
      assertReduceIgnoresHighRam(simuReduceFactor, origConf);
      break;
    case 5 :
      assertMapIgnoresHighRam(simuMapFactor, origConf);
      break;
    case 6 :
      assertReduceIgnoresHighRam(simuReduceFactor, origConf);
      break;
    default :
      // Guard against a bad caller instead of silently verifying nothing.
      Assert.fail("Unknown high ram verification option: " + option);
  }
}

/** Map side must use >= 2 slots and match the original job's map factor. */
private void assertMapHonorsHighRam(int simuMapFactor,
    Configuration origConf) {
  Assert.assertTrue("Gridmix job has not honored the high "
                   + "ram for map.", simuMapFactor >= 2
                   && simuMapFactor == getMapFactor(origConf));
}

/** Reduce side must use >= 2 slots and match the original's factor. */
private void assertReduceHonorsHighRam(int simuReduceFactor,
    Configuration origConf) {
  Assert.assertTrue("Gridmix job has not honored the high "
                   + "ram for reduce.", simuReduceFactor >= 2
                   && simuReduceFactor == getReduceFactor(origConf));
}

/** With emulation disabled the map side must run as a normal job. */
private void assertMapIgnoresHighRam(int simuMapFactor,
    Configuration origConf) {
  Assert.assertTrue("Gridmix job has honored the high "
                   + "ram for map in emulation disable state.",
                   simuMapFactor < 2
                   && simuMapFactor != getMapFactor(origConf));
}

/** With emulation disabled the reduce side must run as a normal job. */
private void assertReduceIgnoresHighRam(int simuReduceFactor,
    Configuration origConf) {
  Assert.assertTrue("Gridmix job has honored the high "
                   + "ram for reduce in emulation disable state.",
                   simuReduceFactor < 2
                   && simuReduceFactor != getReduceFactor(origConf));
}
/**
 * Get task memory after scaling based on cluster configuration.
 * @param jobTaskKey - Job task key attribute.
 * @param clusterTaskKey - Cluster task key attribute.
 * @param origConf - Original job configuration.
 * @param simuConf - Simulated job configuration.
 * @return scaled task memory value.
 */
@SuppressWarnings("deprecation")
public static long getScaledTaskMemInMB(String jobTaskKey,
                                        String clusterTaskKey,
                                        Configuration origConf,
                                        Configuration simuConf) {
  long origClusterTaskValue =
      origConf.getLong(clusterTaskKey, JobConf.DISABLED_MEMORY_LIMIT);
  long origJobTaskValue =
      origConf.getLong(jobTaskKey, JobConf.DISABLED_MEMORY_LIMIT);
  long simuClusterTaskValue =
      simuConf.getLong(clusterTaskKey, JobConf.DISABLED_MEMORY_LIMIT);
  // The number of slots the original job needed, rounded up; the
  // simulated job is expected to use the same slot count on the
  // simulated cluster's per-slot memory.
  double scaleFactor =
      Math.ceil((double) origJobTaskValue / origClusterTaskValue);
  return (long) (scaleFactor * simuClusterTaskValue);
}
/**
 * It verifies that a task's memory does not exceed its upper limit.
 * @param taskMemInMB - task memory, in MB.
 * @param taskLimitInMB - task memory upper limit, in MB.
 */
public static void verifyMemoryLimits(long taskMemInMB,
                                      long taskLimitInMB) {
  if (taskMemInMB > taskLimitInMB) {
    // Include the actual values so a failure is diagnosable from the log.
    Assert.fail("Simulated job's task memory exceeds the "
               + "upper limit of task virtual memory. [task memory: "
               + taskMemInMB + " MB, limit: " + taskLimitInMB + " MB]");
  }
}
private String convertJobStatus(String jobStatus) {
if (jobStatus.equals("SUCCEEDED")) {
return "SUCCESS";