MAPREDUCE-6673. Add a test example job that grows in memory usage over time (Karthik Kambatla via Haibo Chen)
Change-Id: Iccfc8c67c38c526cc61726d87bfcbcf69ac36fea (cherry picked from commit 25ac44709b4bbed78b607ea48021237b64e01b9f)
This commit is contained in:
parent
8515d35bd5
commit
d4f553d42f
|
@ -0,0 +1,68 @@
|
||||||
|
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
|
||||||
|
package org.apache.hadoop.mapreduce;
|
||||||
|
|
||||||
|
import org.apache.commons.logging.Log;
|
||||||
|
import org.apache.commons.logging.LogFactory;
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.io.IntWritable;
|
||||||
|
import org.apache.hadoop.util.ToolRunner;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.util.ArrayList;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* A sleep job whose mappers create 1MB buffer for every record.
|
||||||
|
*/
|
||||||
|
public class GrowingSleepJob extends SleepJob {
|
||||||
|
private static final Log LOG = LogFactory.getLog(GrowingSleepJob.class);
|
||||||
|
|
||||||
|
public static class GrowingSleepMapper extends SleepMapper {
|
||||||
|
private final int MB = 1024 * 1024;
|
||||||
|
private ArrayList<byte[]> bytes = new ArrayList<>();
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void map(IntWritable key, IntWritable value, Context context)
|
||||||
|
throws IOException, InterruptedException {
|
||||||
|
super.map(key, value, context);
|
||||||
|
long free = Runtime.getRuntime().freeMemory();
|
||||||
|
if (free > 32 * MB) {
|
||||||
|
LOG.info("Free memory = " + free +
|
||||||
|
" bytes. Creating 1 MB on the heap.");
|
||||||
|
bytes.add(new byte[MB]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public static void main(String[] args) throws Exception {
|
||||||
|
int res = ToolRunner.run(new Configuration(), new GrowingSleepJob(), args);
|
||||||
|
System.exit(res);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Job createJob(int numMapper, int numReducer,
|
||||||
|
long mapSleepTime, int mapSleepCount,
|
||||||
|
long reduceSleepTime, int reduceSleepCount)
|
||||||
|
throws IOException {
|
||||||
|
Job job = super.createJob(numMapper, numReducer, mapSleepTime,
|
||||||
|
mapSleepCount, reduceSleepTime, reduceSleepCount);
|
||||||
|
job.setMapperClass(GrowingSleepMapper.class);
|
||||||
|
job.setJobName("Growing sleep job");
|
||||||
|
return job;
|
||||||
|
}
|
||||||
|
}
|
|
@ -30,6 +30,7 @@ import org.apache.hadoop.mapred.TestTextInputFormat;
|
||||||
import org.apache.hadoop.mapred.ThreadedMapBenchmark;
|
import org.apache.hadoop.mapred.ThreadedMapBenchmark;
|
||||||
import org.apache.hadoop.mapreduce.TimelineServicePerformance;
|
import org.apache.hadoop.mapreduce.TimelineServicePerformance;
|
||||||
import org.apache.hadoop.mapreduce.FailJob;
|
import org.apache.hadoop.mapreduce.FailJob;
|
||||||
|
import org.apache.hadoop.mapreduce.GrowingSleepJob;
|
||||||
import org.apache.hadoop.mapreduce.LargeSorter;
|
import org.apache.hadoop.mapreduce.LargeSorter;
|
||||||
import org.apache.hadoop.mapreduce.MiniHadoopClusterManager;
|
import org.apache.hadoop.mapreduce.MiniHadoopClusterManager;
|
||||||
import org.apache.hadoop.mapreduce.SleepJob;
|
import org.apache.hadoop.mapreduce.SleepJob;
|
||||||
|
@ -91,6 +92,8 @@ public class MapredTestDriver {
|
||||||
pgd.addClass("fail", FailJob.class, "a job that always fails");
|
pgd.addClass("fail", FailJob.class, "a job that always fails");
|
||||||
pgd.addClass("sleep", SleepJob.class,
|
pgd.addClass("sleep", SleepJob.class,
|
||||||
"A job that sleeps at each map and reduce task.");
|
"A job that sleeps at each map and reduce task.");
|
||||||
|
pgd.addClass("gsleep", GrowingSleepJob.class,
|
||||||
|
"A sleep job whose mappers create 1MB buffer for every record.");
|
||||||
pgd.addClass("timelineperformance", TimelineServicePerformance.class,
|
pgd.addClass("timelineperformance", TimelineServicePerformance.class,
|
||||||
"A job that launches mappers to test timlineserver performance.");
|
"A job that launches mappers to test timlineserver performance.");
|
||||||
pgd.addClass("nnbench", NNBench.class,
|
pgd.addClass("nnbench", NNBench.class,
|
||||||
|
|
Loading…
Reference in New Issue