svn merge -c 1414239 FIXES: YARN-204. test coverage for org.apache.hadoop.tools (Aleksey Gorshkov via bobby)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1414241 13f79535-47bb-0310-9956-ffa450edef68
Robert Joseph Evans 2012-11-27 16:06:57 +00:00
parent 5b7d4c8c6c
commit de89166116
6 changed files with 417 additions and 117 deletions

@@ -27,8 +27,6 @@ import java.util.Collections;
import java.util.List;
import java.util.StringTokenizer;
import junit.framework.TestCase;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
@@ -38,111 +36,117 @@ import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.util.JarFinder;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/**
* test {@link HadoopArchives}
*/
public class TestHadoopArchives extends TestCase {
public class TestHadoopArchives {
public static final String HADOOP_ARCHIVES_JAR = JarFinder.getJar(HadoopArchives.class);
public static final String HADOOP_ARCHIVES_JAR = JarFinder
.getJar(HadoopArchives.class);
{
((Log4JLogger)LogFactory.getLog(org.apache.hadoop.security.Groups.class)
).getLogger().setLevel(Level.ERROR);
((Log4JLogger)org.apache.hadoop.ipc.Server.LOG
).getLogger().setLevel(Level.ERROR);
((Log4JLogger)org.apache.hadoop.util.AsyncDiskService.LOG
).getLogger().setLevel(Level.ERROR);
((Log4JLogger) LogFactory.getLog(org.apache.hadoop.security.Groups.class))
.getLogger().setLevel(Level.ERROR);
}
private static final String inputDir = "input";
private Path inputPath;
private MiniDFSCluster dfscluster;
private MiniMRCluster mapred;
private Configuration conf;
private FileSystem fs;
private Path archivePath;
static private Path createFile(Path dir, String filename, FileSystem fs
) throws IOException {
static private Path createFile(Path dir, String filename, FileSystem fs)
throws IOException {
final Path f = new Path(dir, filename);
final FSDataOutputStream out = fs.create(f);
final FSDataOutputStream out = fs.create(f);
out.write(filename.getBytes());
out.close();
return f;
}
protected void setUp() throws Exception {
super.setUp();
dfscluster = new MiniDFSCluster(new Configuration(), 2, true, null);
@Before
public void setUp() throws Exception {
conf = new Configuration();
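// a single "default" queue at 100% capacity for the CapacityScheduler, so the MiniMRCluster can accept jobs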
conf.set(CapacitySchedulerConfiguration.PREFIX
+ CapacitySchedulerConfiguration.ROOT + "."
+ CapacitySchedulerConfiguration.QUEUES, "default");
conf.set(CapacitySchedulerConfiguration.PREFIX
+ CapacitySchedulerConfiguration.ROOT + ".default."
+ CapacitySchedulerConfiguration.CAPACITY, "100");
dfscluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true)
.build();
fs = dfscluster.getFileSystem();
mapred = new MiniMRCluster(2, fs.getUri().toString(), 1);
inputPath = new Path(fs.getHomeDirectory(), inputDir);
inputPath = new Path(fs.getHomeDirectory(), inputDir);
archivePath = new Path(fs.getHomeDirectory(), "archive");
fs.mkdirs(inputPath);
createFile(inputPath, "a", fs);
createFile(inputPath, "b", fs);
createFile(inputPath, "c", fs);
}
protected void tearDown() throws Exception {
@After
public void tearDown() throws Exception {
try {
if (mapred != null) {
mapred.shutdown();
if (dfscluster != null) {
dfscluster.shutdown();
}
if (dfscluster != null) {
dfscluster.shutdown();
}
} catch(Exception e) {
} catch (Exception e) {
System.err.println(e);
}
super.tearDown();
}
@Test
public void testRelativePath() throws Exception {
fs.delete(archivePath, true);
final Path sub1 = new Path(inputPath, "dir1");
fs.mkdirs(sub1);
createFile(sub1, "a", fs);
final Configuration conf = mapred.createJobConf();
final FsShell shell = new FsShell(conf);
final List<String> originalPaths = lsr(shell, "input");
System.out.println("originalPath: " + originalPaths);
final URI uri = fs.getUri();
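// files inside the archive are addressed as har://hdfs-<host>:<port>/<archive dir>/<archive name>/...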
final String prefix = "har://hdfs-" + uri.getHost() +":" + uri.getPort()
final String prefix = "har://hdfs-" + uri.getHost() + ":" + uri.getPort()
+ archivePath.toUri().getPath() + Path.SEPARATOR;
{
final String harName = "foo.har";
final String[] args = {
"-archiveName",
harName,
"-p",
"input",
"*",
"archive"
};
System.setProperty(HadoopArchives.TEST_HADOOP_ARCHIVES_JAR_PATH, HADOOP_ARCHIVES_JAR);
final HadoopArchives har = new HadoopArchives(mapred.createJobConf());
assertEquals(0, ToolRunner.run(har, args));
final String[] args = { "-archiveName", harName, "-p", "input", "*",
"archive" };
System.setProperty(HadoopArchives.TEST_HADOOP_ARCHIVES_JAR_PATH,
HADOOP_ARCHIVES_JAR);
final HadoopArchives har = new HadoopArchives(conf);
Assert.assertEquals(0, ToolRunner.run(har, args));
//compare results
// compare results
final List<String> harPaths = lsr(shell, prefix + harName);
assertEquals(originalPaths, harPaths);
Assert.assertEquals(originalPaths, harPaths);
}
}
@Test
public void testPathWithSpaces() throws Exception {
fs.delete(archivePath, true);
//create files/directories with spaces
// create files/directories with spaces
createFile(inputPath, "c c", fs);
final Path sub1 = new Path(inputPath, "sub 1");
fs.mkdirs(sub1);
@@ -154,42 +158,36 @@ public class TestHadoopArchives extends TestCase {
final Path sub2 = new Path(inputPath, "sub 1 with suffix");
fs.mkdirs(sub2);
createFile(sub2, "z", fs);
final Configuration conf = mapred.createJobConf();
final FsShell shell = new FsShell(conf);
final String inputPathStr = inputPath.toUri().getPath();
System.out.println("inputPathStr = " + inputPathStr);
final List<String> originalPaths = lsr(shell, inputPathStr);
final URI uri = fs.getUri();
final String prefix = "har://hdfs-" + uri.getHost() +":" + uri.getPort()
final String prefix = "har://hdfs-" + uri.getHost() + ":" + uri.getPort()
+ archivePath.toUri().getPath() + Path.SEPARATOR;
{//Enable space replacement
{// Enable space replacement
final String harName = "foo.har";
final String[] args = {
"-archiveName",
harName,
"-p",
inputPathStr,
"*",
archivePath.toString()
};
System.setProperty(HadoopArchives.TEST_HADOOP_ARCHIVES_JAR_PATH, HADOOP_ARCHIVES_JAR);
final HadoopArchives har = new HadoopArchives(mapred.createJobConf());
assertEquals(0, ToolRunner.run(har, args));
final String[] args = { "-archiveName", harName, "-p", inputPathStr, "*",
archivePath.toString() };
System.setProperty(HadoopArchives.TEST_HADOOP_ARCHIVES_JAR_PATH,
HADOOP_ARCHIVES_JAR);
final HadoopArchives har = new HadoopArchives(conf);
Assert.assertEquals(0, ToolRunner.run(har, args));
//compare results
// compare results
final List<String> harPaths = lsr(shell, prefix + harName);
assertEquals(originalPaths, harPaths);
Assert.assertEquals(originalPaths, harPaths);
}
}
private static List<String> lsr(final FsShell shell, String dir
) throws Exception {
private static List<String> lsr(final FsShell shell, String dir)
throws Exception {
System.out.println("lsr root=" + dir);
final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
final PrintStream out = new PrintStream(bytes);
final PrintStream oldOut = System.out;
final PrintStream oldErr = System.err;
@@ -197,7 +195,7 @@ public class TestHadoopArchives extends TestCase {
System.setErr(out);
final String results;
try {
assertEquals(0, shell.run(new String[]{"-lsr", dir}));
Assert.assertEquals(0, shell.run(new String[] { "-lsr", dir }));
results = bytes.toString();
} finally {
IOUtils.closeStream(out);
@@ -206,13 +204,13 @@ public class TestHadoopArchives extends TestCase {
}
System.out.println("lsr results:\n" + results);
String dirname = dir;
if (dir.lastIndexOf(Path.SEPARATOR) != -1 ) {
if (dir.lastIndexOf(Path.SEPARATOR) != -1) {
dirname = dir.substring(dir.lastIndexOf(Path.SEPARATOR));
}
final List<String> paths = new ArrayList<String>();
for(StringTokenizer t = new StringTokenizer(results, "\n");
t.hasMoreTokens(); ) {
for (StringTokenizer t = new StringTokenizer(results, "\n"); t
.hasMoreTokens();) {
final String s = t.nextToken();
final int i = s.indexOf(dirname);
if (i >= 0) {
@@ -220,7 +218,8 @@ public class TestHadoopArchives extends TestCase {
}
}
Collections.sort(paths);
System.out.println("lsr paths = " + paths.toString().replace(", ", ",\n "));
System.out
.println("lsr paths = " + paths.toString().replace(", ", ",\n "));
return paths;
}
}

@@ -0,0 +1,166 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.JobSubmissionFiles;
import org.apache.hadoop.tools.util.TestDistCpUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.io.OutputStream;
import java.security.Permission;
public class TestExternalCall {
private static final Log LOG = LogFactory.getLog(TestExternalCall.class);
private static FileSystem fs;
private static String root;
private static Configuration getConf() {
Configuration conf = new Configuration();
conf.set("fs.default.name", "file:///");
conf.set("mapred.job.tracker", "local");
return conf;
}
@Before
public void setup() {
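// save the current security manager and install one that turns System.exit() into an ExitException,
// so DistCp.main() can be tested without terminating the JVM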
securityManager = System.getSecurityManager();
System.setSecurityManager(new NoExitSecurityManager());
try {
fs = FileSystem.get(getConf());
root = new Path("target/tmp").makeQualified(fs.getUri(),
fs.getWorkingDirectory()).toString();
TestDistCpUtils.delete(fs, root);
} catch (IOException e) {
LOG.error("Exception encountered ", e);
}
}
@After
public void tearDown() {
System.setSecurityManager(securityManager);
}
/**
* Test the run() and execute() methods of the DistCp class with a simple file copy.
* @throws Exception
*/
@Test
public void testCleanup() throws Exception {
Configuration conf = getConf();
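// resolve the MapReduce job staging directory and make sure it exists before running DistCp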
Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf),
conf);
stagingDir.getFileSystem(conf).mkdirs(stagingDir);
Path soure = createFile("tmp.txt");
Path target = createFile("target.txt");
DistCp distcp = new DistCp(conf, null);
String[] arg = { soure.toString(), target.toString() };
distcp.run(arg);
Assert.assertTrue(fs.exists(target));
}
private Path createFile(String fname) throws IOException {
Path result = new Path(root + "/" + fname);
OutputStream out = fs.create(result);
try {
out.write((root + "/" + fname).getBytes());
out.write("\n".getBytes());
} finally {
out.close();
}
return result;
}
/**
* Test the main method of DistCp. The method is expected to call System.exit().
*/
@Test
public void testCleanupTestViaToolRunner() throws IOException, InterruptedException {
Configuration conf = getConf();
Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
stagingDir.getFileSystem(conf).mkdirs(stagingDir);
Path soure = createFile("tmp.txt");
Path target = createFile("target.txt");
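// DistCp.main() calls System.exit(); the NoExitSecurityManager converts that into an ExitException we can assert on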
try {
String[] arg = {target.toString(),soure.toString()};
DistCp.main(arg);
Assert.fail();
} catch (ExitException t) {
Assert.assertTrue(fs.exists(target));
Assert.assertEquals(t.status, 0);
Assert.assertEquals(
stagingDir.getFileSystem(conf).listStatus(stagingDir).length, 0);
}
}
private SecurityManager securityManager;
protected static class ExitException extends SecurityException {
private static final long serialVersionUID = -1982617086752946683L;
public final int status;
public ExitException(int status) {
super("There is no escape!");
this.status = status;
}
}
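/** A SecurityManager that allows everything, except that System.exit() is converted into an ExitException so tests can assert on the exit status. */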
private static class NoExitSecurityManager extends SecurityManager {
@Override
public void checkPermission(Permission perm) {
// allow anything.
}
@Override
public void checkPermission(Permission perm, Object context) {
// allow anything.
}
@Override
public void checkExit(int status) {
super.checkExit(status);
throw new ExitException(status);
}
}
}

@@ -43,21 +43,19 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.Builder;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.tools.DistCpV1;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Level;
import org.junit.Ignore;
/**
* A JUnit test for copying files recursively.
*/
@Ignore
public class TestCopyFiles extends TestCase {
{
((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.StateChange")
@@ -738,20 +736,22 @@ public class TestCopyFiles extends TestCase {
public void testMapCount() throws Exception {
String namenode = null;
MiniDFSCluster dfs = null;
MiniMRCluster mr = null;
MiniDFSCluster mr = null;
try {
Configuration conf = new Configuration();
dfs = new MiniDFSCluster(conf, 3, true, null);
dfs = new MiniDFSCluster.Builder(conf).numDataNodes(3).format(true).build();
FileSystem fs = dfs.getFileSystem();
final FsShell shell = new FsShell(conf);
namenode = fs.getUri().toString();
mr = new MiniMRCluster(3, namenode, 1);
MyFile[] files = createFiles(fs.getUri(), "/srcdat");
long totsize = 0;
for (MyFile f : files) {
totsize += f.getSize();
}
Configuration job = mr.createJobConf();
Configuration job = new JobConf(conf);
job.setLong("distcp.bytes.per.map", totsize / 3);
ToolRunner.run(new DistCpV1(job),
new String[] {"-m", "100",
@@ -766,8 +766,7 @@ public class TestCopyFiles extends TestCase {
System.out.println(execCmd(shell, "-lsr", logdir));
FileStatus[] logs = fs.listStatus(new Path(logdir));
// rare case where splits are exact, logs.length can be 4
assertTrue("Unexpected map count, logs.length=" + logs.length,
logs.length == 5 || logs.length == 4);
assertTrue(logs.length == 2);
deldir(fs, "/destdat");
deldir(fs, "/logs");

@@ -22,8 +22,6 @@ import java.io.DataOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.LogFactory;
@@ -39,10 +37,10 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.MiniMRClientClusterFactory;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import org.apache.log4j.Level;
import org.junit.Ignore;
@Ignore
public class TestDistCh extends junit.framework.TestCase {
{
((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.StateChange")
@@ -52,7 +50,8 @@ public class TestDistCh extends junit.framework.TestCase {
}
static final Long RANDOM_NUMBER_GENERATOR_SEED = null;
static final FsPermission UMASK = FsPermission.createImmutable((short)0111);
private static final Random RANDOM = new Random();
static {
final long seed = RANDOM_NUMBER_GENERATOR_SEED == null?
@@ -65,7 +64,7 @@ public class TestDistCh extends junit.framework.TestCase {
new Path(System.getProperty("test.build.data","/tmp")
).toString().replace(' ', '+');
static final int NUN_SUBS = 5;
static final int NUN_SUBS = 7;
static class FileTree {
private final FileSystem fs;
@@ -127,9 +126,12 @@ public class TestDistCh extends junit.framework.TestCase {
public void testDistCh() throws Exception {
final Configuration conf = new Configuration();
final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
conf.set(CapacitySchedulerConfiguration.PREFIX+CapacitySchedulerConfiguration.ROOT+"."+CapacitySchedulerConfiguration.QUEUES, "default");
conf.set(CapacitySchedulerConfiguration.PREFIX+CapacitySchedulerConfiguration.ROOT+".default."+CapacitySchedulerConfiguration.CAPACITY, "100");
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build();
final FileSystem fs = cluster.getFileSystem();
final MiniMRCluster mr = new MiniMRCluster(2, fs.getUri().toString(), 1);
final FsShell shell = new FsShell(conf);
try {
@@ -138,37 +140,36 @@ public class TestDistCh extends junit.framework.TestCase {
runLsr(shell, tree.root, 0);
//generate random arguments
final String[] args = new String[RANDOM.nextInt(NUN_SUBS-1) + 1];
final String[] args = new String[NUN_SUBS];
final PermissionStatus[] newstatus = new PermissionStatus[NUN_SUBS];
final List<Integer> indices = new LinkedList<Integer>();
for(int i = 0; i < NUN_SUBS; i++) {
indices.add(i);
}
for(int i = 0; i < args.length; i++) {
final int index = indices.remove(RANDOM.nextInt(indices.size()));
final String sub = "sub" + index;
final boolean changeOwner = RANDOM.nextBoolean();
final boolean changeGroup = RANDOM.nextBoolean();
final boolean changeMode = !changeOwner && !changeGroup? true: RANDOM.nextBoolean();
final String owner = changeOwner? sub: "";
final String group = changeGroup? sub: "";
final String permission = changeMode? RANDOM.nextInt(8) + "" + RANDOM.nextInt(8) + "" + RANDOM.nextInt(8): "";
args[i] = tree.root + "/" + sub + ":" + owner + ":" + group + ":" + permission;
newstatus[index] = new ChPermissionStatus(rootstatus, owner, group, permission);
}
for(int i = 0; i < NUN_SUBS; i++) {
if (newstatus[i] == null) {
newstatus[i] = new ChPermissionStatus(rootstatus);
}
}
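// each argument has the form <path>:<owner>:<group>:<permission>; an empty field leaves that attribute unchanged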
args[0]="/test/testDistCh/sub0:sub1::";
newstatus[0] = new ChPermissionStatus(rootstatus, "sub1", "", "");
args[1]="/test/testDistCh/sub1::sub2:";
newstatus[1] = new ChPermissionStatus(rootstatus, "", "sub2", "");
args[2]="/test/testDistCh/sub2:::437";
newstatus[2] = new ChPermissionStatus(rootstatus, "", "", "437");
args[3]="/test/testDistCh/sub3:sub1:sub2:447";
newstatus[3] = new ChPermissionStatus(rootstatus, "sub1", "sub2", "447");
args[4]="/test/testDistCh/sub4::sub5:437";
newstatus[4] = new ChPermissionStatus(rootstatus, "", "sub5", "437");
args[5]="/test/testDistCh/sub5:sub1:sub5:";
newstatus[5] = new ChPermissionStatus(rootstatus, "sub1", "sub5", "");
args[6]="/test/testDistCh/sub6:sub3::437";
newstatus[6] = new ChPermissionStatus(rootstatus, "sub3", "", "437");
System.out.println("args=" + Arrays.asList(args).toString().replace(",", ",\n "));
System.out.println("newstatus=" + Arrays.asList(newstatus).toString().replace(",", ",\n "));
//run DistCh
new DistCh(mr.createJobConf()).run(args);
new DistCh(MiniMRClientClusterFactory.create(this.getClass(), 2, conf).getConfig()).run(args);
runLsr(shell, tree.root, 0);
//check results
@@ -184,7 +185,7 @@ public class TestDistCh extends junit.framework.TestCase {
}
}
static final FsPermission UMASK = FsPermission.createImmutable((short)0111);
static void checkFileStatus(PermissionStatus expected, FileStatus actual) {
assertEquals(expected.getUserName(), actual.getOwner());

@@ -0,0 +1,132 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.EnumSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.junit.Assert;
import org.junit.Test;
public class TestLogalyzer {
private static String EL = System.getProperty("line.separator");
private static String TAB = "\t";
private static final Log LOG = LogFactory.getLog(TestLogalyzer.class);
private static File workSpace = new File("target",
TestLogalyzer.class.getName() + "-workSpace");
private static File outdir = new File(workSpace.getAbsoluteFile()
+ File.separator + "out");
@Test
public void testLogalyzer() throws Exception {
Path f = createLogFile();
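// run Logalyzer over the generated log files: grep for "44", sort on column 0, write the analysis to outdir, and use a single space as the column separator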
String[] args = new String[10];
args[0] = "-archiveDir";
args[1] = f.toString();
args[2] = "-grep";
args[3] = "44";
args[4] = "-sort";
args[5] = "0";
args[6] = "-analysis";
args[7] = outdir.getAbsolutePath();
args[8] = "-separator";
args[9] = " ";
Logalyzer.main(args);
checkResult();
}
private void checkResult() throws Exception {
File result = new File(outdir.getAbsolutePath() + File.separator
+ "part-00000");
File success = new File(outdir.getAbsolutePath() + File.separator
+ "_SUCCESS");
Assert.assertTrue(success.exists());
FileInputStream fis = new FileInputStream(result);
BufferedReader br = new BufferedReader(new InputStreamReader(fis, "UTF-8"));
String line = br.readLine();
Assert.assertTrue(("1 44" + TAB + "2").equals(line));
line = br.readLine();
Assert.assertTrue(("3 44" + TAB + "1").equals(line));
line = br.readLine();
Assert.assertTrue(("4 44" + TAB + "1").equals(line));
br.close();
}
/**
* Create two simple log files for the test.
*
* @return the path of the directory containing the log files
* @throws IOException
*/
private Path createLogFile() throws IOException {
FileContext files = FileContext.getLocalFSFileContext();
Path ws = new Path(workSpace.getAbsoluteFile().getAbsolutePath());
files.delete(ws, true);
Path workSpacePath = new Path(workSpace.getAbsolutePath(), "log");
files.mkdir(workSpacePath, null, true);
LOG.info("create logfile.log");
Path logfile1 = new Path(workSpacePath, "logfile.log");
FSDataOutputStream os = files.create(logfile1,
EnumSet.of(CreateFlag.CREATE));
os.writeBytes("4 3" + EL + "1 3" + EL + "4 44" + EL);
os.writeBytes("2 3" + EL + "1 3" + EL + "0 45" + EL);
os.writeBytes("4 3" + EL + "1 3" + EL + "1 44" + EL);
os.flush();
os.close();
LOG.info("create logfile1.log");
Path logfile2 = new Path(workSpacePath, "logfile1.log");
os = files.create(logfile2, EnumSet.of(CreateFlag.CREATE));
os.writeBytes("4 3" + EL + "1 3" + EL + "3 44" + EL);
os.writeBytes("2 3" + EL + "1 3" + EL + "0 45" + EL);
os.writeBytes("4 3" + EL + "1 3" + EL + "1 44" + EL);
os.flush();
os.close();
return workSpacePath;
}
}

@@ -163,6 +163,9 @@ Release 0.23.6 - UNRELEASED
YARN-151. Browser thinks RM main page JS is taking too long
(Ravi Prakash via bobby)
YARN-204. test coverage for org.apache.hadoop.tools (Aleksey Gorshkov via
bobby)
Release 0.23.5 - UNRELEASED
INCOMPATIBLE CHANGES