svn merge -c 1329403. FIXES: MAPREDUCE-4133. MR over viewfs is broken (John George via bobby)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1329405 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Robert Joseph Evans 2012-04-23 19:46:16 +00:00
parent 1fca6cfe03
commit 88d736ce2b
4 changed files with 9 additions and 7 deletions

View File

@@ -286,6 +286,7 @@ Release 0.23.3 - UNRELEASED
     MAPREDUCE-4190. Improve web UI for task attempts userlog link (Tom Graves
     via bobby)
+    MAPREDUCE-4133. MR over viewfs is broken (John George via bobby)
 Release 0.23.2 - UNRELEASED

View File

@@ -827,7 +827,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
         || (sysMaxReduces == 1);
     long sysMaxBytes = conf.getLong(MRJobConfig.JOB_UBERTASK_MAXBYTES,
-        fs.getDefaultBlockSize()); // FIXME: this is wrong; get FS from
+        fs.getDefaultBlockSize(this.remoteJobSubmitDir)); // FIXME: this is wrong; get FS from
         // [File?]InputFormat and default block size
         // from that

View File

@@ -128,7 +128,7 @@ public class DistributedFSCheck extends TestCase {
     if (rootStatus.isFile()) {
       nrFiles++;
       // For a regular file generate <fName,offset> pairs
-      long blockSize = fs.getDefaultBlockSize();
+      long blockSize = fs.getDefaultBlockSize(rootFile);
       long fileLength = rootStatus.getLen();
       for(long offset = 0; offset < fileLength; offset += blockSize)
         writer.append(new Text(rootFile.toString()), new LongWritable(offset));
@@ -160,15 +160,16 @@ public class DistributedFSCheck extends TestCase {
       ) throws IOException {
     // open file
     FSDataInputStream in = null;
+    Path p = new Path(name);
     try {
-      in = fs.open(new Path(name));
+      in = fs.open(p);
     } catch(IOException e) {
       return name + "@(missing)";
     }
     in.seek(offset);
     long actualSize = 0;
     try {
-      long blockSize = fs.getDefaultBlockSize();
+      long blockSize = fs.getDefaultBlockSize(p);
       reporter.setStatus("reading " + name + "@" +
                          offset + "/" + blockSize);
       for( int curSize = bufferSize;

View File

@@ -287,7 +287,7 @@ public class JobHistory {
       FSDataOutputStream out = logDirFs.create(logFile,
           new FsPermission(JobHistory.HISTORY_FILE_PERMISSION),
           true, defaultBufferSize,
-          logDirFs.getDefaultReplication(),
+          logDirFs.getDefaultReplication(logFile),
           jobHistoryBlockSize, null);
       EventWriter writer = new EventWriter(out);
@@ -306,8 +306,8 @@ public class JobHistory {
         jobFileOut = logDirFs.create(logDirConfPath,
             new FsPermission(JobHistory.HISTORY_FILE_PERMISSION),
             true, defaultBufferSize,
-            logDirFs.getDefaultReplication(),
-            logDirFs.getDefaultBlockSize(), null);
+            logDirFs.getDefaultReplication(logDirConfPath),
+            logDirFs.getDefaultBlockSize(logDirConfPath), null);
         jobConf.writeXml(jobFileOut);
         jobFileOut.close();
       }