MAPREDUCE-4133. MR over viewfs is broken (John George via bobby)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1329403 13f79535-47bb-0310-9956-ffa450edef68
parent 796e1a48ac
commit 80ab78c217
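The common thread in this patch: ViewFileSystem is a client-side mount table over several underlying filesystems, so the no-arg server-default queries (getDefaultBlockSize(), getDefaultReplication()) have no single answer over viewfs; the path-qualified overloads resolve the mount point first and ask the filesystem that actually backs the path. A minimal sketch of the distinction, assuming a configured viewfs mount table (the mount point and namenode URI below are hypothetical, not from the patch):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ViewfsDefaultsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical mount table entry: viewfs:///user -> hdfs://nn1/user
    conf.set("fs.viewfs.mounttable.default.link./user", "hdfs://nn1/user");
    FileSystem fs = FileSystem.get(URI.create("viewfs:///"), conf);

    // fs.getDefaultBlockSize();  // no path: viewfs cannot pick an
    //                            // underlying filesystem, so this fails
    long blockSize = fs.getDefaultBlockSize(new Path("/user/data"));
    System.out.println("block size behind /user: " + blockSize);
  }
}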
@@ -395,6 +395,7 @@ Release 0.23.3 - UNRELEASED
     MAPREDUCE-4190. Improve web UI for task attempts userlog link (Tom Graves
     via bobby)
 
+    MAPREDUCE-4133. MR over viewfs is broken (John George via bobby)
 
 Release 0.23.2 - UNRELEASED
@@ -827,7 +827,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
         || (sysMaxReduces == 1);
 
     long sysMaxBytes = conf.getLong(MRJobConfig.JOB_UBERTASK_MAXBYTES,
-        fs.getDefaultBlockSize()); // FIXME: this is wrong; get FS from
+        fs.getDefaultBlockSize(this.remoteJobSubmitDir)); // FIXME: this is wrong; get FS from
         // [File?]InputFormat and default block size
         // from that
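Here the fallback for JOB_UBERTASK_MAXBYTES is taken from the block size of the filesystem backing the job's staging directory (this.remoteJobSubmitDir) rather than from the no-arg default, which viewfs cannot answer; the pre-existing FIXME about deriving the size from the InputFormat instead is deliberately left in place.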
@@ -128,7 +128,7 @@ public class DistributedFSCheck extends TestCase {
     if (rootStatus.isFile()) {
       nrFiles++;
       // For a regular file generate <fName,offset> pairs
-      long blockSize = fs.getDefaultBlockSize();
+      long blockSize = fs.getDefaultBlockSize(rootFile);
       long fileLength = rootStatus.getLen();
       for(long offset = 0; offset < fileLength; offset += blockSize)
         writer.append(new Text(rootFile.toString()), new LongWritable(offset));
@@ -160,15 +160,16 @@ public class DistributedFSCheck extends TestCase {
                        ) throws IOException {
       // open file
       FSDataInputStream in = null;
+      Path p = new Path(name);
       try {
-        in = fs.open(new Path(name));
+        in = fs.open(p);
       } catch(IOException e) {
         return name + "@(missing)";
       }
       in.seek(offset);
       long actualSize = 0;
       try {
-        long blockSize = fs.getDefaultBlockSize();
+        long blockSize = fs.getDefaultBlockSize(p);
         reporter.setStatus("reading " + name + "@" +
                            offset + "/" + blockSize);
         for( int curSize = bufferSize;
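In the test, the Path is now constructed once and reused both to open the stream and to query the default block size, so the block size reported comes from the same filesystem the file actually lives on when fs is a viewfs client.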
@@ -287,7 +287,7 @@ public class JobHistory {
     FSDataOutputStream out = logDirFs.create(logFile,
         new FsPermission(JobHistory.HISTORY_FILE_PERMISSION),
         true, defaultBufferSize,
-        logDirFs.getDefaultReplication(),
+        logDirFs.getDefaultReplication(logFile),
         jobHistoryBlockSize, null);
 
     EventWriter writer = new EventWriter(out);
@@ -306,8 +306,8 @@ public class JobHistory {
     jobFileOut = logDirFs.create(logDirConfPath,
         new FsPermission(JobHistory.HISTORY_FILE_PERMISSION),
         true, defaultBufferSize,
-        logDirFs.getDefaultReplication(),
-        logDirFs.getDefaultBlockSize(), null);
+        logDirFs.getDefaultReplication(logDirConfPath),
+        logDirFs.getDefaultBlockSize(logDirConfPath), null);
     jobConf.writeXml(jobFileOut);
     jobFileOut.close();
   }
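For the history-file writes, both server defaults passed to FileSystem.create() are now resolved against the target path. A minimal sketch of the pattern, assuming a path that may sit behind a viewfs mount (the 0744 permission and buffer-size fallback are illustrative, not taken from the patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class HistoryWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path logFile = new Path(args[0]);              // e.g. a viewfs:// path
    FileSystem fs = logFile.getFileSystem(conf);   // FS that backs this path

    FSDataOutputStream out = fs.create(logFile,
        new FsPermission((short) 0744),            // illustrative permission
        true,                                      // overwrite
        conf.getInt("io.file.buffer.size", 4096),  // buffer size
        fs.getDefaultReplication(logFile),         // per-path, viewfs-safe
        fs.getDefaultBlockSize(logFile),           // per-path, viewfs-safe
        null);                                     // no progress callback
    out.close();
  }
}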