HADOOP-11529. Fix findbugs warnings in hadoop-archives. Contributed by Masatake Iwasaki.

This commit is contained in:
Haohui Mai 2015-02-03 10:53:17 -08:00
parent 410830fe8c
commit 6c5ae1571c
2 changed files with 9 additions and 10 deletions

View File

@ -464,6 +464,9 @@ Release 2.7.0 - UNRELEASED
    HADOOP-10181. GangliaContext does not work with multicast ganglia setup.
    (Andrew Johnson via cnauroth)

+   HADOOP-11529. Fix findbugs warnings in hadoop-archives.
+   (Masatake Iwasaki via wheat9)

  Release 2.6.1 - UNRELEASED

    INCOMPATIBLE CHANGES

View File

@ -68,6 +68,7 @@ import org.apache.hadoop.mapreduce.MRJobConfig;
  import org.apache.hadoop.util.Tool;
  import org.apache.hadoop.util.ToolRunner;

+ import com.google.common.base.Charsets;

  /**
   * a archive creation utility.
@ -237,7 +238,6 @@ public class HadoopArchives implements Tool {
    ArrayList<FileSplit> splits = new ArrayList<FileSplit>(numSplits);
    LongWritable key = new LongWritable();
    final HarEntry value = new HarEntry();
-   SequenceFile.Reader reader = null;
    // the remaining bytes in the file split
    long remaining = fstatus.getLen();
    // the count of sizes calculated till now
@ -249,8 +249,7 @@ public class HadoopArchives implements Tool {
    long targetSize = totalSize/numSplits;
    // create splits of size target size so that all the maps
    // have equals sized data to read and write to.
-   try {
-     reader = new SequenceFile.Reader(fs, src, jconf);
+   try (SequenceFile.Reader reader = new SequenceFile.Reader(fs, src, jconf)) {
      while(reader.next(key, value)) {
        if (currentCount + key.get() > targetSize && currentCount != 0){
          long size = lastPos - startPos;
@ -267,9 +266,6 @@ public class HadoopArchives implements Tool {
        splits.add(new FileSplit(src, startPos, remaining, (String[])null));
      }
    }
-   finally {
-     reader.close();
-   }
    return splits.toArray(new FileSplit[splits.size()]);
  }
@ -741,7 +737,7 @@ public class HadoopArchives implements Tool {
      indexStream = fs.create(index);
      outStream = fs.create(masterIndex);
      String version = VERSION + " \n";
-     outStream.write(version.getBytes());
+     outStream.write(version.getBytes(Charsets.UTF_8));
    } catch(IOException e) {
      throw new RuntimeException(e);
@ -760,7 +756,7 @@ public class HadoopArchives implements Tool {
      while(values.hasNext()) {
        Text value = values.next();
        String towrite = value.toString() + "\n";
-       indexStream.write(towrite.getBytes());
+       indexStream.write(towrite.getBytes(Charsets.UTF_8));
        written++;
        if (written > numIndexes -1) {
          // every 1000 indexes we report status
@ -769,7 +765,7 @@ public class HadoopArchives implements Tool {
          endIndex = keyVal;
          String masterWrite = startIndex + " " + endIndex + " " + startPos
                              + " " + indexStream.getPos() + " \n" ;
-         outStream.write(masterWrite.getBytes());
+         outStream.write(masterWrite.getBytes(Charsets.UTF_8));
          startPos = indexStream.getPos();
          startIndex = endIndex;
          written = 0;
@ -782,7 +778,7 @@ public class HadoopArchives implements Tool {
      if (written > 0) {
        String masterWrite = startIndex + " " + keyVal + " " + startPos +
                             " " + indexStream.getPos() + " \n";
-       outStream.write(masterWrite.getBytes());
+       outStream.write(masterWrite.getBytes(Charsets.UTF_8));
      }
      // close the streams
      outStream.close();