HADOOP-8818. Use equals instead of == in MD5MD5CRC32FileChecksum and TFileDumper. Contributed by Brandon Li.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1385374 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent
0de9642514
commit
0c53ed4cd1
|
@ -208,6 +208,9 @@ Trunk (Unreleased)
|
|||
HADOOP-8767. Secondary namenode is started on slave nodes instead of
|
||||
master nodes. (Giovanni Delussu via suresh)
|
||||
|
||||
HADOOP-8818. Use equals instead of == in MD5MD5CRC32FileChecksum
|
||||
and TFileDumper. (Brandon Li via suresh)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
HADOOP-7761. Improve the performance of raw comparisons. (todd)
|
||||
|
|
|
@ -133,7 +133,7 @@ public class MD5MD5CRC32FileChecksum extends FileChecksum {
|
|||
|
||||
try {
|
||||
// old versions don't support crcType.
|
||||
if (crcType == null || crcType == "") {
|
||||
if (crcType == null || crcType.equals("")) {
|
||||
finalCrcType = DataChecksum.Type.CRC32;
|
||||
} else {
|
||||
finalCrcType = DataChecksum.Type.valueOf(crcType);
|
||||
|
|
|
@ -125,7 +125,7 @@ class TFileDumper {
|
|||
dataSizeUncompressed += region.getRawSize();
|
||||
}
|
||||
properties.put("Data Block Bytes", Long.toString(dataSize));
|
||||
if (reader.readerBCF.getDefaultCompressionName() != "none") {
|
||||
if (!reader.readerBCF.getDefaultCompressionName().equals("none")) {
|
||||
properties.put("Data Block Uncompressed Bytes", Long
|
||||
.toString(dataSizeUncompressed));
|
||||
properties.put("Data Block Compression Ratio", String.format(
|
||||
|
|
Loading…
Reference in New Issue