MAPREDUCE-2936. Contrib Raid compilation broken after HDFS-1620. Contributed by Vinod Kumar Vavilapalli.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1165657 13f79535-47bb-0310-9956-ffa450edef68
Author: Vinod Kumar Vavilapalli
Date:   2011-09-06 13:19:10 +00:00
Commit: e20f135174 (parent 9e95326a2c)

3 changed files with 11 additions and 9 deletions
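
For orientation, the rename that broke the Raid contrib build can be read off the hunks below: the client-visible constants moved from org.apache.hadoop.hdfs.protocol.FSConstants to org.apache.hadoop.hdfs.protocol.HdfsConstants, and the old org.apache.hadoop.hdfs.server.common.HdfsConstants became HdfsServerConstants. A minimal before/after sketch; the wrapper class and fields are illustrative, while the imports and constant names are taken from the diff:

// Before HDFS-1620 -- these imports no longer resolve against trunk HDFS:
//   import org.apache.hadoop.hdfs.protocol.FSConstants;
//   import org.apache.hadoop.hdfs.server.common.HdfsConstants;

// After HDFS-1620 -- what this commit switches the Raid contrib to:
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

class ConstantsAfterHdfs1620 {                        // illustrative wrapper class
  int ioBuffer = HdfsConstants.IO_FILE_BUFFER_SIZE;   // was FSConstants.IO_FILE_BUFFER_SIZE
  int readTimeout = HdfsServerConstants.READ_TIMEOUT; // was server.common.HdfsConstants.READ_TIMEOUT
}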

File: CHANGES.txt

@@ -1228,6 +1228,8 @@ Release 0.22.0 - Unreleased
     MAPREDUCE-2169. Integrated Reed-Solomon code with RaidNode. (Ramkumar
     Vadali via schen)
 
+    MAPREDUCE-2936. Contrib Raid compilation broken after HDFS-1620. (vinodkv)
+
   IMPROVEMENTS
 
     MAPREDUCE-2141. Add an "extra data" field to Task for use by Mesos. (matei)

File: RaidBlockSender.java

@@ -31,7 +31,7 @@ import java.util.Arrays;
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.SocketOutputStream;
@@ -389,7 +389,7 @@ public class RaidBlockSender implements java.io.Closeable {
       streamForSendChunks = baseStream;
 
       // assure a mininum buffer size.
-      maxChunksPerPacket = (Math.max(FSConstants.IO_FILE_BUFFER_SIZE,
+      maxChunksPerPacket = (Math.max(HdfsConstants.IO_FILE_BUFFER_SIZE,
                                      MIN_BUFFER_WITH_TRANSFERTO)
                             + bytesPerChecksum - 1)/bytesPerChecksum;
@@ -397,7 +397,7 @@ public class RaidBlockSender implements java.io.Closeable {
       pktSize += checksumSize * maxChunksPerPacket;
     } else {
       maxChunksPerPacket = Math.max(1,
-          (FSConstants.IO_FILE_BUFFER_SIZE + bytesPerChecksum - 1)/bytesPerChecksum);
+          (HdfsConstants.IO_FILE_BUFFER_SIZE + bytesPerChecksum - 1)/bytesPerChecksum);
       pktSize += (bytesPerChecksum + checksumSize) * maxChunksPerPacket;
     }
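
The two RaidBlockSender hunks only swap the class name; the sizing arithmetic is unchanged. It is a ceiling division: the packet is sized to cover at least the I/O buffer (or the transferTo minimum), rounded up to a whole number of checksum chunks. A standalone sketch with assumed example values (64 KB buffers and 512-byte checksum chunks stand in for HdfsConstants.IO_FILE_BUFFER_SIZE and MIN_BUFFER_WITH_TRANSFERTO, not the real cluster defaults):

// Standalone sketch of the packet sizing arithmetic above; the buffer and
// checksum-chunk sizes are assumed example values, not cluster defaults.
class PacketSizingSketch {
  public static void main(String[] args) {
    int ioFileBufferSize = 64 * 1024;        // stands in for HdfsConstants.IO_FILE_BUFFER_SIZE
    int minBufferWithTransferTo = 64 * 1024; // stands in for MIN_BUFFER_WITH_TRANSFERTO
    int bytesPerChecksum = 512;              // bytes of data covered by one checksum chunk

    // (n + d - 1) / d is integer ceiling division: round the buffer size up
    // to a whole number of checksum chunks so one packet spans the buffer.
    int maxChunksPerPacket =
        (Math.max(ioFileBufferSize, minBufferWithTransferTo) + bytesPerChecksum - 1)
            / bytesPerChecksum;

    System.out.println(maxChunksPerPacket); // prints 128 for these example values
  }
}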

File: BlockFixer.java

@@ -46,11 +46,11 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.datatransfer.*;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset;
 import org.apache.hadoop.hdfs.server.datanode.RaidBlockSender;
 import org.apache.commons.logging.Log;
@@ -741,17 +741,17 @@ public abstract class BlockFixer extends Configured implements Runnable {
       int readTimeout =
         getConf().getInt(BLOCKFIX_READ_TIMEOUT,
-                         HdfsConstants.READ_TIMEOUT);
+                         HdfsServerConstants.READ_TIMEOUT);
       NetUtils.connect(sock, target, readTimeout);
       sock.setSoTimeout(readTimeout);
 
       int writeTimeout = getConf().getInt(BLOCKFIX_WRITE_TIMEOUT,
-                                          HdfsConstants.WRITE_TIMEOUT);
+                                          HdfsServerConstants.WRITE_TIMEOUT);
 
       OutputStream baseStream = NetUtils.getOutputStream(sock, writeTimeout);
       DataOutputStream out =
         new DataOutputStream(new BufferedOutputStream(baseStream,
-                                                      FSConstants.
+                                                      HdfsConstants.
                                                       SMALL_BUFFER_SIZE));
 
       boolean corruptChecksumOk = false;
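
The BlockFixer hunk follows the same pattern for the timeout defaults: a configured value wins, and HdfsServerConstants supplies the fallback. A sketch of that lookup is below; Configuration.getInt returns the supplied default when the key is unset, and the key string and 60-second default are assumptions for illustration, not values from the diff:

// Sketch of the timeout lookup in the hunk above: unconfigured clusters fall
// back to the HDFS server-side constant passed as the default.
import org.apache.hadoop.conf.Configuration;

class TimeoutLookupSketch {
  // stands in for BlockFixer.BLOCKFIX_READ_TIMEOUT; the key string is assumed
  static final String BLOCKFIX_READ_TIMEOUT = "raid.blockfix.read.timeout";
  // stands in for HdfsServerConstants.READ_TIMEOUT; the value is assumed
  static final int READ_TIMEOUT = 60 * 1000;

  static int readTimeout(Configuration conf) {
    // getInt(key, default) returns the default when the key is absent
    return conf.getInt(BLOCKFIX_READ_TIMEOUT, READ_TIMEOUT);
  }
}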