MAPREDUCE-2936. Contrib Raid compilation broken after HDFS-1620. Contributed by Vinod Kumar Vavilapalli.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1169866 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon · 2011-09-12 18:58:09 +00:00
parent 63c7aec4a4 · commit aae8027cd3
3 changed files with 11 additions and 9 deletions
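HDFS-1620 renamed org.apache.hadoop.hdfs.protocol.FSConstants to HdfsConstants and moved the server-side org.apache.hadoop.hdfs.server.common.HdfsConstants to HdfsServerConstants, so contrib Raid code that still imported the old names stopped compiling. A minimal before/after sketch of the mapping this commit applies (the constant names come from the diffs below; the wrapper class is illustrative only):

    // Before HDFS-1620 (no longer compiles on branch-0.23):
    //   import org.apache.hadoop.hdfs.protocol.FSConstants;
    //   import org.apache.hadoop.hdfs.server.common.HdfsConstants;
    //   int bufSize = FSConstants.IO_FILE_BUFFER_SIZE;
    //   int timeout = HdfsConstants.READ_TIMEOUT;

    // After HDFS-1620 (the names this commit switches contrib Raid to):
    import org.apache.hadoop.hdfs.protocol.HdfsConstants;
    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

    class RenamedConstants {
      int bufSize = HdfsConstants.IO_FILE_BUFFER_SIZE;   // client-visible constant
      int timeout = HdfsServerConstants.READ_TIMEOUT;    // server-side constant
    }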

CHANGES.txt

@@ -1291,6 +1291,8 @@ Release 0.22.0 - Unreleased
     MAPREDUCE-2169. Integrated Reed-Solomon code with RaidNode. (Ramkumar
     Vadali via schen)
 
+    MAPREDUCE-2936. Contrib Raid compilation broken after HDFS-1620. (vinodkv)
+
   IMPROVEMENTS
 
     MAPREDUCE-2141. Add an "extra data" field to Task for use by Mesos. (matei)

RaidBlockSender.java

@@ -31,7 +31,7 @@ import java.util.Arrays;
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.SocketOutputStream;
@@ -389,7 +389,7 @@ public class RaidBlockSender implements java.io.Closeable {
       streamForSendChunks = baseStream;
       // assure a mininum buffer size.
-      maxChunksPerPacket = (Math.max(FSConstants.IO_FILE_BUFFER_SIZE,
+      maxChunksPerPacket = (Math.max(HdfsConstants.IO_FILE_BUFFER_SIZE,
                                      MIN_BUFFER_WITH_TRANSFERTO)
                            + bytesPerChecksum - 1)/bytesPerChecksum;
@@ -397,7 +397,7 @@ public class RaidBlockSender implements java.io.Closeable {
       pktSize += checksumSize * maxChunksPerPacket;
     } else {
       maxChunksPerPacket = Math.max(1,
-          (FSConstants.IO_FILE_BUFFER_SIZE + bytesPerChecksum - 1)/bytesPerChecksum);
+          (HdfsConstants.IO_FILE_BUFFER_SIZE + bytesPerChecksum - 1)/bytesPerChecksum);
       pktSize += (bytesPerChecksum + checksumSize) * maxChunksPerPacket;
     }
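The expressions in the two hunks above are ceiling divisions: the sender packs as many whole checksum chunks as fit in the I/O buffer into a single packet, then sizes the packet to hold each chunk plus its checksum. A self-contained sketch of the same arithmetic with assumed values (512-byte chunks, 4-byte CRC32 checksums, a 64 KB buffer; only the math mirrors RaidBlockSender):

    public class PacketSizing {
      public static void main(String[] args) {
        int ioFileBufferSize = 64 * 1024; // assumed default for HdfsConstants.IO_FILE_BUFFER_SIZE
        int bytesPerChecksum = 512;       // one checksum covers 512 bytes of data
        int checksumSize = 4;             // a CRC32 checksum is 4 bytes

        // Ceiling division: number of whole chunks needed to cover the buffer.
        int maxChunksPerPacket =
            Math.max(1, (ioFileBufferSize + bytesPerChecksum - 1) / bytesPerChecksum);

        // Each packet carries the chunk data plus one checksum per chunk.
        int pktSize = (bytesPerChecksum + checksumSize) * maxChunksPerPacket;

        // Prints: 128 chunks, 66048 bytes per packet
        System.out.println(maxChunksPerPacket + " chunks, " + pktSize + " bytes per packet");
      }
    }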

BlockFixer.java

@@ -46,11 +46,11 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.datatransfer.*;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset;
 import org.apache.hadoop.hdfs.server.datanode.RaidBlockSender;
 import org.apache.commons.logging.Log;
@@ -741,17 +741,17 @@ public abstract class BlockFixer extends Configured implements Runnable {
       int readTimeout =
         getConf().getInt(BLOCKFIX_READ_TIMEOUT,
-                         HdfsConstants.READ_TIMEOUT);
+                         HdfsServerConstants.READ_TIMEOUT);
       NetUtils.connect(sock, target, readTimeout);
       sock.setSoTimeout(readTimeout);
       int writeTimeout = getConf().getInt(BLOCKFIX_WRITE_TIMEOUT,
-                                          HdfsConstants.WRITE_TIMEOUT);
+                                          HdfsServerConstants.WRITE_TIMEOUT);
       OutputStream baseStream = NetUtils.getOutputStream(sock, writeTimeout);
       DataOutputStream out =
         new DataOutputStream(new BufferedOutputStream(baseStream,
-                                                      FSConstants.
+                                                      HdfsConstants.
                                                       SMALL_BUFFER_SIZE));
       boolean corruptChecksumOk = false;
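The timeout lookups in this hunk follow a common Hadoop idiom: a site-configurable key whose default is a library constant, here the renamed HdfsServerConstants values. A small sketch of that idiom using the real org.apache.hadoop.conf.Configuration API (the key name and the 60-second default are assumptions; BlockFixer itself takes its default from HdfsServerConstants.READ_TIMEOUT):

    import org.apache.hadoop.conf.Configuration;

    public class TimeoutLookup {
      // Hypothetical key, standing in for BLOCKFIX_READ_TIMEOUT in BlockFixer.
      static final String READ_TIMEOUT_KEY = "raid.blockfix.read.timeout";
      static final int DEFAULT_READ_TIMEOUT_MS = 60 * 1000; // assumed default

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // getInt returns the configured value, or the default when the key is unset.
        int readTimeout = conf.getInt(READ_TIMEOUT_KEY, DEFAULT_READ_TIMEOUT_MS);
        System.out.println("read timeout = " + readTimeout + " ms");
      }
    }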