HDFS-7439. Add BlockOpResponseProto's message to the exception messages. Contributed by Takanobu Asanuma

Tsz-Wo Nicholas Sze 2015-03-02 15:03:58 +08:00
parent c40293c828
commit a5f3156b30
7 changed files with 55 additions and 62 deletions

CHANGES.txt

@@ -393,6 +393,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-5853. Add "hadoop.user.group.metrics.percentiles.intervals" to
     hdfs-default.xml. (aajisaka)
 
+    HDFS-7439. Add BlockOpResponseProto's message to the exception messages.
+    (Takanobu Asanuma via szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

DFSClient.java

@@ -175,6 +175,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
 import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
@@ -2259,15 +2260,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
         final BlockOpResponseProto reply =
           BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
 
-        if (reply.getStatus() != Status.SUCCESS) {
-          if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
-            throw new InvalidBlockTokenException();
-          } else {
-            throw new IOException("Bad response " + reply + " for block "
-                + block + " from datanode " + datanodes[j]);
-          }
-        }
+        String logInfo = "for block " + block + " from datanode " + datanodes[j];
+        DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);
 
         OpBlockChecksumResponseProto checksumData =
           reply.getChecksumResponse();
@@ -2424,16 +2419,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
           0, 1, true, CachingStrategy.newDefaultStrategy());
       final BlockOpResponseProto reply =
           BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
 
-      if (reply.getStatus() != Status.SUCCESS) {
-        if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
-          throw new InvalidBlockTokenException();
-        } else {
-          throw new IOException("Bad response " + reply + " trying to read "
-              + lb.getBlock() + " from datanode " + dn);
-        }
-      }
+      String logInfo = "trying to read " + lb.getBlock() + " from datanode " + dn;
+      DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);
       return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
     } finally {
       IOUtils.cleanup(null, pair.in, pair.out);

DFSOutputStream.java

@@ -69,6 +69,7 @@ import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
 import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
@@ -1469,16 +1470,10 @@ public class DFSOutputStream extends FSOutputSummer
             checkRestart = true;
             throw new IOException("A datanode is restarting.");
           }
-          if (pipelineStatus != SUCCESS) {
-            if (pipelineStatus == Status.ERROR_ACCESS_TOKEN) {
-              throw new InvalidBlockTokenException(
-                  "Got access token error for connect ack with firstBadLink as "
-                      + firstBadLink);
-            } else {
-              throw new IOException("Bad connect ack with firstBadLink as "
-                  + firstBadLink);
-            }
-          }
+
+          String logInfo = "ack with firstBadLink as " + firstBadLink;
+          DataTransferProtoUtil.checkBlockOpStatus(resp, logInfo);
+
           assert null == blockStream : "Previous blockStream unclosed";
           blockStream = out;
           result = true; // success

RemoteBlockReader2.java

@@ -45,7 +45,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumIn
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
 import org.apache.hadoop.net.NetUtils;
@@ -448,22 +447,13 @@ public class RemoteBlockReader2 implements BlockReader {
       BlockOpResponseProto status, Peer peer,
       ExtendedBlock block, String file)
       throws IOException {
-    if (status.getStatus() != Status.SUCCESS) {
-      if (status.getStatus() == Status.ERROR_ACCESS_TOKEN) {
-        throw new InvalidBlockTokenException(
-            "Got access token error for OP_READ_BLOCK, self="
-                + peer.getLocalAddressString() + ", remote="
-                + peer.getRemoteAddressString() + ", for file " + file
-                + ", for pool " + block.getBlockPoolId() + " block "
-                + block.getBlockId() + "_" + block.getGenerationStamp());
-      } else {
-        throw new IOException("Got error for OP_READ_BLOCK, self="
-            + peer.getLocalAddressString() + ", remote="
-            + peer.getRemoteAddressString() + ", for file " + file
-            + ", for pool " + block.getBlockPoolId() + " block "
-            + block.getBlockId() + "_" + block.getGenerationStamp());
-      }
-    }
+    String logInfo = "for OP_READ_BLOCK"
+        + ", self=" + peer.getLocalAddressString()
+        + ", remote=" + peer.getRemoteAddressString()
+        + ", for file " + file
+        + ", for pool " + block.getBlockPoolId()
+        + " block " + block.getBlockId() + "_" + block.getGenerationStamp();
+    DataTransferProtoUtil.checkBlockOpStatus(status, logInfo);
   }
 
   @Override

DataTransferProtoUtil.java

@@ -17,11 +17,16 @@
  */
 package org.apache.hadoop.hdfs.protocol.datatransfer;
 
+import java.io.IOException;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
@@ -29,6 +34,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTrac
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.htrace.Span;
@@ -119,4 +125,24 @@ public abstract class DataTransferProtoUtil {
     }
     return scope;
   }
+
+  public static void checkBlockOpStatus(
+          BlockOpResponseProto response,
+          String logInfo) throws IOException {
+    if (response.getStatus() != Status.SUCCESS) {
+      if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
+        throw new InvalidBlockTokenException(
+          "Got access token error"
+          + ", status message " + response.getMessage()
+          + ", " + logInfo
+        );
+      } else {
+        throw new IOException(
+          "Got error"
+          + ", status message " + response.getMessage()
+          + ", " + logInfo
+        );
+      }
+    }
+  }
 }
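
For illustration only (not part of the patch): a minimal sketch of what callers now see from the new helper. The response built below and its message text are hypothetical; only BlockOpResponseProto, Status, and the checkBlockOpStatus method come from the change above.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

public class CheckBlockOpStatusExample {
  public static void main(String[] args) {
    // Hypothetical response: the datanode rejected the operation and attached
    // an explanatory message in BlockOpResponseProto's message field.
    BlockOpResponseProto response = BlockOpResponseProto.newBuilder()
        .setStatus(Status.ERROR)
        .setMessage("Not able to receive block 1073741825: threads quota exceeded")
        .build();
    try {
      // logInfo is caller-supplied context, as in the call sites changed above.
      DataTransferProtoUtil.checkBlockOpStatus(response, "for block blk_1073741825");
    } catch (IOException e) {
      // With this patch the exception text carries the datanode's message, e.g.
      // "Got error, status message Not able to receive block 1073741825:
      //  threads quota exceeded, for block blk_1073741825"
      System.out.println(e.getMessage());
    }
  }
}

Catching IOException also covers the ERROR_ACCESS_TOKEN case, since InvalidBlockTokenException is an IOException subclass.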

Dispatcher.java

@@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
@@ -359,12 +360,8 @@ public class Dispatcher {
         // read intermediate responses
         response = BlockOpResponseProto.parseFrom(vintPrefixed(in));
       }
-      if (response.getStatus() != Status.SUCCESS) {
-        if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
-          throw new IOException("block move failed due to access token error");
-        }
-        throw new IOException("block move is failed: " + response.getMessage());
-      }
+      String logInfo = "block move is failed";
+      DataTransferProtoUtil.checkBlockOpStatus(response, logInfo);
     }
 
     /** reset the object */

DataXceiver.java

@@ -1116,16 +1116,10 @@ class DataXceiver extends Receiver implements Runnable {
       BlockOpResponseProto copyResponse = BlockOpResponseProto.parseFrom(
           PBHelper.vintPrefixed(proxyReply));
 
-      if (copyResponse.getStatus() != SUCCESS) {
-        if (copyResponse.getStatus() == ERROR_ACCESS_TOKEN) {
-          throw new IOException("Copy block " + block + " from "
-              + proxySock.getRemoteSocketAddress()
-              + " failed due to access token error");
-        }
-        throw new IOException("Copy block " + block + " from "
-            + proxySock.getRemoteSocketAddress() + " failed");
-      }
+      String logInfo = "copy block " + block + " from "
+          + proxySock.getRemoteSocketAddress();
+      DataTransferProtoUtil.checkBlockOpStatus(copyResponse, logInfo);
 
       // get checksum info about the block we're copying
       ReadOpChecksumInfoProto checksumInfo = copyResponse.getReadOpChecksumInfo();
       DataChecksum remoteChecksum = DataTransferProtoUtil.fromProto(