HDFS-5199. Merge change r1523140 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1524300 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao 2013-09-18 06:19:36 +00:00
parent 3a7e3e0881
commit 0ce8f0b3c2
6 changed files with 39 additions and 17 deletions

Nfs3Utils.java View File

@@ -39,6 +39,12 @@
 public class Nfs3Utils {
   public final static String INODEID_PATH_PREFIX = "/.reserved/.inodes/";
+  public final static String READ_RPC_START = "READ_RPC_CALL_START____";
+  public final static String READ_RPC_END = "READ_RPC_CALL_END______";
+  public final static String WRITE_RPC_START = "WRITE_RPC_CALL_START____";
+  public final static String WRITE_RPC_END = "WRITE_RPC_CALL_END______";
+
   public static String getFileIdPath(FileHandle handle) {
     return getFileIdPath(handle.getFileId());
   }
@@ -102,7 +108,10 @@ public static WccData createWccData(final WccAttr preOpAttr,
   /**
    * Send a write response to the netty network socket channel
    */
-  public static void writeChannel(Channel channel, XDR out) {
+  public static void writeChannel(Channel channel, XDR out, int xid) {
+    if (RpcProgramNfs3.LOG.isDebugEnabled()) {
+      RpcProgramNfs3.LOG.debug(WRITE_RPC_END + xid);
+    }
     ChannelBuffer outBuf = XDR.writeMessageTcp(out, true);
     channel.write(outBuf);
   }
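With these markers in place, every traced call logs a START line when it is dispatched and an END line carrying the same xid when its reply is flushed, so the two halves of one RPC can be paired in the gateway log. As a rough illustration, a sketch along these lines (a hypothetical helper, not part of this change, assuming each log line begins with an epoch-millisecond timestamp; real log4j layouts differ) could pair the WRITE markers and report per-xid latency:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.HashMap;
import java.util.Map;

/**
 * Minimal sketch, not part of the patch: pairs the START/END debug
 * markers added by HDFS-5199 to estimate per-call WRITE latency.
 * Assumed line layout: "<epochMillis> ... MARKER<xid>".
 */
public class WriteRpcLatency {
  private static final String START = "WRITE_RPC_CALL_START____";
  private static final String END = "WRITE_RPC_CALL_END______";

  public static void main(String[] args) throws Exception {
    Map<String, Long> started = new HashMap<String, Long>();
    BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
    String line;
    while ((line = in.readLine()) != null) {
      int s = line.indexOf(START);
      int e = line.indexOf(END);
      if (s < 0 && e < 0) {
        continue; // not a trace line
      }
      // Assumes the line starts with an epoch-millis timestamp.
      long ts = Long.parseLong(line.substring(0, line.indexOf(' ')));
      if (s >= 0) {
        // START: remember when this xid was dispatched.
        started.put(line.substring(s + START.length()).trim(), ts);
      } else {
        // END: report the elapsed time for the matching xid.
        String xid = line.substring(e + END.length()).trim();
        Long t0 = started.remove(xid);
        if (t0 != null) {
          System.out.println("xid=" + xid + " write latency: " + (ts - t0) + " ms");
        }
      }
    }
  }
}

The same pairing works for the READ markers by swapping in the READ constants.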

OpenFileCtx.java View File

@@ -293,7 +293,7 @@ public void receivedNewWrite(DFSClient dfsClient, WRITE3Request request,
       WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
       WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
           fileWcc, 0, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
-      Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid));
+      Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
     } else {
       // Handle repeated write requests(same xid or not).
       // If already replied, send reply again. If not replied, drop the
@@ -315,7 +315,7 @@ public void receivedNewWrite(DFSClient dfsClient, WRITE3Request request,
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
             fileWcc, request.getCount(), request.getStableHow(),
             Nfs3Constant.WRITE_COMMIT_VERF);
-        Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid));
+        Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
       }
       updateLastAccessTime();
@@ -369,7 +369,7 @@ private void receivedNewWriteInternal(DFSClient dfsClient,
       WccData fileWcc = new WccData(preOpAttr, postOpAttr);
       WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
           fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
-      Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid));
+      Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
       writeCtx.setReplied(true);
     }
@@ -394,7 +394,7 @@ private void receivedNewWriteInternal(DFSClient dfsClient,
       WccData fileWcc = new WccData(preOpAttr, postOpAttr);
       WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
           fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
-      Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid));
+      Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
       writeCtx.setReplied(true);
     }
@@ -420,7 +420,7 @@ private void receivedNewWriteInternal(DFSClient dfsClient,
       }
       updateLastAccessTime();
-      Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid));
+      Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
     }
   }
@@ -715,7 +715,7 @@ private void doSingleWrite(final WriteCtx writeCtx) {
        WccData fileWcc = new WccData(preOpAttr, latestAttr);
        WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
            fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
-       Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid));
+       Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
       }
     } catch (IOException e) {
@@ -723,7 +723,7 @@ private void doSingleWrite(final WriteCtx writeCtx) {
           + offset + " and length " + data.length, e);
       if (!writeCtx.getReplied()) {
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO);
-        Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid));
+        Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
         // Keep stream open. Either client retries or SteamMonitor closes it.
       }
@@ -761,7 +761,7 @@ private void cleanup() {
       WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
           fileWcc, 0, writeCtx.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
       Nfs3Utils.writeChannel(writeCtx.getChannel(),
-          response.send(new XDR(), writeCtx.getXid()));
+          response.send(new XDR(), writeCtx.getXid()), writeCtx.getXid());
     }
   }
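The OpenFileCtx hunks are all the same mechanical change: each site that flushes a WRITE3Response now hands the xid to writeChannel. The reason is that WRITE replies leave from this asynchronous write-back path rather than from the RPC dispatch loop, so only the code performing the send knows when the response actually goes out; giving it the xid lets the WRITE_RPC_END trace fire at that moment. A minimal sketch of the pattern, with hypothetical names rather than the patch's API:

// Illustration only; ResponseSink and TracingResponseSink are
// hypothetical names, not part of this change. Moving the completion
// trace into the component that performs the send makes the END
// timestamp reflect flush time, not dispatch time.
interface ResponseSink {
  void send(byte[] reply, int xid);
}

class TracingResponseSink implements ResponseSink {
  private final ResponseSink delegate;

  TracingResponseSink(ResponseSink delegate) {
    this.delegate = delegate;
  }

  @Override
  public void send(byte[] reply, int xid) {
    // Trace at send time, as Nfs3Utils.writeChannel does for WRITE.
    System.out.println("WRITE_RPC_CALL_END______" + xid);
    delegate.send(reply, xid);
  }
}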

RpcProgramNfs3.java View File

@@ -125,7 +125,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
   public static final FsPermission umask = new FsPermission(
       (short) DEFAULT_UMASK);
-  private static final Log LOG = LogFactory.getLog(RpcProgramNfs3.class);
+  static final Log LOG = LogFactory.getLog(RpcProgramNfs3.class);
   private static final int MAX_READ_TRANSFER_SIZE = 64 * 1024;
   private static final int MAX_WRITE_TRANSFER_SIZE = 64 * 1024;
   private static final int MAX_READDIR_TRANSFER_SIZE = 64 * 1024;
@@ -1814,9 +1814,19 @@ public XDR handleInternal(RpcCall rpcCall, final XDR xdr, XDR out,
     } else if (nfsproc3 == NFSPROC3.READLINK) {
       response = readlink(xdr, securityHandler, client);
     } else if (nfsproc3 == NFSPROC3.READ) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(Nfs3Utils.READ_RPC_START + xid);
+      }
       response = read(xdr, securityHandler, client);
+      if (LOG.isDebugEnabled() && (nfsproc3 == NFSPROC3.READ)) {
+        LOG.debug(Nfs3Utils.READ_RPC_END + xid);
+      }
     } else if (nfsproc3 == NFSPROC3.WRITE) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(Nfs3Utils.WRITE_RPC_START + xid);
+      }
       response = write(xdr, channel, xid, securityHandler, client);
+      // Write end debug trace is in Nfs3Utils.writeChannel
     } else if (nfsproc3 == NFSPROC3.CREATE) {
       response = create(xdr, securityHandler, client);
     } else if (nfsproc3 == NFSPROC3.MKDIR) {
@@ -1853,6 +1863,7 @@ public XDR handleInternal(RpcCall rpcCall, final XDR xdr, XDR out,
     if (response != null) {
       out = response.send(out, xid);
     }
     return out;
   }
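READ is handled synchronously, so both of its trace lines bracket the read(xdr, ...) call here; WRITE logs only its START in the dispatcher and, as the inline comment says, leaves the END trace to Nfs3Utils.writeChannel. Both paths log through RpcProgramNfs3.LOG, which the earlier hunk widened from private to package-private precisely so that Nfs3Utils can reach it, so raising that single logger to DEBUG turns the whole trace on. A sketch, assuming the log4j 1.2 backend that Hadoop 2.x ships with and the class's usual package name:

import org.apache.log4j.Level;
import org.apache.log4j.Logger;

// Sketch: enable the new RPC trace at runtime. Assumes the log4j 1.2
// backend used by Hadoop 2.x; with log4j.properties, the equivalent
// is setting this logger to DEBUG.
public class EnableNfsRpcTrace {
  public static void main(String[] args) {
    Logger.getLogger("org.apache.hadoop.hdfs.nfs.nfs3.RpcProgramNfs3")
        .setLevel(Level.DEBUG);
  }
}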

WriteManager.java View File

@@ -118,7 +118,7 @@ void handleWrite(DFSClient dfsClient, WRITE3Request request, Channel channel,
     byte[] data = request.getData().array();
     if (data.length < count) {
       WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL);
-      Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid));
+      Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
       return;
     }
@@ -155,7 +155,7 @@ void handleWrite(DFSClient dfsClient, WRITE3Request request, Channel channel,
       WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
           fileWcc, count, request.getStableHow(),
           Nfs3Constant.WRITE_COMMIT_VERF);
-      Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid));
+      Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
       return;
     }
@@ -182,10 +182,10 @@ void handleWrite(DFSClient dfsClient, WRITE3Request request, Channel channel,
       WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
           fileWcc, count, request.getStableHow(),
           Nfs3Constant.WRITE_COMMIT_VERF);
-      Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid));
+      Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
     } else {
       WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO);
-      Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid));
+      Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
     }
   }

TestOutOfOrderWrite.java View File

@@ -174,11 +174,11 @@ public static void main(String[] args) throws InterruptedException {
     XDR writeReq;
     writeReq = write(handle, 0x8000005c, 2000, 1000, data3);
-    Nfs3Utils.writeChannel(channel, writeReq);
+    Nfs3Utils.writeChannel(channel, writeReq, 1);
     writeReq = write(handle, 0x8000005d, 1000, 1000, data2);
-    Nfs3Utils.writeChannel(channel, writeReq);
+    Nfs3Utils.writeChannel(channel, writeReq, 2);
     writeReq = write(handle, 0x8000005e, 0, 1000, data1);
-    Nfs3Utils.writeChannel(channel, writeReq);
+    Nfs3Utils.writeChannel(channel, writeReq, 3);
     // TODO: convert to Junit test, and validate result automatically

CHANGES.txt View File

@@ -120,6 +120,8 @@ Release 2.1.1-beta - 2013-09-23
     HDFS-5067. Support symlink operations in NFS gateway. (brandonli)
 
+    HDFS-5199. Add more debug trace for NFS READ and WRITE. (brandonli)
+
   IMPROVEMENTS
 
     HDFS-4513. Clarify in the WebHDFS REST API that all JSON responses may