diff --git a/hadoop-common-project/hadoop-nfs/pom.xml b/hadoop-common-project/hadoop-nfs/pom.xml
index c0b4e14177c..baabb0fdc6e 100644
--- a/hadoop-common-project/hadoop-nfs/pom.xml
+++ b/hadoop-common-project/hadoop-nfs/pom.xml
@@ -90,7 +90,7 @@
<groupId>io.netty</groupId>
- <artifactId>netty</artifactId>
+ <artifactId>netty-all</artifactId>
<scope>compile</scope>
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
index 0ff3084bf3e..58d3e51f2bd 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
@@ -41,6 +41,8 @@ abstract public class MountdBase {
private final RpcProgram rpcProgram;
private int udpBoundPort; // Will set after server starts
private int tcpBoundPort; // Will set after server starts
+ private SimpleUdpServer udpServer = null;
+ private SimpleTcpServer tcpServer = null;
public RpcProgram getRpcProgram() {
return rpcProgram;
@@ -57,7 +59,7 @@ abstract public class MountdBase {
/* Start UDP server */
private void startUDPServer() {
- SimpleUdpServer udpServer = new SimpleUdpServer(rpcProgram.getPort(),
+ udpServer = new SimpleUdpServer(rpcProgram.getPort(),
rpcProgram, 1);
rpcProgram.startDaemons();
try {
@@ -76,7 +78,7 @@ abstract public class MountdBase {
/* Start TCP server */
private void startTCPServer() {
- SimpleTcpServer tcpServer = new SimpleTcpServer(rpcProgram.getPort(),
+ tcpServer = new SimpleTcpServer(rpcProgram.getPort(),
rpcProgram, 1);
rpcProgram.startDaemons();
try {
@@ -118,6 +120,14 @@ abstract public class MountdBase {
rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
tcpBoundPort = 0;
}
+ if (udpServer != null) {
+ udpServer.shutdown();
+ udpServer = null;
+ }
+ if (tcpServer != null) {
+ tcpServer.shutdown();
+ tcpServer = null;
+ }
}
/**
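Promoting the two servers from locals to fields is what makes the new shutdown block above possible: stop() can now release the Netty resources instead of leaking the event loops. A minimal sketch of what a shutdown like SimpleTcpServer's has to do in Netty 4 — the wrapper class and field names here are illustrative, not the actual SimpleTcpServer internals:

```java
import io.netty.channel.Channel;
import io.netty.channel.EventLoopGroup;

// Hypothetical handle mirroring what a Netty 4 server shutdown must do:
// close the bound channel, then release both event-loop groups.
class TcpServerHandle {
  private final Channel serverChannel;      // channel bound by ServerBootstrap.bind()
  private final EventLoopGroup bossGroup;   // accepts incoming connections
  private final EventLoopGroup workerGroup; // services accepted connections

  TcpServerHandle(Channel serverChannel, EventLoopGroup bossGroup,
      EventLoopGroup workerGroup) {
    this.serverChannel = serverChannel;
    this.bossGroup = bossGroup;
    this.workerGroup = workerGroup;
  }

  void shutdown() {
    if (serverChannel != null) {
      serverChannel.close().awaitUninterruptibly();
    }
    // shutdownGracefully() drains pending tasks before terminating threads.
    bossGroup.shutdownGracefully();
    workerGroup.shutdownGracefully();
  }
}
```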
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
index ff83a5f19be..e6ea29b42bf 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
@@ -35,6 +35,7 @@ public abstract class Nfs3Base {
public static final Logger LOG = LoggerFactory.getLogger(Nfs3Base.class);
private final RpcProgram rpcProgram;
private int nfsBoundPort; // Will set after server starts
+ private SimpleTcpServer tcpServer = null;
public RpcProgram getRpcProgram() {
return rpcProgram;
@@ -61,7 +62,7 @@ public abstract class Nfs3Base {
}
private void startTCPServer() {
- SimpleTcpServer tcpServer = new SimpleTcpServer(rpcProgram.getPort(),
+ tcpServer = new SimpleTcpServer(rpcProgram.getPort(),
rpcProgram, 0);
rpcProgram.startDaemons();
try {
@@ -84,6 +85,10 @@ public abstract class Nfs3Base {
nfsBoundPort = 0;
}
rpcProgram.stopDaemons();
+ if (tcpServer != null) {
+ tcpServer.shutdown();
+ tcpServer = null;
+ }
}
/**
* Priority of the nfsd shutdown hook.
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RegistrationClient.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RegistrationClient.java
index c8528ba4d55..c96f1d53bb4 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RegistrationClient.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RegistrationClient.java
@@ -19,10 +19,9 @@ package org.apache.hadoop.oncrpc;
import java.util.Arrays;
+import io.netty.buffer.ByteBuf;
+import io.netty.channel.ChannelHandlerContext;
import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState;
-import org.jboss.netty.buffer.ChannelBuffer;
-import org.jboss.netty.channel.ChannelHandlerContext;
-import org.jboss.netty.channel.MessageEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -58,10 +57,10 @@ public class RegistrationClient extends SimpleTcpClient {
}
@Override
- public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
- ChannelBuffer buf = (ChannelBuffer) e.getMessage(); // Read reply
+ public void channelRead(ChannelHandlerContext ctx, Object msg) {
+ ByteBuf buf = (ByteBuf) msg; // Read reply
if (!validMessageLength(buf.readableBytes())) {
- e.getChannel().close();
+ ctx.channel().close();
return;
}
@@ -83,7 +82,7 @@ public class RegistrationClient extends SimpleTcpClient {
RpcDeniedReply deniedReply = (RpcDeniedReply) reply;
handle(deniedReply);
}
- e.getChannel().close(); // shutdown now that request is complete
+ ctx.channel().close(); // shutdown now that request is complete
}
private void handle(RpcDeniedReply deniedReply) {
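This hunk shows the core handler-API translation of the migration: Netty 3 wrapped inbound data in a MessageEvent passed to messageReceived(), while Netty 4's ChannelInboundHandlerAdapter hands the raw message object to channelRead() and reaches the channel through the context. A self-contained sketch of the new shape (handler name and logic are illustrative):

```java
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;

// Illustrative Netty 4 inbound handler: cast the message, validate it,
// and close the connection via the context instead of a MessageEvent.
class ReplyHandler extends ChannelInboundHandlerAdapter {
  @Override
  public void channelRead(ChannelHandlerContext ctx, Object msg) {
    ByteBuf buf = (ByteBuf) msg;
    try {
      if (buf.readableBytes() < 4) { // too short to hold a reply header
        ctx.channel().close();
        return;
      }
      int xid = buf.readInt(); // consume what the protocol needs
      System.out.println("reply xid=" + xid);
    } finally {
      buf.release(); // Netty 4 ByteBufs are reference-counted
    }
    ctx.channel().close(); // one-shot client: done after the reply
  }
}
```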
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcInfo.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcInfo.java
index b434d79285c..aba8e9ea262 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcInfo.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcInfo.java
@@ -19,9 +19,9 @@ package org.apache.hadoop.oncrpc;
import java.net.SocketAddress;
-import org.jboss.netty.buffer.ChannelBuffer;
-import org.jboss.netty.channel.Channel;
-import org.jboss.netty.channel.ChannelHandlerContext;
+import io.netty.buffer.ByteBuf;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandlerContext;
/**
* RpcInfo records all contextual information of an RPC message. It contains
@@ -29,11 +29,11 @@ import org.jboss.netty.channel.ChannelHandlerContext;
*/
public final class RpcInfo {
private final RpcMessage header;
- private final ChannelBuffer data;
+ private final ByteBuf data;
private final Channel channel;
private final SocketAddress remoteAddress;
- public RpcInfo(RpcMessage header, ChannelBuffer data,
+ public RpcInfo(RpcMessage header, ByteBuf data,
ChannelHandlerContext channelContext, Channel channel,
SocketAddress remoteAddress) {
this.header = header;
@@ -46,7 +46,7 @@ public final class RpcInfo {
return header;
}
- public ChannelBuffer data() {
+ public ByteBuf data() {
return data;
}
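One behavioral difference hiding behind this otherwise mechanical type swap: Netty 3's ChannelBuffer was plain garbage-collected, whereas Netty 4's ByteBuf is reference-counted, so anything that stores one in a value object like RpcInfo must keep retain()/release() balanced. A standalone demonstration:

```java
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class ByteBufRefCountDemo {
  public static void main(String[] args) {
    ByteBuf data = Unpooled.wrappedBuffer(new byte[] {1, 2, 3, 4});
    System.out.println(data.refCnt());  // 1
    data.retain();                      // keep alive past a handler boundary
    System.out.println(data.refCnt());  // 2
    System.out.println(data.readInt()); // 0x01020304 = 16909060
    data.release();                     // drop the extra reference
    data.release();                     // last release frees the buffer
    System.out.println(data.refCnt());  // 0
  }
}
```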
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
index bafb49716b6..8b8d558255f 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
@@ -22,16 +22,15 @@ import java.net.DatagramSocket;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelInboundHandlerAdapter;
import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState;
import org.apache.hadoop.oncrpc.security.VerifierNone;
import org.apache.hadoop.portmap.PortmapMapping;
import org.apache.hadoop.portmap.PortmapRequest;
-import org.jboss.netty.buffer.ChannelBuffer;
-import org.jboss.netty.buffer.ChannelBuffers;
-import org.jboss.netty.channel.ChannelHandlerContext;
-import org.jboss.netty.channel.MessageEvent;
-import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -39,7 +38,7 @@ import org.slf4j.LoggerFactory;
* Class for writing RPC server programs based on RFC 1050. Extend this class
* and implement {@link #handleInternal} to handle the requests received.
*/
-public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
+public abstract class RpcProgram extends ChannelInboundHandlerAdapter {
static final Logger LOG = LoggerFactory.getLogger(RpcProgram.class);
public static final int RPCB_PORT = 111;
private final String program;
@@ -161,9 +160,9 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
public void stopDaemons() {}
@Override
- public void messageReceived(ChannelHandlerContext ctx, MessageEvent e)
+ public void channelRead(ChannelHandlerContext ctx, Object msg)
throws Exception {
- RpcInfo info = (RpcInfo) e.getMessage();
+ RpcInfo info = (RpcInfo) msg;
RpcCall call = (RpcCall) info.header();
SocketAddress remoteAddress = info.remoteAddress();
@@ -221,7 +220,7 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
out.writeInt(lowProgVersion);
out.writeInt(highProgVersion);
}
- ChannelBuffer b = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
+ ByteBuf b = Unpooled.wrappedBuffer(out.asReadOnlyWrap()
.buffer());
RpcResponse rsp = new RpcResponse(b, remoteAddress);
RpcUtil.sendRpcResponse(ctx, rsp);
@@ -234,7 +233,7 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
RpcReply.ReplyState.MSG_DENIED,
RpcDeniedReply.RejectState.AUTH_ERROR, new VerifierNone());
reply.write(out);
- ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
+ ByteBuf buf = Unpooled.wrappedBuffer(out.asReadOnlyWrap()
.buffer());
RpcResponse rsp = new RpcResponse(buf, remoteAddress);
RpcUtil.sendRpcResponse(ctx, rsp);
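ChannelBuffers.wrappedBuffer() maps one-to-one onto Unpooled.wrappedBuffer(): both wrap existing storage without copying, which is why wrapping the XDR's read-only view is cheap here. A quick standalone check of that zero-copy behavior:

```java
import java.nio.ByteBuffer;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class WrappedBufferDemo {
  public static void main(String[] args) {
    ByteBuffer nio = ByteBuffer.allocate(8);
    nio.putInt(42).putInt(7).flip();       // fill, then make readable

    // Zero-copy wrap: the ByteBuf shares backing storage with 'nio'.
    ByteBuf wrapped = Unpooled.wrappedBuffer(nio);
    System.out.println(wrapped.readInt()); // 42
    System.out.println(wrapped.readInt()); // 7
    wrapped.release();                     // wrapped buffers are refcounted too
  }
}
```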
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcResponse.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcResponse.java
index 2e45e6100b1..0d6431f68bd 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcResponse.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcResponse.java
@@ -19,27 +19,30 @@ package org.apache.hadoop.oncrpc;
import java.net.SocketAddress;
-import org.jboss.netty.buffer.ChannelBuffer;
+import io.netty.buffer.ByteBuf;
+import io.netty.channel.DefaultAddressedEnvelope;
/**
* RpcResponse encapsulates a response to a RPC request. It contains the data
* that is going to cross the wire, as well as the information of the remote
* peer.
*/
-public class RpcResponse {
- private final ChannelBuffer data;
- private final SocketAddress remoteAddress;
-
- public RpcResponse(ChannelBuffer data, SocketAddress remoteAddress) {
- this.data = data;
- this.remoteAddress = remoteAddress;
+public class RpcResponse extends
+ DefaultAddressedEnvelope<ByteBuf, SocketAddress> {
+ public RpcResponse(ByteBuf message, SocketAddress recipient) {
+ super(message, recipient, null);
}
- public ChannelBuffer data() {
- return data;
+ public RpcResponse(ByteBuf message, SocketAddress recipient,
+ SocketAddress sender) {
+ super(message, recipient, sender);
+ }
+
+ public ByteBuf data() {
+ return this.content();
}
public SocketAddress remoteAddress() {
- return remoteAddress;
+ return this.recipient();
}
}
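Rather than carrying its own data/remoteAddress fields, RpcResponse now extends DefaultAddressedEnvelope<ByteBuf, SocketAddress>, the payload-plus-addresses carrier Netty 4's datagram pipeline already understands; data() and remoteAddress() survive as thin shims over content() and recipient(). Usage looks like this (payload and address are made up):

```java
import java.net.InetSocketAddress;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.DefaultAddressedEnvelope;

public class EnvelopeDemo {
  public static void main(String[] args) {
    ByteBuf payload = Unpooled.wrappedBuffer("hello".getBytes());
    InetSocketAddress peer = new InetSocketAddress("127.0.0.1", 2049);

    // content + recipient (+ optional sender): the shape RpcResponse now has.
    DefaultAddressedEnvelope<ByteBuf, InetSocketAddress> env =
        new DefaultAddressedEnvelope<>(payload, peer);

    System.out.println(env.recipient());               // /127.0.0.1:2049
    System.out.println(env.content().readableBytes()); // 5
    env.release(); // releasing the envelope releases the ByteBuf inside
  }
}
```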
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java
index cebebd27d0c..e8bc27d687f 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java
@@ -17,16 +17,18 @@
*/
package org.apache.hadoop.oncrpc;
+import java.net.SocketAddress;
import java.nio.ByteBuffer;
+import java.util.List;
-import org.jboss.netty.buffer.ChannelBuffer;
-import org.jboss.netty.buffer.ChannelBuffers;
-import org.jboss.netty.channel.Channel;
-import org.jboss.netty.channel.ChannelHandlerContext;
-import org.jboss.netty.channel.Channels;
-import org.jboss.netty.channel.MessageEvent;
-import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
-import org.jboss.netty.handler.codec.frame.FrameDecoder;
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelInboundHandlerAdapter;
+import io.netty.channel.socket.DatagramPacket;
+import io.netty.handler.codec.ByteToMessageDecoder;
+import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -43,16 +45,16 @@ public final class RpcUtil {
public static void sendRpcResponse(ChannelHandlerContext ctx,
RpcResponse response) {
- Channels.fireMessageReceived(ctx, response);
+ ctx.fireChannelRead(response);
}
- public static FrameDecoder constructRpcFrameDecoder() {
+ public static ByteToMessageDecoder constructRpcFrameDecoder() {
return new RpcFrameDecoder();
}
- public static final SimpleChannelUpstreamHandler STAGE_RPC_MESSAGE_PARSER = new RpcMessageParserStage();
- public static final SimpleChannelUpstreamHandler STAGE_RPC_TCP_RESPONSE = new RpcTcpResponseStage();
- public static final SimpleChannelUpstreamHandler STAGE_RPC_UDP_RESPONSE = new RpcUdpResponseStage();
+ public static final ChannelInboundHandlerAdapter STAGE_RPC_MESSAGE_PARSER = new RpcMessageParserStage();
+ public static final ChannelInboundHandlerAdapter STAGE_RPC_TCP_RESPONSE = new RpcTcpResponseStage();
+ public static final ChannelInboundHandlerAdapter STAGE_RPC_UDP_RESPONSE = new RpcUdpResponseStage();
/**
* An RPC client can separate a RPC message into several frames (i.e.,
@@ -62,44 +64,39 @@ public final class RpcUtil {
* RpcFrameDecoder is a stateful pipeline stage. It has to be constructed for
* each RPC client.
*/
- static class RpcFrameDecoder extends FrameDecoder {
+ static class RpcFrameDecoder extends ByteToMessageDecoder {
public static final Logger LOG =
LoggerFactory.getLogger(RpcFrameDecoder.class);
- private ChannelBuffer currentFrame;
+ private volatile boolean isLast;
@Override
- protected Object decode(ChannelHandlerContext ctx, Channel channel,
- ChannelBuffer buf) {
+ protected void decode(ChannelHandlerContext ctx, ByteBuf buf,
+ List<Object> out) {
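The decode() contract changes shape here: Netty 3's FrameDecoder returned a frame object (or null for "need more bytes"), while ByteToMessageDecoder returns void, appends completed frames to the out list, and signals "wait for more" by consuming nothing. A simplified single-fragment sketch of ONC RPC record marking (RFC 5531) under the new contract — the real RpcFrameDecoder also accumulates multi-fragment records:

```java
import java.util.List;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ByteToMessageDecoder;

// Simplified ONC RPC framing: each fragment carries a 4-byte prefix --
// 1 bit last-fragment flag, 31 bits fragment length.
class SingleFragmentRpcDecoder extends ByteToMessageDecoder {
  @Override
  protected void decode(ChannelHandlerContext ctx, ByteBuf in,
      List<Object> out) {
    if (in.readableBytes() < 4) {
      return; // header not complete yet; Netty will call us again
    }
    in.markReaderIndex();
    long header = in.readUnsignedInt();
    int length = (int) (header & 0x7FFFFFFFL);  // low 31 bits
    boolean last = (header & 0x80000000L) != 0; // a full decoder would
                                                // buffer until 'last' is set
    if (in.readableBytes() < length) {
      in.resetReaderIndex(); // whole fragment not here yet
      return;
    }
    // readRetainedSlice hands ownership of the frame to the next handler.
    out.add(in.readRetainedSlice(length));
  }
}
```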
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
<groupId>io.netty</groupId>
- <artifactId>netty</artifactId>
+ <artifactId>netty-all</artifactId>
<scope>compile</scope>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
index 3b0327ad4a1..2ba1bb060ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
@@ -26,6 +26,10 @@ import java.util.Collections;
import java.util.List;
import java.util.HashMap;
+import io.netty.channel.ChannelHandler;
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import io.netty.channel.ChannelHandlerContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.fs.FileSystem;
@@ -51,15 +55,13 @@ import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.VerifierNone;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
-import org.jboss.netty.buffer.ChannelBuffer;
-import org.jboss.netty.buffer.ChannelBuffers;
-import org.jboss.netty.channel.ChannelHandlerContext;
import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
/**
* RPC program corresponding to mountd daemon. See {@link Mountd}.
*/
+@ChannelHandler.Sharable
public class RpcProgramMountd extends RpcProgram implements MountInterface {
private static final Logger LOG =
LoggerFactory.getLogger(RpcProgramMountd.class);
@@ -262,8 +264,8 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
out);
}
- ChannelBuffer buf =
- ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
+ ByteBuf buf =
+ Unpooled.wrappedBuffer(out.asReadOnlyWrap().buffer());
RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
RpcUtil.sendRpcResponse(ctx, rsp);
}
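The new @ChannelHandler.Sharable annotation is required, not decorative: Netty 4 refuses to add one handler instance to a second pipeline without it, and RpcProgramMountd is installed in both the TCP and UDP server pipelines. The annotation is a promise that the handler keeps no per-connection state; a minimal illustration:

```java
import java.util.concurrent.atomic.AtomicLong;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;

// Without @Sharable, adding this one instance to a second pipeline throws
// "is not a @Sharable handler". The instance must hold no per-channel state.
@ChannelHandler.Sharable
class CountingHandler extends ChannelInboundHandlerAdapter {
  private final AtomicLong reads = new AtomicLong();

  @Override
  public void channelRead(ChannelHandlerContext ctx, Object msg) {
    reads.incrementAndGet(); // thread-safe: may run on many event loops
    ctx.fireChannelRead(msg);
  }
}
```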
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
index c6da1981f37..c58dc5976b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
@@ -22,6 +22,8 @@ import java.net.InetSocketAddress;
import java.net.URI;
import java.nio.file.FileSystemException;
+import io.netty.buffer.ByteBuf;
+import io.netty.channel.Channel;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;
@@ -39,8 +41,6 @@ import org.apache.hadoop.nfs.nfs3.response.WccAttr;
import org.apache.hadoop.nfs.nfs3.response.WccData;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.security.IdMappingServiceProvider;
-import org.jboss.netty.buffer.ChannelBuffer;
-import org.jboss.netty.channel.Channel;
/**
* Utility/helper methods related to NFS
@@ -147,16 +147,16 @@ public class Nfs3Utils {
if (RpcProgramNfs3.LOG.isDebugEnabled()) {
RpcProgramNfs3.LOG.debug(WRITE_RPC_END + xid);
}
- ChannelBuffer outBuf = XDR.writeMessageTcp(out, true);
- channel.write(outBuf);
+ ByteBuf outBuf = XDR.writeMessageTcp(out, true);
+ channel.writeAndFlush(outBuf);
}
public static void writeChannelCommit(Channel channel, XDR out, int xid) {
if (RpcProgramNfs3.LOG.isDebugEnabled()) {
RpcProgramNfs3.LOG.debug("Commit done:" + xid);
}
- ChannelBuffer outBuf = XDR.writeMessageTcp(out, true);
- channel.write(outBuf);
+ ByteBuf outBuf = XDR.writeMessageTcp(out, true);
+ channel.writeAndFlush(outBuf);
}
private static boolean isSet(int access, int bits) {
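The write() → writeAndFlush() change is not cosmetic: in Netty 4, write() only enqueues into the channel's outbound buffer, and bytes reach the socket only on flush(); writeAndFlush() restores Netty 3's write-and-send semantics. A small helper showing the pattern, with the failure-listener idiom one would typically attach (names illustrative):

```java
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;

final class WriteHelper {
  // Netty 4 outbound semantics: write() only enqueues; flush() pushes to
  // the socket. writeAndFlush() does both, like Netty 3's write().
  static void send(Channel channel, ByteBuf outBuf) {
    ChannelFuture f = channel.writeAndFlush(outBuf);
    f.addListener((ChannelFutureListener) future -> {
      if (!future.isSuccess()) {
        future.channel().close(); // e.g. peer went away mid-write
      }
    });
  }
}
```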
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
index 528ead7a003..8358c056cac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
@@ -31,6 +31,7 @@ import java.util.concurrent.ConcurrentNavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.atomic.AtomicLong;
+import io.netty.channel.Channel;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
@@ -55,7 +56,6 @@ import org.apache.hadoop.oncrpc.security.VerifierNone;
import org.apache.hadoop.security.IdMappingServiceProvider;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.Time;
-import org.jboss.netty.channel.Channel;
import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index d436eac598b..f6cb4350e40 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -28,6 +28,11 @@ import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.util.EnumSet;
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelHandlerContext;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;
@@ -129,10 +134,6 @@ import org.apache.hadoop.security.ShellBasedIdMapping;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.util.JvmPauseMonitor;
-import org.jboss.netty.buffer.ChannelBuffer;
-import org.jboss.netty.buffer.ChannelBuffers;
-import org.jboss.netty.channel.Channel;
-import org.jboss.netty.channel.ChannelHandlerContext;
import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
@@ -141,6 +142,7 @@ import org.slf4j.LoggerFactory;
/**
* RPC program corresponding to nfs daemon. See {@link Nfs3}.
*/
+@ChannelHandler.Sharable
public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
public static final int DEFAULT_UMASK = 0022;
public static final FsPermission umask = new FsPermission(
@@ -2180,7 +2182,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
RpcDeniedReply.RejectState.AUTH_ERROR, new VerifierNone());
rdr.write(reply);
- ChannelBuffer buf = ChannelBuffers.wrappedBuffer(reply.asReadOnlyWrap()
+ ByteBuf buf = Unpooled.wrappedBuffer(reply.asReadOnlyWrap()
.buffer());
RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
RpcUtil.sendRpcResponse(ctx, rsp);
@@ -2291,7 +2293,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
}
// TODO: currently we just return VerifierNone
out = response.serialize(out, xid, new VerifierNone());
- ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
+ ByteBuf buf = Unpooled.wrappedBuffer(out.asReadOnlyWrap()
.buffer());
RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
index 76859247bf2..d5c9d4f5592 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
@@ -22,12 +22,12 @@ import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
+import io.netty.channel.Channel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
-import org.jboss.netty.channel.Channel;
import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
index 28893710408..a1b6e12eebf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.nfs.nfs3;
import java.io.IOException;
import java.util.EnumSet;
+import io.netty.channel.Channel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -43,7 +44,6 @@ import org.apache.hadoop.nfs.nfs3.response.WccData;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.VerifierNone;
import org.apache.hadoop.security.IdMappingServiceProvider;
-import org.jboss.netty.channel.Channel;
import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
index 4e53c72bec8..31528a2db87 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
@@ -21,6 +21,12 @@ package org.apache.hadoop.hdfs.nfs;
import java.nio.ByteBuffer;
import java.util.Arrays;
+import io.netty.buffer.ByteBuf;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelInitializer;
+import io.netty.channel.ChannelPipeline;
+import io.netty.channel.socket.SocketChannel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
@@ -42,13 +48,6 @@ import org.apache.hadoop.oncrpc.SimpleTcpClientHandler;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.CredentialsNone;
import org.apache.hadoop.oncrpc.security.VerifierNone;
-import org.jboss.netty.buffer.ChannelBuffer;
-import org.jboss.netty.channel.Channel;
-import org.jboss.netty.channel.ChannelHandlerContext;
-import org.jboss.netty.channel.ChannelPipeline;
-import org.jboss.netty.channel.ChannelPipelineFactory;
-import org.jboss.netty.channel.Channels;
-import org.jboss.netty.channel.MessageEvent;
public class TestOutOfOrderWrite {
public final static Logger LOG =
@@ -100,9 +99,9 @@ public class TestOutOfOrderWrite {
}
@Override
- public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
+ public void channelRead(ChannelHandlerContext ctx, Object msg) {
// Get handle from create response
- ChannelBuffer buf = (ChannelBuffer) e.getMessage();
+ ByteBuf buf = (ByteBuf) msg;
XDR rsp = new XDR(buf.array());
if (rsp.getBytes().length == 0) {
LOG.info("rsp length is zero, why?");
@@ -125,7 +124,7 @@ public class TestOutOfOrderWrite {
rsp.readBoolean(); // value follow
handle = new FileHandle();
handle.deserialize(rsp);
- channel = e.getChannel();
+ channel = ctx.channel();
}
}
@@ -136,16 +135,17 @@ public class TestOutOfOrderWrite {
}
@Override
- protected ChannelPipelineFactory setPipelineFactory() {
- this.pipelineFactory = new ChannelPipelineFactory() {
+ protected ChannelInitializer<SocketChannel> setChannelHandler() {
+ return new ChannelInitializer<SocketChannel>() {
@Override
- public ChannelPipeline getPipeline() {
- return Channels.pipeline(
+ protected void initChannel(SocketChannel ch) throws Exception {
+ ChannelPipeline p = ch.pipeline();
+ p.addLast(
RpcUtil.constructRpcFrameDecoder(),
- new WriteHandler(request));
+ new WriteHandler(request)
+ );
}
};
- return this.pipelineFactory;
}
}
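ChannelPipelineFactory disappears in Netty 4; a client hands Bootstrap a ChannelInitializer and assembles the pipeline per connection in initChannel(), which is what the override above now does. A runnable end-to-end sketch of that wiring, assuming some server is listening on the chosen port (host, port, and the frame-decoder parameters are illustrative):

```java
import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.handler.codec.LengthFieldBasedFrameDecoder;

public class TcpClientSketch {
  public static void main(String[] args) throws InterruptedException {
    EventLoopGroup group = new NioEventLoopGroup(1);
    try {
      Bootstrap b = new Bootstrap()
          .group(group)
          .channel(NioSocketChannel.class)
          .handler(new ChannelInitializer<SocketChannel>() {
            @Override
            protected void initChannel(SocketChannel ch) {
              // One fresh pipeline per connection, replacing
              // Netty 3's ChannelPipelineFactory.getPipeline().
              ch.pipeline().addLast(
                  new LengthFieldBasedFrameDecoder(1 << 20, 0, 4, 0, 4));
            }
          });
      ChannelFuture f = b.connect("127.0.0.1", 2049).sync();
      f.channel().closeFuture().sync();
    } finally {
      group.shutdownGracefully();
    }
  }
}
```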
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
index 30ecc0b824b..07954c00d64 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
@@ -28,6 +28,7 @@ import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.util.EnumSet;
+import io.netty.channel.Channel;
import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -92,7 +93,6 @@ import org.apache.hadoop.oncrpc.security.SecurityHandler;
import org.apache.hadoop.security.IdMappingConstant;
import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.jboss.netty.channel.Channel;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
index f7a92fac535..0f03c6da93b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
@@ -27,6 +27,7 @@ import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.concurrent.ConcurrentNavigableMap;
+import io.netty.channel.Channel;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -52,7 +53,6 @@ import org.apache.hadoop.oncrpc.security.SecurityHandler;
import org.apache.hadoop.security.ShellBasedIdMapping;
import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.jboss.netty.channel.Channel;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;