HDFS-6378. Merging change r1610545 from branch-2

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2.5@1610546 13f79535-47bb-0310-9956-ffa450edef68
Brandon Li 2014-07-14 22:01:13 +00:00
parent a08f3d0b76
commit b8391fc397
5 changed files with 32 additions and 11 deletions

MountdBase.java

@@ -19,12 +19,16 @@ package org.apache.hadoop.mount;
 import java.io.IOException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.oncrpc.RpcProgram;
 import org.apache.hadoop.oncrpc.SimpleTcpServer;
 import org.apache.hadoop.oncrpc.SimpleUdpServer;
 import org.apache.hadoop.portmap.PortmapMapping;
 import org.apache.hadoop.util.ShutdownHookManager;
+import static org.apache.hadoop.util.ExitUtil.terminate;
 /**
  * Main class for starting mountd daemon. This daemon implements the NFS
  * mount protocol. When receiving a MOUNT request from an NFS client, it checks
@@ -33,6 +37,7 @@ import org.apache.hadoop.util.ShutdownHookManager;
  * handle for requested directory and returns it to the client.
  */
 abstract public class MountdBase {
+  public static final Log LOG = LogFactory.getLog(MountdBase.class);
   private final RpcProgram rpcProgram;
   private int udpBoundPort; // Will set after server starts
   private int tcpBoundPort; // Will set after server starts
@@ -40,11 +45,11 @@ abstract public class MountdBase {
   public RpcProgram getRpcProgram() {
     return rpcProgram;
   }

   /**
    * Constructor
    * @param program
    * @throws IOException
    */
   public MountdBase(RpcProgram program) throws IOException {
     rpcProgram = program;
@@ -74,11 +79,16 @@ abstract public class MountdBase {
     if (register) {
       ShutdownHookManager.get().addShutdownHook(new Unregister(),
           SHUTDOWN_HOOK_PRIORITY);
-      rpcProgram.register(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
-      rpcProgram.register(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
+      try {
+        rpcProgram.register(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
+        rpcProgram.register(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
+      } catch (Throwable e) {
+        LOG.fatal("Failed to start the server. Cause:", e);
+        terminate(1, e);
+      }
     }
   }

   /**
    * Priority of the mountd shutdown hook.
    */
@@ -91,5 +101,5 @@ abstract public class MountdBase {
       rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
     }
   }
 }
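The pattern added above makes startup fail fast: if portmap registration throws, the error is logged and the daemon exits instead of lingering half-started. Below is a minimal, self-contained sketch of the same fail-fast shape; registerWithPortmap is a hypothetical stand-in for RpcProgram.register, and System.exit stands in for ExitUtil.terminate, so none of it is the real Hadoop API.

import java.io.IOException;

public class FailFastStartup {
  // Hypothetical stand-in for RpcProgram.register(transport, port); it fails
  // the way registration does when no portmap/rpcbind daemon is listening.
  static void registerWithPortmap(String transport, int port) throws IOException {
    throw new IOException("connect to rpcbind failed: connection refused");
  }

  public static void main(String[] args) {
    try {
      registerWithPortmap("udp", 4272);
      registerWithPortmap("tcp", 4272);
    } catch (Throwable t) {
      // Mirrors LOG.fatal(...) followed by terminate(1, e): report the cause,
      // then exit non-zero so the failure is visible to whoever started us.
      System.err.println("Failed to start the server. Cause: " + t);
      System.exit(1);
    }
    System.out.println("registered; serving requests");
  }
}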

Nfs3Base.java

@@ -25,6 +25,8 @@ import org.apache.hadoop.oncrpc.SimpleTcpServer;
 import org.apache.hadoop.portmap.PortmapMapping;
 import org.apache.hadoop.util.ShutdownHookManager;
+import static org.apache.hadoop.util.ExitUtil.terminate;
 /**
  * Nfs server. Supports NFS v3 using {@link RpcProgram}.
  * Currently Mountd program is also started inside this class.
@@ -34,7 +36,7 @@ public abstract class Nfs3Base {
   public static final Log LOG = LogFactory.getLog(Nfs3Base.class);
   private final RpcProgram rpcProgram;
   private int nfsBoundPort; // Will set after server starts

   public RpcProgram getRpcProgram() {
     return rpcProgram;
   }
@@ -46,11 +48,16 @@ public abstract class Nfs3Base {
   public void start(boolean register) {
     startTCPServer(); // Start TCP server
     if (register) {
       ShutdownHookManager.get().addShutdownHook(new Unregister(),
           SHUTDOWN_HOOK_PRIORITY);
-      rpcProgram.register(PortmapMapping.TRANSPORT_TCP, nfsBoundPort);
+      try {
+        rpcProgram.register(PortmapMapping.TRANSPORT_TCP, nfsBoundPort);
+      } catch (Throwable e) {
+        LOG.fatal("Failed to start the server. Cause:", e);
+        terminate(1, e);
+      }
     }
   }
@@ -61,7 +68,7 @@ public abstract class Nfs3Base {
     tcpServer.run();
     nfsBoundPort = tcpServer.getBoundPort();
   }

   /**
    * Priority of the nfsd shutdown hook.
    */

RpcProgram.java

@@ -131,7 +131,7 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
     } catch (IOException e) {
       String request = set ? "Registration" : "Unregistration";
       LOG.error(request + " failure with " + host + ":" + port
-          + ", portmap entry: " + mapEntry, e);
+          + ", portmap entry: " + mapEntry);
       throw new RuntimeException(request + " failure", e);
     }
   }
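The only change here is dropping the exception argument from LOG.error: the same IOException is immediately rethrown wrapped in a RuntimeException, so attaching it to the log line as well would print the stack trace twice. A tiny sketch of the resulting log-and-rethrow shape, with System.err standing in for the commons-logging call and a made-up failure message for illustration:

import java.io.IOException;

public class LogAndRethrow {
  static void register(boolean set) {
    try {
      // Simulated failure: the real code fails here when portmap is down.
      throw new IOException("portmap not reachable");
    } catch (IOException e) {
      String request = set ? "Registration" : "Unregistration";
      // Log a one-line summary only; the stack trace travels with the wrapped
      // exception below, so it is not attached to the log call as well.
      System.err.println(request + " failure with localhost:111");
      throw new RuntimeException(request + " failure", e);
    }
  }

  public static void main(String[] args) {
    register(true);  // prints the summary, then fails with the wrapped cause
  }
}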

SimpleUdpClient.java

@@ -60,6 +60,7 @@ public class SimpleUdpClient {
     DatagramPacket sendPacket = new DatagramPacket(sendData, sendData.length,
         IPAddress, port);
     socket.send(sendPacket);
+    socket.setSoTimeout(500);
     DatagramPacket receivePacket = new DatagramPacket(receiveData,
         receiveData.length);
     socket.receive(receivePacket);
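This single added line is what turns the hang into a failure the callers above can handle: once a socket timeout is set, DatagramSocket.receive() throws java.net.SocketTimeoutException (an IOException) when no reply arrives within 500 ms, instead of blocking forever waiting for a portmapper that is not there. A standalone sketch of that behavior, sending to an arbitrary unused localhost port as a stand-in for an unreachable portmap service (the port number is made up):

import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.SocketTimeoutException;
import java.nio.charset.StandardCharsets;

public class UdpTimeoutDemo {
  public static void main(String[] args) throws Exception {
    try (DatagramSocket socket = new DatagramSocket()) {
      byte[] request = "ping".getBytes(StandardCharsets.UTF_8);
      // No process is expected to listen on this port, so no reply will come.
      socket.send(new DatagramPacket(request, request.length,
          InetAddress.getLoopbackAddress(), 40404));
      socket.setSoTimeout(500);  // same 500 ms value the patch uses
      byte[] reply = new byte[1024];
      try {
        socket.receive(new DatagramPacket(reply, reply.length));
        System.out.println("got a reply");
      } catch (SocketTimeoutException e) {
        // Without setSoTimeout() this receive() would block indefinitely.
        System.out.println("timed out after 500 ms");
      }
    }
  }
}

A short, fixed timeout keeps startup from stalling; if registration genuinely cannot complete, the timeout surfaces as the same registration failure handled by the try/catch blocks above.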

CHANGES.txt

@@ -524,6 +524,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6647. Edit log corruption when pipeline recovery occurs for deleted
     file present in snapshot (kihwal)

+    HDFS-6378. NFS registration should timeout instead of hanging when
+    portmap/rpcbind is not available (Abhiraj Butala via brandonli)
+
   BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS

     HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)