From ec9ec0084eccdd45a8c3e37ef8121fb8bd44ecd0 Mon Sep 17 00:00:00 2001
From: Brandon Li
Date: Sun, 10 Nov 2013 04:07:39 +0000
Subject: [PATCH] HDFS-5469. Add configuration property for the sub-directory
 export path. Contributed by Brandon Li

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1540443 13f79535-47bb-0310-9956-ffa450edef68
---
 .../org/apache/hadoop/mount/MountEntry.java   | 10 +--
 .../apache/hadoop/mount/MountResponse.java    |  4 +-
 .../org/apache/hadoop/mount/MountdBase.java   | 13 ++--
 .../org/apache/hadoop/nfs/nfs3/Nfs3Base.java  | 12 ++--
 .../apache/hadoop/nfs/nfs3/Nfs3Constant.java  |  6 +-
 .../org/apache/hadoop/oncrpc/RpcProgram.java  | 22 +++----
 .../apache/hadoop/oncrpc/SimpleTcpServer.java | 15 ++++-
 .../apache/hadoop/oncrpc/SimpleUdpServer.java | 15 ++++-
 .../org/apache/hadoop/portmap/Portmap.java    |  4 +-
 .../apache/hadoop/hdfs/nfs/mount/Mountd.java  | 19 ++----
 .../hdfs/nfs/mount/RpcProgramMountd.java      | 26 ++++----
 .../hdfs/nfs/nfs3/AsyncDataService.java       |  2 +
 .../hadoop/hdfs/nfs/nfs3/DFSClientCache.java  |  1 +
 .../org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java | 31 ++++-----
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java  |  7 +-
 .../apache/hadoop/hdfs/nfs/TestMountd.java    | 12 ++--
 .../hadoop/hdfs/nfs/TestOutOfOrderWrite.java  |  5 +-
 .../apache/hadoop/hdfs/nfs/TestReaddir.java   | 11 ++--
 .../hdfs/nfs/nfs3/TestExportsTable.java       | 65 +++++++++++++++++++
 .../hadoop/hdfs/nfs/nfs3/TestWrites.java      | 13 ++--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +
 21 files changed, 185 insertions(+), 111 deletions(-)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestExportsTable.java

diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountEntry.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountEntry.java
index ab75ef974e7..d73122b59d7 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountEntry.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountEntry.java
@@ -21,9 +21,9 @@ package org.apache.hadoop.mount;
  * Represents a mount entry.
  */
 public class MountEntry {
-  /** Host correspoinding to the mount entry */
+  /** Host corresponding to the mount entry */
   private final String host;
-  /** Path correspoinding to the mount entry */
+  /** Path corresponding to the mount entry */
   private final String path;
 
   public MountEntry(String host, String path) {
@@ -31,11 +31,11 @@ public class MountEntry {
     this.path = path;
   }
 
-  public String host() {
+  public String getHost() {
     return this.host;
   }
 
-  public String path() {
+  public String getPath() {
     return this.path;
   }
 
@@ -49,7 +49,7 @@ public class MountEntry {
     }
 
     MountEntry m = (MountEntry) o;
-    return host().equals(m.host()) && path().equals(m.path());
+    return getHost().equals(m.getHost()) && getPath().equals(m.getPath());
   }
 
   @Override
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java
index 88b023c6819..5e101c1de89 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java
@@ -54,8 +54,8 @@ public class MountResponse {
     RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
     for (MountEntry mountEntry : mounts) {
       xdr.writeBoolean(true); // Value follows yes
-      xdr.writeString(mountEntry.host());
-      xdr.writeString(mountEntry.path());
+      xdr.writeString(mountEntry.getHost());
+      xdr.writeString(mountEntry.getPath());
     }
     xdr.writeBoolean(false); // Value follows no
     return xdr;
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
index 3878cbc74a2..1203e893ef8 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.mount;
 
 import java.io.IOException;
-import java.util.List;
 
 import org.apache.hadoop.oncrpc.RpcProgram;
 import org.apache.hadoop.oncrpc.SimpleTcpServer;
@@ -34,6 +33,8 @@ import org.apache.hadoop.portmap.PortmapMapping;
  */
 abstract public class MountdBase {
   private final RpcProgram rpcProgram;
+  private int udpBoundPort; // Will be set after server starts
+  private int tcpBoundPort; // Will be set after server starts
 
   public RpcProgram getRpcProgram() {
     return rpcProgram;
@@ -41,10 +42,10 @@ abstract public class MountdBase {
 
   /**
    * Constructor
-   * @param exports
+   * @param program
    * @throws IOException
    */
-  public MountdBase(List<String> exports, RpcProgram program) throws IOException {
+  public MountdBase(RpcProgram program) throws IOException {
     rpcProgram = program;
   }
 
@@ -54,6 +55,7 @@
         rpcProgram, 1);
     rpcProgram.startDaemons();
     udpServer.run();
+    udpBoundPort = udpServer.getBoundPort();
   }
 
   /* Start TCP server */
@@ -62,14 +64,15 @@
         rpcProgram, 1);
     rpcProgram.startDaemons();
     tcpServer.run();
+    tcpBoundPort = tcpServer.getBoundPort();
   }
 
   public void start(boolean register) {
     startUDPServer();
     startTCPServer();
     if (register) {
-      rpcProgram.register(PortmapMapping.TRANSPORT_UDP);
-      rpcProgram.register(PortmapMapping.TRANSPORT_TCP);
+      rpcProgram.register(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
+      rpcProgram.register(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
     }
   }
 }
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
index 0089bb0f3e9..2b6943aada9 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
@@ -33,6 +33,7 @@ public abstract class Nfs3Base {
   public static final Log LOG = LogFactory.getLog(Nfs3Base.class);
   private final RpcProgram rpcProgram;
   private final int nfsPort;
+  private int nfsBoundPort; // Will be set after server starts
 
   public RpcProgram getRpcProgram() {
     return rpcProgram;
@@ -40,20 +41,16 @@ public abstract class Nfs3Base {
 
   protected Nfs3Base(RpcProgram rpcProgram, Configuration conf) {
     this.rpcProgram = rpcProgram;
-    this.nfsPort = conf.getInt("nfs3.server.port", Nfs3Constant.PORT);
+    this.nfsPort = conf.getInt(Nfs3Constant.NFS3_SERVER_PORT,
+        Nfs3Constant.NFS3_SERVER_PORT_DEFAULT);
     LOG.info("NFS server port set to: " + nfsPort);
   }
 
-  protected Nfs3Base(RpcProgram rpcProgram) {
-    this.rpcProgram = rpcProgram;
-    this.nfsPort = Nfs3Constant.PORT;
-  }
-
   public void start(boolean register) {
     startTCPServer(); // Start TCP server
     if (register) {
-      rpcProgram.register(PortmapMapping.TRANSPORT_TCP);
+      rpcProgram.register(PortmapMapping.TRANSPORT_TCP, nfsBoundPort);
     }
   }
 
@@ -62,5 +59,6 @@
         rpcProgram, 0);
     rpcProgram.startDaemons();
     tcpServer.run();
+    nfsBoundPort = tcpServer.getBoundPort();
   }
 }
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java
index d7a0b037355..b49aef462cd 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java
@@ -26,7 +26,8 @@ public class Nfs3Constant {
   public final static int SUN_RPCBIND = 111;
 
   // The IP port number for NFS.
-  public final static int PORT = 2049;
+  public final static String NFS3_SERVER_PORT = "nfs3.server.port";
+  public final static int NFS3_SERVER_PORT_DEFAULT = 2049;
 
   // The RPC program number for NFS.
   public final static int PROGRAM = 100003;
@@ -213,4 +214,7 @@ public class Nfs3Constant {
 
   public final static String UNKNOWN_USER = "nobody";
   public final static String UNKNOWN_GROUP = "nobody";
+
+  public final static String EXPORT_POINT = "dfs.nfs3.export.point";
+  public final static String EXPORT_POINT_DEFAULT = "/";
 }
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
index a8024172988..69adcedc7b5 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
@@ -40,7 +40,7 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
   public static final int RPCB_PORT = 111;
   private final String program;
   private final String host;
-  private final int port;
+  private int port; // Ephemeral port is chosen later
   private final int progNumber;
   private final int lowProgVersion;
   private final int highProgVersion;
@@ -68,22 +68,20 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
   /**
    * Register this program with the local portmapper.
    */
-  public void register(int transport) {
+  public void register(int transport, int boundPort) {
+    if (boundPort != port) {
+      LOG.info("The bound port is " + boundPort
+          + ", different from the configured port " + port);
+      port = boundPort;
+    }
     // Register all the program versions with portmapper for a given transport
     for (int vers = lowProgVersion; vers <= highProgVersion; vers++) {
-      register(vers, transport);
+      PortmapMapping mapEntry = new PortmapMapping(progNumber, vers, transport,
+          port);
+      register(mapEntry);
     }
   }
 
-  /**
-   * Register this program with the local portmapper.
-   */
-  private void register(int progVersion, int transport) {
-    PortmapMapping mapEntry = new PortmapMapping(progNumber, progVersion,
-        transport, port);
-    register(mapEntry);
-  }
-
   /**
    * Register the program with Portmap or Rpcbind
    */
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
index 57ef77a95fe..949fdca5dc3 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
@@ -23,6 +23,7 @@ import java.util.concurrent.Executors;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.jboss.netty.bootstrap.ServerBootstrap;
+import org.jboss.netty.channel.Channel;
 import org.jboss.netty.channel.ChannelFactory;
 import org.jboss.netty.channel.ChannelPipeline;
 import org.jboss.netty.channel.ChannelPipelineFactory;
@@ -36,6 +37,7 @@ import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
 public class SimpleTcpServer {
   public static final Log LOG = LogFactory.getLog(SimpleTcpServer.class);
   protected final int port;
+  protected int boundPort = -1; // Will be set after server starts
   protected final SimpleChannelUpstreamHandler rpcProgram;
 
   /** The maximum number of I/O worker threads */
@@ -79,9 +81,16 @@ public class SimpleTcpServer {
     bootstrap.setOption("child.keepAlive", true);
 
     // Listen to TCP port
-    bootstrap.bind(new InetSocketAddress(port));
-
-    LOG.info("Started listening to TCP requests at port " + port + " for "
+    Channel ch = bootstrap.bind(new InetSocketAddress(port));
+    InetSocketAddress socketAddr = (InetSocketAddress) ch.getLocalAddress();
+    boundPort = socketAddr.getPort();
+
+    LOG.info("Started listening to TCP requests at port " + boundPort + " for "
         + rpcProgram + " with workerCount " + workerCount);
   }
+
+  // boundPort will be set only after server starts
+  public int getBoundPort() {
+    return this.boundPort;
+  }
 }
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpServer.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpServer.java
index 438eebc5378..8e77fce36b3 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpServer.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpServer.java
@@ -23,6 +23,7 @@ import java.util.concurrent.Executors;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.jboss.netty.bootstrap.ConnectionlessBootstrap;
+import org.jboss.netty.channel.Channel;
 import org.jboss.netty.channel.Channels;
 import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
 import org.jboss.netty.channel.socket.DatagramChannelFactory;
@@ -39,6 +40,7 @@ public class SimpleUdpServer {
   protected final int port;
   protected final SimpleChannelUpstreamHandler rpcProgram;
   protected final int workerCount;
+  protected int boundPort = -1; // Will be set after server starts
 
   public SimpleUdpServer(int port, SimpleChannelUpstreamHandler program,
       int workerCount) {
     this.port = port;
@@ -61,9 +63,16 @@ public class SimpleUdpServer {
     b.setOption("receiveBufferSize", RECEIVE_BUFFER_SIZE);
 
     // Listen to the UDP port
-    b.bind(new InetSocketAddress(port));
-
-    LOG.info("Started listening to UDP requests at port " + port + " for "
+    Channel ch = b.bind(new InetSocketAddress(port));
+    InetSocketAddress socketAddr = (InetSocketAddress) ch.getLocalAddress();
+    boundPort = socketAddr.getPort();
+
+    LOG.info("Started listening to UDP requests at port " + boundPort + " for "
        + rpcProgram + " with workerCount " + workerCount);
   }
+
+  // boundPort will be set only after server starts
+  public int getBoundPort() {
+    return this.boundPort;
+  }
 }
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
index 6a3e86c13da..21e99b6a951 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
@@ -31,14 +31,14 @@ public class Portmap {
   public static final Log LOG = LogFactory.getLog(Portmap.class);
 
   private static void startUDPServer(RpcProgramPortmap rpcProgram) {
-    rpcProgram.register(PortmapMapping.TRANSPORT_UDP);
+    rpcProgram.register(PortmapMapping.TRANSPORT_UDP, RpcProgram.RPCB_PORT);
     SimpleUdpServer udpServer = new SimpleUdpServer(RpcProgram.RPCB_PORT,
         rpcProgram, 1);
     udpServer.run();
   }
 
   private static void startTCPServer(final RpcProgramPortmap rpcProgram) {
-    rpcProgram.register(PortmapMapping.TRANSPORT_TCP);
+    rpcProgram.register(PortmapMapping.TRANSPORT_TCP, RpcProgram.RPCB_PORT);
     SimpleTcpServer tcpServer = new SimpleTcpServer(RpcProgram.RPCB_PORT,
         rpcProgram, 1);
     tcpServer.run();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java
index 9fca31f2587..98ec9f661db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java
@@ -18,8 +18,6 @@
 package org.apache.hadoop.hdfs.nfs.mount;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mount.MountdBase;
@@ -32,23 +30,14 @@ import org.apache.hadoop.mount.MountdBase;
  * handle for requested directory and returns it to the client.
  */
 public class Mountd extends MountdBase {
-  /**
-   * Constructor
-   * @param exports
-   * @throws IOException
-   */
-  public Mountd(List<String> exports) throws IOException {
-    super(exports, new RpcProgramMountd(exports));
-  }
-
-  public Mountd(List<String> exports, Configuration config) throws IOException {
-    super(exports, new RpcProgramMountd(exports, config));
+
+  public Mountd(Configuration config) throws IOException {
+    super(new RpcProgramMountd(config));
   }
 
   public static void main(String[] args) throws IOException {
-    List<String> exports = new ArrayList<String>();
-    exports.add("/");
-    Mountd mountd = new Mountd(exports);
+    Configuration config = new Configuration();
+    Mountd mountd = new Mountd(config);
     mountd.start(true);
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
index f8ac1dc1e4e..b7e669a698f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.mount.MountResponse;
 import org.apache.hadoop.nfs.AccessPrivilege;
 import org.apache.hadoop.nfs.NfsExports;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
+import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.RpcAcceptedReply;
 import org.apache.hadoop.oncrpc.RpcCall;
@@ -49,6 +50,8 @@ import org.jboss.netty.buffer.ChannelBuffer;
 import org.jboss.netty.buffer.ChannelBuffers;
 import org.jboss.netty.channel.ChannelHandlerContext;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * RPC program corresponding to mountd daemon. See {@link Mountd}.
  */
@@ -71,23 +74,15 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
 
   private final NfsExports hostsMatcher;
 
-  public RpcProgramMountd() throws IOException {
-    this(new ArrayList<String>(0));
-  }
-
-  public RpcProgramMountd(List<String> exports) throws IOException {
-    this(exports, new Configuration());
-  }
-
-  public RpcProgramMountd(List<String> exports, Configuration config)
-      throws IOException {
+  public RpcProgramMountd(Configuration config) throws IOException {
     // Note that RPC cache is not enabled
     super("mountd", "localhost", config.getInt("nfs3.mountd.port", PORT),
         PROGRAM, VERSION_1, VERSION_3);
-
+    exports = new ArrayList<String>();
+    exports.add(config.get(Nfs3Constant.EXPORT_POINT,
+        Nfs3Constant.EXPORT_POINT_DEFAULT));
     this.hostsMatcher = NfsExports.getInstance(config);
     this.mounts = Collections.synchronizedList(new ArrayList<MountEntry>());
-    this.exports = Collections.unmodifiableList(exports);
     this.dfsClient = new DFSClient(NameNode.getAddress(config), config);
   }
 
@@ -200,7 +195,7 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
     } else if (mntproc == MNTPROC.UMNTALL) {
       umntall(out, xid, client);
     } else if (mntproc == MNTPROC.EXPORT) {
-      // Currently only support one NFS export "/"
+      // Currently only one NFS export is supported
       List<NfsExports> hostsMatchers = new ArrayList<NfsExports>();
       hostsMatchers.add(hostsMatcher);
       out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
@@ -220,4 +215,9 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
     // Not required, because cache is turned off
     return false;
   }
+
+  @VisibleForTesting
+  public List<String> getExports() {
+    return this.exports;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
index 420ab8825f0..08fd77c34d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
@@ -46,6 +46,7 @@ public class AsyncDataService {
 
   public AsyncDataService() {
     threadFactory = new ThreadFactory() {
+      @Override
       public Thread newThread(Runnable r) {
         return new Thread(threadGroup, r);
       }
@@ -129,6 +130,7 @@ public class AsyncDataService {
           + openFileCtx.getNextOffset();
     }
 
+    @Override
     public void run() {
       try {
         openFileCtx.executeWriteBack();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
index c7265ea2dd7..8ec99de4860 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
@@ -118,6 +118,7 @@ class DFSClientCache {
 
     // Guava requires CacheLoader never returns null.
     return ugi.doAs(new PrivilegedExceptionAction<DFSClient>() {
+      @Override
       public DFSClient run() throws IOException {
         return new DFSClient(NameNode.getAddress(config), config);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
index b81504bddc5..241090528f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
@@ -18,12 +18,9 @@
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.nfs.mount.Mountd;
-import org.apache.hadoop.mount.MountdBase;
 import org.apache.hadoop.nfs.nfs3.Nfs3Base;
 import org.apache.hadoop.util.StringUtils;
 
@@ -42,28 +39,24 @@ public class Nfs3 extends Nfs3Base {
     Configuration.addDefaultResource("hdfs-site.xml");
   }
 
-  public Nfs3(List<String> exports) throws IOException {
-    super(new RpcProgramNfs3());
-    mountd = new Mountd(exports);
-  }
-
-  @VisibleForTesting
-  public Nfs3(List<String> exports, Configuration config) throws IOException {
-    super(new RpcProgramNfs3(config), config);
-    mountd = new Mountd(exports, config);
+  public Nfs3(Configuration conf) throws IOException {
+    super(new RpcProgramNfs3(conf), conf);
+    mountd = new Mountd(conf);
   }
 
   public Mountd getMountd() {
     return mountd;
   }
 
+  @VisibleForTesting
+  public void startServiceInternal(boolean register) throws IOException {
+    mountd.start(register); // Start mountd
+    start(register);
+  }
+
   public static void main(String[] args) throws IOException {
-    StringUtils.startupShutdownMessage(Nfs3.class, args, LOG);
-    List<String> exports = new ArrayList<String>();
-    exports.add("/");
-
-    final Nfs3 nfsServer = new Nfs3(exports);
-    nfsServer.mountd.start(true); // Start mountd
-    nfsServer.start(true);
+    StringUtils.startupShutdownMessage(Nfs3.class, args, LOG);
+    final Nfs3 nfsServer = new Nfs3(new Configuration());
+    nfsServer.startServiceInternal(true);
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index 59412382573..91ce8ef24dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -163,12 +163,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
   private final RpcCallCache rpcCallCache;
 
-  public RpcProgramNfs3() throws IOException {
-    this(new Configuration());
-  }
-
   public RpcProgramNfs3(Configuration config) throws IOException {
-    super("NFS3", "localhost", Nfs3Constant.PORT, Nfs3Constant.PROGRAM,
+    super("NFS3", "localhost", config.getInt(Nfs3Constant.NFS3_SERVER_PORT,
+        Nfs3Constant.NFS3_SERVER_PORT_DEFAULT), Nfs3Constant.PROGRAM,
         Nfs3Constant.VERSION, Nfs3Constant.VERSION);
 
     config.set(FsPermission.UMASK_LABEL, "000");
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java
index 977902a6845..1a31998f358 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.hdfs.nfs;
 
 import java.io.IOException;
 import java.net.InetAddress;
-import java.util.ArrayList;
-import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -45,11 +43,13 @@ public class TestMountd {
         .build();
     cluster.waitActive();
 
+    // Use ephemeral port in case tests are running in parallel
+    config.setInt("nfs3.mountd.port", 0);
+    config.setInt("nfs3.server.port", 0);
+
     // Start nfs
-    List<String> exports = new ArrayList<String>();
-    exports.add("/");
-    Nfs3 nfs3 = new Nfs3(exports, config);
-    nfs3.start(false);
+    Nfs3 nfs3 = new Nfs3(config);
+    nfs3.startServiceInternal(false);
 
     RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountd()
         .getRpcProgram();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
index 1f0ba43f39b..578539886d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
@@ -23,6 +23,7 @@ import java.util.Arrays;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3Utils;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
@@ -154,7 +155,9 @@ public class TestOutOfOrderWrite {
     Arrays.fill(data3, (byte) 9);
 
     // NFS3 Create request
-    WriteClient client = new WriteClient("localhost", Nfs3Constant.PORT,
+    Configuration conf = new Configuration();
+    WriteClient client = new WriteClient("localhost", conf.getInt(
+        Nfs3Constant.NFS3_SERVER_PORT, Nfs3Constant.NFS3_SERVER_PORT_DEFAULT),
         create(), false);
     client.run();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestReaddir.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestReaddir.java
index 8fdf4bb7f26..e1493b60ea2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestReaddir.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestReaddir.java
@@ -22,7 +22,6 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.net.InetAddress;
-import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
@@ -67,11 +66,13 @@ public class TestReaddir {
     hdfs = cluster.getFileSystem();
     nn = cluster.getNameNode();
 
+    // Use ephemeral port in case tests are running in parallel
+    config.setInt("nfs3.mountd.port", 0);
+    config.setInt("nfs3.server.port", 0);
+
     // Start nfs
-    List<String> exports = new ArrayList<String>();
-    exports.add("/");
-    Nfs3 nfs3 = new Nfs3(exports, config);
-    nfs3.start(false);
+    Nfs3 nfs3 = new Nfs3(config);
+    nfs3.startServiceInternal(false);
 
     nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestExportsTable.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestExportsTable.java
new file mode 100644
index 00000000000..5180d3ad38a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestExportsTable.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.nfs.nfs3;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.nfs.mount.Mountd;
+import org.apache.hadoop.hdfs.nfs.mount.RpcProgramMountd;
+import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
+import org.junit.Test;
+
+public class TestExportsTable {
+
+  @Test
+  public void testExportPoint() throws IOException {
+    Configuration config = new Configuration();
+    MiniDFSCluster cluster = null;
+
+    String exportPoint = "/myexport1";
+    config.setStrings(Nfs3Constant.EXPORT_POINT, exportPoint);
+    // Use ephemeral port in case tests are running in parallel
+    config.setInt("nfs3.mountd.port", 0);
+    config.setInt("nfs3.server.port", 0);
+
+    try {
+      cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
+      cluster.waitActive();
+
+      // Start nfs
+      final Nfs3 nfsServer = new Nfs3(config);
+      nfsServer.startServiceInternal(false);
+
+      Mountd mountd = nfsServer.getMountd();
+      RpcProgramMountd rpcMount = (RpcProgramMountd) mountd.getRpcProgram();
+      assertTrue(rpcMount.getExports().size() == 1);
+
+      String exportInMountd = rpcMount.getExports().get(0);
+      assertTrue(exportInMountd.equals(exportPoint));
+
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
index e721db69c16..c4451364564 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
@@ -23,10 +23,7 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 import java.net.InetAddress;
 import java.nio.ByteBuffer;
-import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.List;
-import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentNavigableMap;
 
 import junit.framework.Assert;
@@ -215,11 +212,13 @@ public class TestWrites {
     cluster.waitActive();
     client = new DFSClient(NameNode.getAddress(config), config);
 
+    // Use ephemeral port in case tests are running in parallel
+    config.setInt("nfs3.mountd.port", 0);
+    config.setInt("nfs3.server.port", 0);
+
     // Start nfs
-    List<String> exports = new ArrayList<String>();
-    exports.add("/");
-    Nfs3 nfs3 = new Nfs3(exports, config);
-    nfs3.start(false);
+    Nfs3 nfs3 = new Nfs3(config);
+    nfs3.startServiceInternal(false);
 
     nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
 
     HdfsFileStatus status = client.getFileInfo("/");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2d77109cdff..1918cac2e2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -613,6 +613,9 @@ Release 2.2.1 - UNRELEASED
 
     HDFS-5364. Add OpenFileCtx cache. (brandonli)
 
+    HDFS-5469. Add configuration property for the sub-directory export path
+    (brandonli)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES
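
Usage note (not part of the patch): with this change applied, the NFS gateway exports exactly one path, read from the new dfs.nfs3.export.point property (default "/"). Below is a minimal Java sketch of starting the gateway with a sub-directory export. It reuses only APIs introduced in the patch (the Nfs3(Configuration) constructor, Nfs3Constant.EXPORT_POINT, and the @VisibleForTesting startServiceInternal() entry point that Nfs3.main() also follows); the export path "/data/shared" and the class name are hypothetical example values.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;

public class ExportPointExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Export only this sub-directory instead of the default "/".
    // "/data/shared" is a hypothetical path used for illustration.
    conf.set(Nfs3Constant.EXPORT_POINT, "/data/shared");
    Nfs3 nfsServer = new Nfs3(conf);
    // Starts mountd and the NFS3 server; "true" registers both
    // programs with the local portmapper.
    nfsServer.startServiceInternal(true);
  }
}

Equivalently, dfs.nfs3.export.point can be set in hdfs-site.xml, which Nfs3 loads as a default resource (see the Configuration.addDefaultResource("hdfs-site.xml") call in Nfs3.java above).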