From cbca1668317f3f2d295eea53d7bd020bda4a810f Mon Sep 17 00:00:00 2001
From: Brandon Li
Date: Sat, 31 Aug 2013 21:12:22 +0000
Subject: [PATCH] HDFS-5136 MNT EXPORT should give the full group list which
 can mount the exports. Contributed by Brandon Li

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1519222 13f79535-47bb-0310-9956-ffa450edef68
---
 .../apache/hadoop/mount/MountResponse.java     | 24 +++++++++---
 .../apache/hadoop/nfs/nfs3/Nfs3Constant.java   |  6 +--
 .../hadoop}/nfs/security/AccessPrivilege.java  |  2 +-
 .../hadoop}/nfs/security/NfsExports.java       | 38 ++++++++++++++++++-
 .../hadoop}/nfs/security/TestNfsExports.java   |  4 +-
 .../hdfs/nfs/mount/RpcProgramMountd.java       |  9 +++--
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java   |  6 +--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt    |  3 ++
 8 files changed, 72 insertions(+), 20 deletions(-)
 rename {hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs => hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop}/nfs/security/AccessPrivilege.java (95%)
 rename {hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs => hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop}/nfs/security/NfsExports.java (93%)
 rename {hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs => hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop}/nfs/security/TestNfsExports.java (97%)

diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java
index 03cb1ae52bd..3839acc1966 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java
@@ -19,6 +19,7 @@
 
 import java.util.List;
 
+import org.apache.hadoop.nfs.security.NfsExports;
 import org.apache.hadoop.oncrpc.RpcAcceptedReply;
 import org.apache.hadoop.oncrpc.XDR;
 import org.apache.hadoop.oncrpc.RpcAuthInfo.AuthFlavor;
@@ -59,15 +60,28 @@ public static XDR writeMountList(XDR xdr, int xid, List<MountEntry> mounts) {
     xdr.writeBoolean(false); // Value follows no
     return xdr;
   }
-  
+
   /** Response for RPC call {@link MountInterface.MNTPROC#EXPORT} */
-  public static XDR writeExportList(XDR xdr, int xid, List<String> exports) {
+  public static XDR writeExportList(XDR xdr, int xid, List<String> exports,
+      List<NfsExports> hostMatcher) {
+    assert (exports.size() == hostMatcher.size());
+
     RpcAcceptedReply.voidReply(xdr, xid);
-    for (String export : exports) {
+    for (int i = 0; i < exports.size(); i++) {
       xdr.writeBoolean(true); // Value follows - yes
-      xdr.writeString(export);
-      xdr.writeInt(0);
+      xdr.writeString(exports.get(i));
+
+      // List host groups
+      String[] hostGroups = hostMatcher.get(i).getHostGroupList();
+      if (hostGroups.length > 0) {
+        for (int j = 0; j < hostGroups.length; j++) {
+          xdr.writeBoolean(true); // Value follows - yes
+          xdr.writeVariableOpaque(hostGroups[j].getBytes());
+        }
+      }
+      xdr.writeBoolean(false); // Value follows - no more group
     }
+
     xdr.writeBoolean(false); // Value follows - no
     return xdr;
   }
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java
index 8e9a8f10764..706c99f47c4 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java
@@ -192,13 +192,13 @@ public static WriteStableHow fromValue(int id) {
 
   public static final String EXPORTS_ALLOWED_HOSTS_SEPARATOR = ";";
   /** Allowed hosts for nfs exports */
-  public static final String EXPORTS_ALLOWED_HOSTS_KEY = "hdfs.nfs.exports.allowed.hosts";
+  public static final String EXPORTS_ALLOWED_HOSTS_KEY = "dfs.nfs.exports.allowed.hosts";
   public static final String EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT = "* rw";
   /** Size for nfs exports cache */
-  public static final String EXPORTS_CACHE_SIZE_KEY = "hdfs.nfs.exports.cache.size";
+  public static final String EXPORTS_CACHE_SIZE_KEY = "dfs.nfs.exports.cache.size";
   public static final int EXPORTS_CACHE_SIZE_DEFAULT = 512;
   /** Expiration time for nfs exports cache entry */
-  public static final String EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY = "hdfs.nfs.exports.cache.expirytime.millis";
+  public static final String EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY = "dfs.nfs.exports.cache.expirytime.millis";
   public static final long EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT = 15 * 60 * 1000; // 15 min
 
   public static final String FILE_DUMP_DIR_KEY = "dfs.nfs3.dump.dir";
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/security/AccessPrivilege.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/security/AccessPrivilege.java
similarity index 95%
rename from hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/security/AccessPrivilege.java
rename to hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/security/AccessPrivilege.java
index 43a0d001f26..8789ecfb4e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/security/AccessPrivilege.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/security/AccessPrivilege.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.nfs.security;
+package org.apache.hadoop.nfs.security;
 
 public enum AccessPrivilege {
   READ_ONLY,
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/security/NfsExports.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/security/NfsExports.java
similarity index 93%
rename from hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/security/NfsExports.java
rename to hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/security/NfsExports.java
index ad194e9e2d3..301f2f0ff72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/security/NfsExports.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/security/NfsExports.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.nfs.security;
+package org.apache.hadoop.nfs.security;
 
 import java.net.InetAddress;
 import java.util.ArrayList;
@@ -153,6 +153,19 @@ public long getExpirationTime() {
     }
   }
 
+  /**
+   * Return the configured group list
+   */
+  public String[] getHostGroupList() {
+    int listSize = mMatches.size();
+    String[] hostGroups = new String[listSize];
+
+    for (int i = 0; i < mMatches.size(); i++) {
+      hostGroups[i] = mMatches.get(i).getHostGroup();
+    }
+    return hostGroups;
+  }
+
   public AccessPrivilege getAccessPrivilege(InetAddress addr) {
     return getAccessPrivilege(addr.getHostAddress(),
         addr.getCanonicalHostName());
@@ -191,6 +204,7 @@ private Match(AccessPrivilege accessPrivilege) {
     }
 
     public abstract boolean isIncluded(String address, String hostname);
+    public abstract String getHostGroup();
   }
 
   /**
@@ -202,9 +216,14 @@ private AnonymousMatch(AccessPrivilege accessPrivilege) {
     }
 
     @Override
-    public boolean isIncluded(String ip, String hostname) {
+    public boolean isIncluded(String address, String hostname) {
       return true;
     }
+
+    @Override
+    public String getHostGroup() {
+      return "*";
+    }
   }
 
   /**
@@ -235,6 +254,11 @@ public boolean isIncluded(String address, String hostname) {
       }
       return false;
     }
+
+    @Override
+    public String getHostGroup() {
+      return subnetInfo.getAddress() + "/" + subnetInfo.getNetmask();
+    }
   }
 
   /**
@@ -264,6 +288,11 @@ public boolean isIncluded(String address, String hostname) {
       }
       return false;
     }
+
+    @Override
+    public String getHostGroup() {
+      return ipOrHost;
+    }
   }
 
   /**
@@ -293,6 +322,11 @@ public boolean isIncluded(String address, String hostname) {
       }
       return false;
     }
+
+    @Override
+    public String getHostGroup() {
+      return pattern.toString();
+    }
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/security/TestNfsExports.java b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/security/TestNfsExports.java
similarity index 97%
rename from hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/security/TestNfsExports.java
rename to hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/security/TestNfsExports.java
index 9448e18632e..dbadd8ba339 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/security/TestNfsExports.java
+++ b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/security/TestNfsExports.java
@@ -15,12 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.nfs.security;
+package org.apache.hadoop.nfs.security;
 
 import junit.framework.Assert;
 
-import org.apache.hadoop.hdfs.nfs.security.AccessPrivilege;
-import org.apache.hadoop.hdfs.nfs.security.NfsExports;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.junit.Test;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
index cec235c7d4a..5b5ea511d19 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
@@ -27,8 +27,6 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.nfs.security.AccessPrivilege;
-import org.apache.hadoop.hdfs.nfs.security.NfsExports;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.mount.MountEntry;
@@ -36,6 +34,8 @@
 import org.apache.hadoop.mount.MountResponse;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
+import org.apache.hadoop.nfs.security.AccessPrivilege;
+import org.apache.hadoop.nfs.security.NfsExports;
 import org.apache.hadoop.oncrpc.RpcAcceptedReply;
 import org.apache.hadoop.oncrpc.RpcCall;
 import org.apache.hadoop.oncrpc.RpcProgram;
@@ -184,7 +184,10 @@ public XDR handleInternal(RpcCall rpcCall, XDR xdr, XDR out,
     } else if (mntproc == MNTPROC.UMNTALL) {
       umntall(out, xid, client);
     } else if (mntproc == MNTPROC.EXPORT) {
-      out = MountResponse.writeExportList(out, xid, exports);
+      // Currently only support one NFS export "/"
+      List<NfsExports> hostsMatchers = new ArrayList<NfsExports>();
+      hostsMatchers.add(hostsMatcher);
+      out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
     } else {
       // Invalid procedure
       RpcAcceptedReply.voidReply(out, xid,
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index 1f39ace973f..404cf3e73ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -26,10 +26,10 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem.Statistics;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.Options;
@@ -38,8 +38,6 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
-import org.apache.hadoop.hdfs.nfs.security.AccessPrivilege;
-import org.apache.hadoop.hdfs.nfs.security.NfsExports;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -98,6 +96,8 @@
 import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
 import org.apache.hadoop.nfs.nfs3.response.WccAttr;
 import org.apache.hadoop.nfs.nfs3.response.WccData;
+import org.apache.hadoop.nfs.security.AccessPrivilege;
+import org.apache.hadoop.nfs.security.NfsExports;
 import org.apache.hadoop.oncrpc.RpcAcceptedReply;
 import org.apache.hadoop.oncrpc.RpcAuthInfo.AuthFlavor;
 import org.apache.hadoop.oncrpc.RpcAuthSys;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 068f7003175..4d7116f4faa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -313,6 +313,9 @@ Release 2.1.1-beta - UNRELEASED
     HDFS-5078 Support file append in NFSv3 gateway to enable data streaming
     to HDFS (brandonli)
 
+    HDFS-5136 MNT EXPORT should give the full group list which can mount the
+    exports (brandonli)
+
   IMPROVEMENTS
 
     HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may