From 7f0efe96f85704e39349f20ab6a11bfaa81c6a75 Mon Sep 17 00:00:00 2001
From: Andrew Wang
Date: Mon, 29 Sep 2014 14:14:22 -0700
Subject: [PATCH 1/5] HDFS-7124. Remove EncryptionZoneManager.NULL_EZ. (clamb via wang)

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                 | 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java     | 3 +--
 .../ClientNamenodeProtocolServerSideTranslatorPB.java       | 4 +++-
 .../hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java | 6 +++++-
 .../hadoop/hdfs/server/namenode/EncryptionZoneManager.java  | 6 +-----
 .../org/apache/hadoop/hdfs/server/namenode/FSDirectory.java | 3 +--
 .../hadoop-hdfs/src/main/proto/encryption.proto             | 2 +-
 7 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 89a47e3a225..41d80661c7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -520,6 +520,8 @@ Release 2.6.0 - UNRELEASED
     HDFS-7104. Fix and clarify INodeInPath getter functions. (Zhe Zhang via
     wang)
 
+    HDFS-7124. Remove EncryptionZoneManager.NULL_EZ. (clamb via wang)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index bdfea127477..d83d8cb727b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2940,8 +2940,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
       throws IOException {
     checkOpen();
     try {
-      final EncryptionZone ez = namenode.getEZForPath(src);
-      return (ez.getId() < 0) ? null : ez;
+      return namenode.getEZForPath(src);
     } catch (RemoteException re) {
       throw re.unwrapRemoteException(AccessControlException.class,
           UnresolvedPathException.class);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 27904e1087a..a92d4555e6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -1340,7 +1340,9 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       GetEZForPathResponseProto.Builder builder =
           GetEZForPathResponseProto.newBuilder();
       final EncryptionZone ret = server.getEZForPath(req.getSrc());
-      builder.setZone(PBHelper.convert(ret));
+      if (ret != null) {
+        builder.setZone(PBHelper.convert(ret));
+      }
       return builder.build();
     } catch (IOException e) {
       throw new ServiceException(e);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index df8965cf4d4..077a3e9489c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -1347,7 +1347,11 @@ public class ClientNamenodeProtocolTranslatorPB implements
     try {
       final EncryptionZonesProtos.GetEZForPathResponseProto response =
           rpcProxy.getEZForPath(null, req);
-      return PBHelper.convert(response.getZone());
+      if (response.hasZone()) {
+        return PBHelper.convert(response.getZone());
+      } else {
+        return null;
+      }
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index 0e83583fb35..0d7ced9274d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -57,10 +57,6 @@ public class EncryptionZoneManager {
   public static Logger LOG = LoggerFactory.getLogger(EncryptionZoneManager
       .class);
 
-  public static final EncryptionZone NULL_EZ =
-      new EncryptionZone(-1, "", CipherSuite.UNKNOWN,
-          CryptoProtocolVersion.UNKNOWN, "");
-
   /**
    * EncryptionZoneInt is the internal representation of an encryption zone. The
    * external representation of an EZ is embodied in an EncryptionZone and
@@ -226,7 +222,7 @@ public class EncryptionZoneManager {
   EncryptionZone getEZINodeForPath(INodesInPath iip) {
     final EncryptionZoneInt ezi = getEncryptionZoneForPath(iip);
     if (ezi == null) {
-      return NULL_EZ;
+      return null;
     } else {
       return new EncryptionZone(ezi.getINodeId(), getFullPathName(ezi),
           ezi.getSuite(), ezi.getVersion(), ezi.getKeyName());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index a25e3fa6222..b5cf6c67a74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -2845,8 +2845,7 @@ public class FSDirectory implements Closeable {
       iip = getINodesInPath(inode.getFullPathName(), true);
     }
     EncryptionZone encryptionZone = getEZForPath(iip);
-    if (encryptionZone == null ||
-        encryptionZone.equals(EncryptionZoneManager.NULL_EZ)) {
+    if (encryptionZone == null) {
       // not an encrypted file
       return null;
     } else if(encryptionZone.getPath() == null
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto
index bb291eaff23..68b2f3af29c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto
@@ -63,5 +63,5 @@ message GetEZForPathRequestProto {
 }
 
 message GetEZForPathResponseProto {
-    required EncryptionZoneProto zone = 1;
+    optional EncryptionZoneProto zone = 1;
 }

From e10eeaabce2a21840cfd5899493c9d2d4fe2e322 Mon Sep 17 00:00:00 2001
From: Jonathan Eagles
Date: Mon, 29 Sep 2014 16:37:23 -0500
Subject: [PATCH 2/5] YARN-2606. Application History Server tries to access hdfs before doing secure login (Mit Desai via jeagles)

---
 hadoop-yarn-project/CHANGES.txt                 | 3 +++
 .../FileSystemApplicationHistoryStore.java      | 5 +++--
 .../TestFileSystemApplicationHistoryStore.java  | 8 ++++----
 3 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 7fa83bec42e..6f23eeec165 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -465,6 +465,9 @@ Release 2.6.0 - UNRELEASED
     YARN-2608. FairScheduler: Potential deadlocks in loading alloc files and
     clock access. (Wei Yan via kasha)
 
+    YARN-2606. Application History Server tries to access hdfs before doing
+    secure login (Mit Desai via jeagles)
+
 Release 2.5.1 - 2014-09-05
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
index 7840dd18660..6d76864b07f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
@@ -115,7 +115,8 @@ public class FileSystemApplicationHistoryStore extends AbstractService
   }
 
   @Override
-  public void serviceInit(Configuration conf) throws Exception {
+  public void serviceStart() throws Exception {
+    Configuration conf = getConfig();
     Path fsWorkingPath =
         new Path(conf.get(YarnConfiguration.FS_APPLICATION_HISTORY_STORE_URI,
             conf.get("hadoop.tmp.dir") + "/yarn/timeline/generic-history"));
@@ -132,7 +133,7 @@ public class FileSystemApplicationHistoryStore extends AbstractService
       LOG.error("Error when initializing FileSystemHistoryStorage", e);
       throw e;
     }
-    super.serviceInit(conf);
+    super.serviceStart();
   }
 
   @Override
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
index 552a5e50b0e..4ac6f4d0aba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
@@ -61,10 +61,10 @@ public class TestFileSystemApplicationHistoryStore extends
   @Before
   public void setup() throws Exception {
     fs = new RawLocalFileSystem();
-    initStore(fs);
+    initAndStartStore(fs);
   }
 
-  private void initStore(final FileSystem fs) throws IOException,
+  private void initAndStartStore(final FileSystem fs) throws IOException,
       URISyntaxException {
     Configuration conf = new Configuration();
     fs.initialize(new URI("/"), conf);
@@ -272,7 +272,7 @@ public class TestFileSystemApplicationHistoryStore extends
     doReturn(true).when(fs).isDirectory(any(Path.class));
 
     try {
-      initStore(fs);
+      initAndStartStore(fs);
     } catch (Exception e) {
       Assert.fail("Exception should not be thrown: " + e);
     }
@@ -293,7 +293,7 @@ public class TestFileSystemApplicationHistoryStore extends
     doThrow(new IOException()).when(fs).mkdirs(any(Path.class));
 
     try {
-      initStore(fs);
+      initAndStartStore(fs);
       Assert.fail("Exception should have been thrown");
     } catch (Exception e) {
       // Expected failure

From c6c3247dc0dcb8c72ea00f3fb14a0879fcf49c56 Mon Sep 17 00:00:00 2001
From: Andrew Wang
Date: Mon, 29 Sep 2014 14:50:28 -0700
Subject: [PATCH 3/5] HDFS-7122. Use of ThreadLocal results in poor block placement. (wang)

---
 .../apache/hadoop/net/NetworkTopology.java  | 25 +++----------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 2 files changed, 7 insertions(+), 21 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index 5f11367d491..aaa5ae3f14d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -673,27 +673,11 @@ public class NetworkTopology {
     return node1.getParent()==node2.getParent();
   }
 
-  private static final ThreadLocal<Random> r = new ThreadLocal<Random>();
-
-  /**
-   * Getter for thread-local Random, which provides better performance than
-   * a shared Random (even though Random is thread-safe).
-   *
-   * @return Thread-local Random.
-   */
-  protected Random getRandom() {
-    Random rand = r.get();
-    if (rand == null) {
-      rand = new Random();
-      r.set(rand);
-    }
-    return rand;
-  }
+  private static final Random r = new Random();
 
   @VisibleForTesting
   void setRandomSeed(long seed) {
-    Random rand = getRandom();
-    rand.setSeed(seed);
+    r.setSeed(seed);
   }
 
   /** randomly choose one node from scope
@@ -745,7 +729,7 @@ public class NetworkTopology {
           "Failed to find datanode (scope=\"" + String.valueOf(scope) +
           "\" excludedScope=\"" + String.valueOf(excludedScope) + "\").");
     }
-    int leaveIndex = getRandom().nextInt(numOfDatanodes);
+    int leaveIndex = r.nextInt(numOfDatanodes);
     return innerNode.getLeaf(leaveIndex, node);
   }
 
@@ -918,11 +902,10 @@ public class NetworkTopology {
       list.add(node);
     }
 
-    Random rand = getRandom();
     int idx = 0;
     for (List<Node> list: tree.values()) {
       if (list != null) {
-        Collections.shuffle(list, rand);
+        Collections.shuffle(list, r);
         for (Node n: list) {
           nodes[idx] = n;
           idx++;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 41d80661c7a..1b3053ca164 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -532,6 +532,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6865. Byte array native checksumming on client side
     (James Thomas via todd)
 
+    HDFS-7122. Use of ThreadLocal results in poor block placement.
+    (wang)
+
   BUG FIXES
 
     HDFS-6823. dfs.web.authentication.kerberos.principal shows up in logs for

From 64aef189656919327b11fe5e90ea680ca7ba83df Mon Sep 17 00:00:00 2001
From: Andrew Wang
Date: Mon, 29 Sep 2014 15:02:29 -0700
Subject: [PATCH 4/5] HADOOP-11153. Make number of KMS threads configurable. (wang)

---
 hadoop-common-project/hadoop-common/CHANGES.txt            | 2 ++
 hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh  | 4 ++++
 .../hadoop-kms/src/main/libexec/kms-config.sh              | 7 +++++++
 hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh      | 1 +
 .../hadoop-kms/src/main/tomcat/server.xml                  | 1 +
 .../hadoop-kms/src/main/tomcat/ssl-server.xml              | 2 +-
 hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm | 2 ++
 7 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4cb1b7327ed..ea1d8fde58b 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -554,6 +554,8 @@ Release 2.6.0 - UNRELEASED
     HADOOP-10954. Adding site documents of hadoop-tools (Masatake Iwasaki
     via aw)
 
+    HADOOP-11153. Make number of KMS threads configurable. (wang)
+
   OPTIMIZATIONS
 
     HADOOP-10838. Byte array native checksumming. (James Thomas via todd)
diff --git a/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh b/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh
index 1bbec7f1856..88a2b8644ea 100644
--- a/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh
+++ b/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh
@@ -36,6 +36,10 @@
 #
 # export KMS_ADMIN_PORT=`expr ${KMS_HTTP_PORT} + 1`
 
+# The maximum number of Tomcat handler threads
+#
+# export KMS_MAX_THREADS=1000
+
 # The location of the SSL keystore if using SSL
 #
 # export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore
diff --git a/hadoop-common-project/hadoop-kms/src/main/libexec/kms-config.sh b/hadoop-common-project/hadoop-kms/src/main/libexec/kms-config.sh
index 3e203995de4..dc603d4f909 100644
--- a/hadoop-common-project/hadoop-kms/src/main/libexec/kms-config.sh
+++ b/hadoop-common-project/hadoop-kms/src/main/libexec/kms-config.sh
@@ -136,6 +136,13 @@ else
   print "Using KMS_ADMIN_PORT: ${KMS_ADMIN_PORT}"
 fi
 
+if [ "${KMS_MAX_THREADS}" = "" ]; then
+  export KMS_MAX_THREADS=1000
+  print "Setting KMS_MAX_THREADS: ${KMS_MAX_THREADS}"
+else
+  print "Using KMS_MAX_THREADS: ${KMS_MAX_THREADS}"
+fi
+
 if [ "${KMS_SSL_KEYSTORE_FILE}" = "" ]; then
   export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore
   print "Setting KMS_SSL_KEYSTORE_FILE: ${KMS_SSL_KEYSTORE_FILE}"
diff --git a/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh b/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh
index 588c2c87acd..b43225f62d7 100644
--- a/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh
+++ b/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh
@@ -44,6 +44,7 @@ catalina_opts="${catalina_opts} -Dkms.log.dir=${KMS_LOG}";
 catalina_opts="${catalina_opts} -Dkms.temp.dir=${KMS_TEMP}";
 catalina_opts="${catalina_opts} -Dkms.admin.port=${KMS_ADMIN_PORT}";
 catalina_opts="${catalina_opts} -Dkms.http.port=${KMS_HTTP_PORT}";
+catalina_opts="${catalina_opts} -Dkms.max.threads=${KMS_MAX_THREADS}";
 catalina_opts="${catalina_opts} -Dkms.ssl.keystore.file=${KMS_SSL_KEYSTORE_FILE}";
 catalina_opts="${catalina_opts} -Dkms.ssl.keystore.pass=${KMS_SSL_KEYSTORE_PASS}";
 
diff --git a/hadoop-common-project/hadoop-kms/src/main/tomcat/server.xml b/hadoop-common-project/hadoop-kms/src/main/tomcat/server.xml
index d070b58f9ed..516a6a9d0ca 100644
--- a/hadoop-common-project/hadoop-kms/src/main/tomcat/server.xml
+++ b/hadoop-common-project/hadoop-kms/src/main/tomcat/server.xml
@@ -73,6 +73,7 @@
        Define a non-SSL HTTP/1.1 Connector on port ${kms.http.port}
   -->
diff --git a/hadoop-common-project/hadoop-kms/src/main/tomcat/ssl-server.xml b/hadoop-common-project/hadoop-kms/src/main/tomcat/ssl-server.xml
index 3a464ca47cf..6235b8b6bff 100644
--- a/hadoop-common-project/hadoop-kms/src/main/tomcat/ssl-server.xml
+++ b/hadoop-common-project/hadoop-kms/src/main/tomcat/ssl-server.xml
@@ -69,7 +69,7 @@
        connector should be using the OpenSSL style configuration
        described in the APR documentation -->
diff --git a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm
index e32893b377f..9b105f2825d 100644
--- a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm
+++ b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm
@@ -152,6 +152,8 @@ hadoop-${project.version} $ sbin/kms.sh start
 
   * KMS_ADMIN_PORT
 
+  * KMS_MAX_THREADS
+
   * KMS_LOG
 
   NOTE: You need to restart the KMS for the configuration changes to take

From c88c6c57aa79e8a1f33d2e075491656f472c858b Mon Sep 17 00:00:00 2001
From: Brandon Li
Date: Mon, 29 Sep 2014 15:05:43 -0700
Subject: [PATCH 5/5] HADOOP-11130. NFS updateMaps OS check is reversed. Contributed by Brandon Li

---
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 ++
 .../org/apache/hadoop/nfs/nfs3/IdUserGroup.java | 17 ++++++++---------
 2 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index ea1d8fde58b..9d0816f0414 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -763,6 +763,8 @@ Release 2.6.0 - UNRELEASED
     HADOOP-1110. JavaKeystoreProvider should not report a key as created if it
     was not flushed to the backing file.
 
+    HADOOP-11130. NFS updateMaps OS check is reversed (brandonli)
+
   BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
 
     HADOOP-10734. Implement high-performance secure random number sources.
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java
index 1a7a3f442c6..0098b47f4bc 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java
@@ -45,8 +45,8 @@ public class IdUserGroup {
   private final static String OS = System.getProperty("os.name");
 
   /** Shell commands to get users and groups */
-  static final String LINUX_GET_ALL_USERS_CMD = "getent passwd | cut -d: -f1,3";
-  static final String LINUX_GET_ALL_GROUPS_CMD = "getent group | cut -d: -f1,3";
+  static final String GET_ALL_USERS_CMD = "getent passwd | cut -d: -f1,3";
+  static final String GET_ALL_GROUPS_CMD = "getent group | cut -d: -f1,3";
   static final String MAC_GET_ALL_USERS_CMD = "dscl . -list /Users UniqueID";
   static final String MAC_GET_ALL_GROUPS_CMD = "dscl . -list /Groups PrimaryGroupID";
 
@@ -223,17 +223,16 @@ public class IdUserGroup {
           + "' does not exist.");
     }
 
-    if (OS.startsWith("Linux")) {
-      updateMapInternal(uMap, "user", LINUX_GET_ALL_USERS_CMD, ":",
-          staticMapping.uidMapping);
-      updateMapInternal(gMap, "group", LINUX_GET_ALL_GROUPS_CMD, ":",
-          staticMapping.gidMapping);
-    } else {
-      // Mac
+    if (OS.startsWith("Mac")) {
       updateMapInternal(uMap, "user", MAC_GET_ALL_USERS_CMD, "\\s+",
           staticMapping.uidMapping);
       updateMapInternal(gMap, "group", MAC_GET_ALL_GROUPS_CMD, "\\s+",
           staticMapping.gidMapping);
+    } else {
+      updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":",
+          staticMapping.uidMapping);
+      updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":",
+          staticMapping.gidMapping);
     }
 
     uidNameMap = uMap;