HDFS-5767. The NFS implementation assumes the userName-to-userId mapping is unique, which is not always true. Contributed by Yongjun Zhang

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1564141 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Brandon Li 2014-02-04 00:27:25 +00:00
parent 60c0fe8f6d
commit 6439cd0f69
4 changed files with 61 additions and 39 deletions

View File

@ -425,9 +425,9 @@ checkJavadocWarnings () {
echo "" echo ""
echo "There appear to be $javadocWarnings javadoc warnings generated by the patched build." echo "There appear to be $javadocWarnings javadoc warnings generated by the patched build."
#There are 14 warnings that are caused by things that are caused by using sun internal APIs. #There are 12 warnings that are caused by things that are caused by using sun internal APIs.
#There are 2 warnings that are caused by the Apache DS Dn class used in MiniKdc. #There are 2 warnings that are caused by the Apache DS Dn class used in MiniKdc.
OK_JAVADOC_WARNINGS=16; OK_JAVADOC_WARNINGS=14;
### if current warnings greater than OK_JAVADOC_WARNINGS ### if current warnings greater than OK_JAVADOC_WARNINGS
if [[ $javadocWarnings -ne $OK_JAVADOC_WARNINGS ]] ; then if [[ $javadocWarnings -ne $OK_JAVADOC_WARNINGS ]] ; then
JIRA_COMMENT="$JIRA_COMMENT JIRA_COMMENT="$JIRA_COMMENT

View File

@ -51,14 +51,6 @@ public class IdUserGroup {
private long lastUpdateTime = 0; // Last time maps were updated private long lastUpdateTime = 0; // Last time maps were updated
static public class DuplicateNameOrIdException extends IOException {
private static final long serialVersionUID = 1L;
public DuplicateNameOrIdException(String msg) {
super(msg);
}
}
public IdUserGroup() throws IOException { public IdUserGroup() throws IOException {
updateMaps(); updateMaps();
} }
@ -80,7 +72,8 @@ public class IdUserGroup {
} }
} }
private static final String DUPLICATE_NAME_ID_DEBUG_INFO = "NFS gateway can't start with duplicate name or id on the host system.\n" private static final String DUPLICATE_NAME_ID_DEBUG_INFO =
"NFS gateway could have problem starting with duplicate name or id on the host system.\n"
+ "This is because HDFS (non-kerberos cluster) uses name as the only way to identify a user or group.\n" + "This is because HDFS (non-kerberos cluster) uses name as the only way to identify a user or group.\n"
+ "The host system with duplicated user/group name or id might work fine most of the time by itself.\n" + "The host system with duplicated user/group name or id might work fine most of the time by itself.\n"
+ "However when NFS gateway talks to HDFS, HDFS accepts only user and group name.\n" + "However when NFS gateway talks to HDFS, HDFS accepts only user and group name.\n"
@ -88,6 +81,16 @@ public class IdUserGroup {
+ "<getent passwd | cut -d: -f1,3> and <getent group | cut -d: -f1,3> on Linux systms,\n" + "<getent passwd | cut -d: -f1,3> and <getent group | cut -d: -f1,3> on Linux systms,\n"
+ "<dscl . -list /Users UniqueID> and <dscl . -list /Groups PrimaryGroupID> on MacOS."; + "<dscl . -list /Users UniqueID> and <dscl . -list /Groups PrimaryGroupID> on MacOS.";
private static void reportDuplicateEntry(final String header,
final Integer key, final String value,
final Integer ekey, final String evalue) {
LOG.warn("\n" + header + String.format(
"new entry (%d, %s), existing entry: (%d, %s).\n%s\n%s",
key, value, ekey, evalue,
"The new entry is to be ignored for the following reason.",
DUPLICATE_NAME_ID_DEBUG_INFO));
}
/** /**
* Get the whole list of users and groups and save them in the maps. * Get the whole list of users and groups and save them in the maps.
* @throws IOException * @throws IOException
@ -108,22 +111,27 @@ public class IdUserGroup {
} }
LOG.debug("add to " + mapName + "map:" + nameId[0] + " id:" + nameId[1]); LOG.debug("add to " + mapName + "map:" + nameId[0] + " id:" + nameId[1]);
// HDFS can't differentiate duplicate names with simple authentication // HDFS can't differentiate duplicate names with simple authentication
Integer key = Integer.valueOf(nameId[1]); final Integer key = Integer.valueOf(nameId[1]);
String value = nameId[0]; final String value = nameId[0];
if (map.containsKey(key)) { if (map.containsKey(key)) {
LOG.error(String.format( final String prevValue = map.get(key);
"Got duplicate id:(%d, %s), existing entry: (%d, %s).\n%s", key, if (value.equals(prevValue)) {
value, key, map.get(key), DUPLICATE_NAME_ID_DEBUG_INFO)); // silently ignore equivalent entries
throw new DuplicateNameOrIdException("Got duplicate id."); continue;
}
reportDuplicateEntry(
"Got multiple names associated with the same id: ",
key, value, key, prevValue);
continue;
} }
if (map.containsValue(nameId[0])) { if (map.containsValue(value)) {
LOG.error(String.format( final Integer prevKey = map.inverse().get(value);
"Got duplicate name:(%d, %s), existing entry: (%d, %s) \n%s", reportDuplicateEntry(
key, value, map.inverse().get(value), value, "Got multiple ids associated with the same name: ",
DUPLICATE_NAME_ID_DEBUG_INFO)); key, value, prevKey, value);
throw new DuplicateNameOrIdException("Got duplicate name"); continue;
} }
map.put(Integer.valueOf(nameId[1]), nameId[0]); map.put(key, value);
} }
LOG.info("Updated " + mapName + " map size:" + map.size()); LOG.info("Updated " + mapName + " map size:" + map.size());

View File

@ -17,11 +17,10 @@
*/ */
package org.apache.hadoop.nfs.nfs3; package org.apache.hadoop.nfs.nfs3;
import static org.junit.Assert.fail; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException; import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.IdUserGroup.DuplicateNameOrIdException;
import org.junit.Test; import org.junit.Test;
import com.google.common.collect.BiMap; import com.google.common.collect.BiMap;
@ -33,24 +32,36 @@ public class TestIdUserGroup {
public void testDuplicates() throws IOException { public void testDuplicates() throws IOException {
String GET_ALL_USERS_CMD = "echo \"root:x:0:0:root:/root:/bin/bash\n" String GET_ALL_USERS_CMD = "echo \"root:x:0:0:root:/root:/bin/bash\n"
+ "hdfs:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n" + "hdfs:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
+ "hdfs:x:11502:10788:Grid Distributed File System:/home/hdfs:/bin/bash\"" + "hdfs:x:11502:10788:Grid Distributed File System:/home/hdfs:/bin/bash\n"
+ "hdfs1:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
+ "hdfs2:x:11502:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
+ "bin:x:2:2:bin:/bin:/bin/sh\n"
+ "bin:x:1:1:bin:/bin:/sbin/nologin\n"
+ "daemon:x:1:1:daemon:/usr/sbin:/bin/sh\n"
+ "daemon:x:2:2:daemon:/sbin:/sbin/nologin\""
+ " | cut -d: -f1,3"; + " | cut -d: -f1,3";
String GET_ALL_GROUPS_CMD = "echo \"hdfs:*:11501:hrt_hdfs\n" String GET_ALL_GROUPS_CMD = "echo \"hdfs:*:11501:hrt_hdfs\n"
+ "mapred:x:497\n" + "mapred2:x:497\"" + " | cut -d: -f1,3"; + "mapred:x:497\n"
+ "mapred2:x:497\n"
+ "mapred:x:498\n"
+ "mapred3:x:498\""
+ " | cut -d: -f1,3";
// Maps for id to name map // Maps for id to name map
BiMap<Integer, String> uMap = HashBiMap.create(); BiMap<Integer, String> uMap = HashBiMap.create();
BiMap<Integer, String> gMap = HashBiMap.create(); BiMap<Integer, String> gMap = HashBiMap.create();
try { IdUserGroup.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":");
IdUserGroup.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":"); assertTrue(uMap.size() == 5);
fail("didn't detect the duplicate name"); assertEquals(uMap.get(0), "root");
} catch (DuplicateNameOrIdException e) { assertEquals(uMap.get(11501), "hdfs");
} assertEquals(uMap.get(11502), "hdfs2");
assertEquals(uMap.get(2), "bin");
assertEquals(uMap.get(1), "daemon");
try { IdUserGroup.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":");
IdUserGroup.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":"); assertTrue(gMap.size() == 3);
fail("didn't detect the duplicate id"); assertEquals(gMap.get(11501), "hdfs");
} catch (DuplicateNameOrIdException e) { assertEquals(gMap.get(497), "mapred");
} assertEquals(gMap.get(498), "mapred3");
} }
} }

View File

@ -328,6 +328,9 @@ Release 2.4.0 - UNRELEASED
the same node group when dfs.namenode.avoid.write.stale.datanode is true. the same node group when dfs.namenode.avoid.write.stale.datanode is true.
(Buddy via junping_du) (Buddy via junping_du)
HDFS-5767. NFS implementation assumes userName userId mapping to be unique,
which is not true sometimes (Yongjun Zhang via brandonli)
Release 2.3.0 - UNRELEASED Release 2.3.0 - UNRELEASED
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES