HADOOP-11195. Move Id-Name mapping in NFS to the hadoop-common area for better maintenance. Contributed by Yongjun Zhang
parent b056048114
commit 72a556d3b0
@@ -674,6 +674,10 @@ Release 2.6.0 - UNRELEASED
 
     HADOOP-11194. Ignore .keep files. (kasha)
 
+    HADOOP-11195. Move Id-Name mapping in NFS to the hadoop-common area for
+    better maintenance (Yongjun Zhang via brandonli)
+
   BUG FIXES
 
     HADOOP-11182. GraphiteSink emits wrong timestamps (Sascha Coenen via raviprak)
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security;
+
+/**
+ * Some constants for IdMapping
+ */
+public class IdMappingConstant {
+
+  /** Do user/group update every 15 minutes by default, minimum 1 minute */
+  public final static String USERGROUPID_UPDATE_MILLIS_KEY = "usergroupid.update.millis";
+  public final static long USERGROUPID_UPDATE_MILLIS_DEFAULT = 15 * 60 * 1000; // ms
+  public final static long USERGROUPID_UPDATE_MILLIS_MIN = 1 * 60 * 1000; // ms
+
+  public final static String UNKNOWN_USER = "nobody";
+  public final static String UNKNOWN_GROUP = "nobody";
+
+  // Used for finding the configured static mapping file.
+  public static final String STATIC_ID_MAPPING_FILE_KEY = "static.id.mapping.file";
+  public static final String STATIC_ID_MAPPING_FILE_DEFAULT = "/etc/usergroupid.map";
+}
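
A minimal usage sketch (not part of this commit; the five-minute override below is illustrative) of how these constants are meant to be consumed through a Hadoop Configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.IdMappingConstant;

    public class IdMappingConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Illustrative override: refresh the id<->name maps every 5 minutes
        // instead of the 15-minute default.
        conf.setLong(IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY, 5 * 60 * 1000);

        long interval = conf.getLong(
            IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY,
            IdMappingConstant.USERGROUPID_UPDATE_MILLIS_DEFAULT);
        String mapFile = conf.get(
            IdMappingConstant.STATIC_ID_MAPPING_FILE_KEY,
            IdMappingConstant.STATIC_ID_MAPPING_FILE_DEFAULT);
        System.out.println("update interval (ms): " + interval); // 300000
        System.out.println("static mapping file: " + mapFile);   // /etc/usergroupid.map
      }
    }
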
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+
+/**
+ * An interface for the implementation of <userId, userName> mapping
+ * and <groupId, groupName> mapping
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface IdMappingServiceProvider {
+
+  // Return uid for given user name
+  public int getUid(String user) throws IOException;
+
+  // Return gid for given group name
+  public int getGid(String group) throws IOException;
+
+  // Return user name for given user id uid, if not found, return
+  // <unknown> passed to this method
+  public String getUserName(int uid, String unknown);
+
+  // Return group name for given group id gid, if not found, return
+  // <unknown> passed to this method
+  public String getGroupName(int gid, String unknown);
+
+  // Return uid for given user name.
+  // When can't map user, return user name's string hashcode
+  public int getUidAllowingUnknown(String user);
+
+  // Return gid for given group name.
+  // When can't map group, return group name's string hashcode
+  public int getGidAllowingUnknown(String group);
+}
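
A minimal sketch (not part of this commit) of what an alternative implementation of the new interface can look like; the fixed "hadoop"/1000 mapping is purely illustrative:

    import java.io.IOException;
    import org.apache.hadoop.security.IdMappingServiceProvider;

    // Illustrative only: knows a single user/group and falls back exactly as
    // the interface contract above describes.
    public class FixedIdMapping implements IdMappingServiceProvider {
      @Override
      public int getUid(String user) throws IOException {
        if (!"hadoop".equals(user)) {
          throw new IOException("unknown user: " + user);
        }
        return 1000;
      }
      @Override
      public int getGid(String group) throws IOException {
        if (!"hadoop".equals(group)) {
          throw new IOException("unknown group: " + group);
        }
        return 1000;
      }
      @Override
      public String getUserName(int uid, String unknown) {
        return uid == 1000 ? "hadoop" : unknown; // caller-supplied fallback
      }
      @Override
      public String getGroupName(int gid, String unknown) {
        return gid == 1000 ? "hadoop" : unknown;
      }
      @Override
      public int getUidAllowingUnknown(String user) {
        return "hadoop".equals(user) ? 1000 : user.hashCode(); // hashcode fallback
      }
      @Override
      public int getGidAllowingUnknown(String group) {
        return "hadoop".equals(group) ? 1000 : group.hashCode();
      }
    }
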
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.nfs.nfs3;
+package org.apache.hadoop.security;
 
 import java.io.BufferedReader;
 import java.io.File;
@@ -37,11 +37,15 @@ import com.google.common.collect.BiMap;
 import com.google.common.collect.HashBiMap;
 
 /**
+ * A simple shell-based implementation of {@link IdMappingServiceProvider}
  * Map id to user name or group name. It does update every 15 minutes. Only a
  * single instance of this class is expected to be on the server.
  */
-public class IdUserGroup {
-  static final Log LOG = LogFactory.getLog(IdUserGroup.class);
+public class ShellBasedIdMapping implements IdMappingServiceProvider {
+
+  private static final Log LOG =
+      LogFactory.getLog(ShellBasedIdMapping.class);
 
   private final static String OS = System.getProperty("os.name");
 
   /** Shell commands to get users and groups */
@@ -66,26 +70,31 @@ public class IdUserGroup {
 
   private long lastUpdateTime = 0; // Last time maps were updated
 
-  public IdUserGroup(Configuration conf) throws IOException {
+  public ShellBasedIdMapping(Configuration conf,
+      final String defaultStaticIdMappingFile) throws IOException {
     long updateTime = conf.getLong(
-        Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY,
-        Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_DEFAULT);
+        IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY,
+        IdMappingConstant.USERGROUPID_UPDATE_MILLIS_DEFAULT);
     // Minimal interval is 1 minute
-    if (updateTime < Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_MIN) {
+    if (updateTime < IdMappingConstant.USERGROUPID_UPDATE_MILLIS_MIN) {
       LOG.info("User configured user account update time is less"
           + " than 1 minute. Use 1 minute instead.");
-      timeout = Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_MIN;
+      timeout = IdMappingConstant.USERGROUPID_UPDATE_MILLIS_MIN;
     } else {
       timeout = updateTime;
     }
 
-    String staticFilePath = conf.get(Nfs3Constant.NFS_STATIC_MAPPING_FILE_KEY,
-        Nfs3Constant.NFS_STATIC_MAPPING_FILE_DEFAULT);
+    String staticFilePath = conf.get(IdMappingConstant.STATIC_ID_MAPPING_FILE_KEY,
+        defaultStaticIdMappingFile);
     staticMappingFile = new File(staticFilePath);
 
     updateMaps();
   }
 
+  public ShellBasedIdMapping(Configuration conf) throws IOException {
+    this(conf, IdMappingConstant.STATIC_ID_MAPPING_FILE_DEFAULT);
+  }
+
   @VisibleForTesting
   public long getTimeout() {
     return timeout;
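
A minimal sketch (not part of this commit) of the two constructors in use; the "/etc/nfs.map" argument mirrors what the NFS gateway passes in a later hunk of this diff, and the uid lookup is illustrative:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.IdMappingConstant;
    import org.apache.hadoop.security.ShellBasedIdMapping;

    public class ShellBasedIdMappingSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();

        // Generic callers fall back to STATIC_ID_MAPPING_FILE_DEFAULT.
        ShellBasedIdMapping generic = new ShellBasedIdMapping(conf);

        // A component can carry its own default static mapping file; the NFS
        // gateway passes Nfs3Constant.NFS_STATIC_MAPPING_FILE_DEFAULT ("/etc/nfs.map").
        ShellBasedIdMapping forNfs = new ShellBasedIdMapping(conf, "/etc/nfs.map");

        // Ids that cannot be mapped resolve to the caller-supplied fallback ("nobody").
        System.out.println(generic.getUserName(123456789, IdMappingConstant.UNKNOWN_USER));
      }
    }
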
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.nfs.nfs3;
+package org.apache.hadoop.security;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -27,14 +27,14 @@ import java.io.OutputStream;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.nfs.nfs3.IdUserGroup.PassThroughMap;
-import org.apache.hadoop.nfs.nfs3.IdUserGroup.StaticMapping;
+import org.apache.hadoop.security.ShellBasedIdMapping.PassThroughMap;
+import org.apache.hadoop.security.ShellBasedIdMapping.StaticMapping;
 import org.junit.Test;
 
 import com.google.common.collect.BiMap;
 import com.google.common.collect.HashBiMap;
 
-public class TestIdUserGroup {
+public class TestShellBasedIdMapping {
 
   private static final Map<Integer, Integer> EMPTY_PASS_THROUGH_MAP =
       new PassThroughMap<Integer>();
@@ -57,7 +57,7 @@ public class TestIdUserGroup {
     OutputStream out = new FileOutputStream(tempStaticMapFile);
     out.write(staticMapFileContents.getBytes());
     out.close();
-    StaticMapping parsedMap = IdUserGroup.parseStaticMap(tempStaticMapFile);
+    StaticMapping parsedMap = ShellBasedIdMapping.parseStaticMap(tempStaticMapFile);
 
     assertEquals(10, (int)parsedMap.uidMapping.get(100));
     assertEquals(11, (int)parsedMap.uidMapping.get(201));
@@ -93,9 +93,9 @@ public class TestIdUserGroup {
         + "mapred2:x:498\""
         + " | cut -d: -f1,3";
 
-    IdUserGroup.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":",
+    ShellBasedIdMapping.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":",
         uidStaticMap);
-    IdUserGroup.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":",
+    ShellBasedIdMapping.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":",
         gidStaticMap);
 
     assertEquals("hdfs", uMap.get(10));
@@ -133,7 +133,7 @@ public class TestIdUserGroup {
     BiMap<Integer, String> uMap = HashBiMap.create();
     BiMap<Integer, String> gMap = HashBiMap.create();
 
-    IdUserGroup.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":",
+    ShellBasedIdMapping.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":",
         EMPTY_PASS_THROUGH_MAP);
     assertEquals(5, uMap.size());
     assertEquals("root", uMap.get(0));
@@ -142,7 +142,7 @@ public class TestIdUserGroup {
     assertEquals("bin", uMap.get(2));
     assertEquals("daemon", uMap.get(1));
 
-    IdUserGroup.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":",
+    ShellBasedIdMapping.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":",
         EMPTY_PASS_THROUGH_MAP);
     assertTrue(gMap.size() == 3);
     assertEquals("hdfs",gMap.get(11501));
@@ -174,7 +174,7 @@ public class TestIdUserGroup {
     BiMap<Integer, String> uMap = HashBiMap.create();
     BiMap<Integer, String> gMap = HashBiMap.create();
 
-    IdUserGroup.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":",
+    ShellBasedIdMapping.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":",
         EMPTY_PASS_THROUGH_MAP);
     assertTrue(uMap.size() == 7);
     assertEquals("nfsnobody", uMap.get(-2));
@@ -185,7 +185,7 @@ public class TestIdUserGroup {
     assertEquals("hdfs",uMap.get(11501));
     assertEquals("daemon", uMap.get(2));
 
-    IdUserGroup.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":",
+    ShellBasedIdMapping.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":",
         EMPTY_PASS_THROUGH_MAP);
     assertTrue(gMap.size() == 7);
     assertEquals("hdfs",gMap.get(11501));
@@ -199,19 +199,19 @@ public class TestIdUserGroup {
 
   @Test
   public void testUserUpdateSetting() throws IOException {
-    IdUserGroup iug = new IdUserGroup(new Configuration());
+    ShellBasedIdMapping iug = new ShellBasedIdMapping(new Configuration());
     assertEquals(iug.getTimeout(),
-        Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_DEFAULT);
+        IdMappingConstant.USERGROUPID_UPDATE_MILLIS_DEFAULT);
 
     Configuration conf = new Configuration();
-    conf.setLong(Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY, 0);
-    iug = new IdUserGroup(conf);
-    assertEquals(iug.getTimeout(), Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_MIN);
+    conf.setLong(IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY, 0);
+    iug = new ShellBasedIdMapping(conf);
+    assertEquals(iug.getTimeout(), IdMappingConstant.USERGROUPID_UPDATE_MILLIS_MIN);
 
-    conf.setLong(Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY,
-        Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_DEFAULT * 2);
-    iug = new IdUserGroup(conf);
+    conf.setLong(IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY,
+        IdMappingConstant.USERGROUPID_UPDATE_MILLIS_DEFAULT * 2);
+    iug = new ShellBasedIdMapping(conf);
     assertEquals(iug.getTimeout(),
-        Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_DEFAULT * 2);
+        IdMappingConstant.USERGROUPID_UPDATE_MILLIS_DEFAULT * 2);
   }
 }
@@ -194,15 +194,11 @@ public class Nfs3Constant {
   public static final String NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY = "nfs.exports.cache.expirytime.millis";
   public static final long NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT = 15 * 60 * 1000; // 15 min
 
   /** Do user/group update every 15 minutes by default, minimum 1 minute */
+  @Deprecated
   public final static String NFS_USERGROUP_UPDATE_MILLIS_KEY = "nfs.usergroup.update.millis";
-  public final static long NFS_USERGROUP_UPDATE_MILLIS_DEFAULT = 15 * 60 * 1000; // ms
-  final static long NFS_USERGROUP_UPDATE_MILLIS_MIN = 1 * 60 * 1000; // ms
-
-  public final static String UNKNOWN_USER = "nobody";
-  public final static String UNKNOWN_GROUP = "nobody";
 
   // Used for finding the configured static mapping file.
+  @Deprecated
   public static final String NFS_STATIC_MAPPING_FILE_KEY = "nfs.static.mapping.file";
   public static final String NFS_STATIC_MAPPING_FILE_DEFAULT = "/etc/nfs.map";
 }
@@ -17,24 +17,26 @@
  */
 package org.apache.hadoop.oncrpc.security;
 
-import org.apache.hadoop.nfs.nfs3.IdUserGroup;
-import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.oncrpc.RpcCall;
+import org.apache.hadoop.security.IdMappingConstant;
+import org.apache.hadoop.security.IdMappingServiceProvider;
 
 public class SysSecurityHandler extends SecurityHandler {
 
-  private final IdUserGroup iug;
+  private final IdMappingServiceProvider iug;
   private final CredentialsSys mCredentialsSys;
 
   public SysSecurityHandler(CredentialsSys credentialsSys,
-      IdUserGroup iug) {
+      IdMappingServiceProvider iug) {
     this.mCredentialsSys = credentialsSys;
     this.iug = iug;
   }
 
   @Override
   public String getUser() {
-    return iug.getUserName(mCredentialsSys.getUID(), Nfs3Constant.UNKNOWN_USER);
+    return iug.getUserName(mCredentialsSys.getUID(),
+        IdMappingConstant.UNKNOWN_USER);
   }
 
   @Override
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.nfs.conf;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
+import org.apache.hadoop.security.IdMappingConstant;
 
 /**
  * Adds deprecated keys into the configuration.
@@ -41,7 +42,11 @@ public class NfsConfiguration extends HdfsConfiguration {
         new DeprecationDelta("dfs.nfs.exports.cache.expirytime.millis",
             Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY),
         new DeprecationDelta("hadoop.nfs.userupdate.milly",
-            Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY),
+            IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY),
+        new DeprecationDelta(Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY,
+            IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY),
+        new DeprecationDelta(Nfs3Constant.NFS_STATIC_MAPPING_FILE_KEY,
+            IdMappingConstant.STATIC_ID_MAPPING_FILE_KEY),
         new DeprecationDelta("dfs.nfs3.enableDump",
             NfsConfigKeys.DFS_NFS_FILE_DUMP_KEY),
         new DeprecationDelta("dfs.nfs3.dump.dir",
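
A minimal sketch (not part of this commit) of what such a DeprecationDelta buys: once registered, a value set under the retired NFS-specific key is readable through the new common key:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.Configuration.DeprecationDelta;
    import org.apache.hadoop.security.IdMappingConstant;

    public class DeprecationSketch {
      public static void main(String[] args) {
        // Same shape as the deltas registered above.
        Configuration.addDeprecations(new DeprecationDelta[] {
            new DeprecationDelta("nfs.usergroup.update.millis",
                IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY) });

        Configuration conf = new Configuration();
        conf.setLong("nfs.usergroup.update.millis", 10 * 60 * 1000);
        // The old key transparently resolves through the new one.
        System.out.println(conf.getLong(
            IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY, 0)); // 600000
      }
    }
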
@@ -24,12 +24,12 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.nfs.NfsFileType;
 import org.apache.hadoop.nfs.NfsTime;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
-import org.apache.hadoop.nfs.nfs3.IdUserGroup;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.response.WccAttr;
 import org.apache.hadoop.nfs.nfs3.response.WccData;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.security.IdMappingServiceProvider;
 import org.jboss.netty.buffer.ChannelBuffer;
 import org.jboss.netty.channel.Channel;
 
@@ -59,7 +59,7 @@ public class Nfs3Utils {
   }
 
   public static Nfs3FileAttributes getNfs3FileAttrFromFileStatus(
-      HdfsFileStatus fs, IdUserGroup iug) {
+      HdfsFileStatus fs, IdMappingServiceProvider iug) {
     /**
      * Some 32bit Linux client has problem with 64bit fileId: it seems the 32bit
      * client takes only the lower 32bit of the fileId and treats it as signed
@@ -75,7 +75,7 @@ public class Nfs3Utils {
   }
 
   public static Nfs3FileAttributes getFileAttr(DFSClient client,
-      String fileIdPath, IdUserGroup iug) throws IOException {
+      String fileIdPath, IdMappingServiceProvider iug) throws IOException {
     HdfsFileStatus fs = getFileStatus(client, fileIdPath);
     return fs == null ? null : getNfs3FileAttrFromFileStatus(fs, iug);
   }
@@ -100,7 +100,8 @@ public class Nfs3Utils {
 
   // TODO: maybe not efficient
   public static WccData createWccData(final WccAttr preOpAttr,
-      DFSClient dfsClient, final String fileIdPath, final IdUserGroup iug)
+      DFSClient dfsClient, final String fileIdPath,
+      final IdMappingServiceProvider iug)
       throws IOException {
     Nfs3FileAttributes postOpDirAttr = getFileAttr(dfsClient, fileIdPath, iug);
     return new WccData(preOpAttr, postOpDirAttr);
@@ -43,7 +43,6 @@ import org.apache.hadoop.hdfs.nfs.nfs3.WriteCtx.DataState;
 import org.apache.hadoop.io.BytesWritable.Comparator;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
-import org.apache.hadoop.nfs.nfs3.IdUserGroup;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
@@ -55,6 +54,7 @@ import org.apache.hadoop.nfs.nfs3.response.WccAttr;
 import org.apache.hadoop.nfs.nfs3.response.WccData;
 import org.apache.hadoop.oncrpc.XDR;
 import org.apache.hadoop.oncrpc.security.VerifierNone;
+import org.apache.hadoop.security.IdMappingServiceProvider;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.Time;
 import org.jboss.netty.channel.Channel;
@@ -101,7 +101,7 @@ class OpenFileCtx {
   }
 
   private final DFSClient client;
-  private final IdUserGroup iug;
+  private final IdMappingServiceProvider iug;
 
   // The stream status. False means the stream is closed.
   private volatile boolean activeState;
@@ -223,13 +223,13 @@ class OpenFileCtx {
   }
 
   OpenFileCtx(HdfsDataOutputStream fos, Nfs3FileAttributes latestAttr,
-      String dumpFilePath, DFSClient client, IdUserGroup iug) {
+      String dumpFilePath, DFSClient client, IdMappingServiceProvider iug) {
     this(fos, latestAttr, dumpFilePath, client, iug, false,
         new NfsConfiguration());
   }
 
   OpenFileCtx(HdfsDataOutputStream fos, Nfs3FileAttributes latestAttr,
-      String dumpFilePath, DFSClient client, IdUserGroup iug,
+      String dumpFilePath, DFSClient client, IdMappingServiceProvider iug,
       boolean aixCompatMode, NfsConfiguration config) {
     this.fos = fos;
     this.latestAttr = latestAttr;
@@ -439,7 +439,7 @@ class OpenFileCtx {
 
   public void receivedNewWrite(DFSClient dfsClient, WRITE3Request request,
       Channel channel, int xid, AsyncDataService asyncDataService,
-      IdUserGroup iug) {
+      IdMappingServiceProvider iug) {
 
     if (!activeState) {
       LOG.info("OpenFileCtx is inactive, fileId:"
@@ -594,7 +594,7 @@ class OpenFileCtx {
 
   /** Process an overwrite write request */
   private void processOverWrite(DFSClient dfsClient, WRITE3Request request,
-      Channel channel, int xid, IdUserGroup iug) {
+      Channel channel, int xid, IdMappingServiceProvider iug) {
     WccData wccData = new WccData(latestAttr.getWccAttr(), null);
     long offset = request.getOffset();
     int count = request.getCount();
@@ -653,7 +653,7 @@ class OpenFileCtx {
 
   private void receivedNewWriteInternal(DFSClient dfsClient,
       WRITE3Request request, Channel channel, int xid,
-      AsyncDataService asyncDataService, IdUserGroup iug) {
+      AsyncDataService asyncDataService, IdMappingServiceProvider iug) {
     WriteStableHow stableHow = request.getStableHow();
     WccAttr preOpAttr = latestAttr.getWccAttr();
     int count = request.getCount();
@@ -702,7 +702,7 @@ class OpenFileCtx {
    */
   private WRITE3Response processPerfectOverWrite(DFSClient dfsClient,
       long offset, int count, WriteStableHow stableHow, byte[] data,
-      String path, WccData wccData, IdUserGroup iug) {
+      String path, WccData wccData, IdMappingServiceProvider iug) {
     WRITE3Response response;
 
     // Read the content back
@@ -52,7 +52,6 @@ import org.apache.hadoop.nfs.NfsExports;
 import org.apache.hadoop.nfs.NfsFileType;
 import org.apache.hadoop.nfs.NfsTime;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
-import org.apache.hadoop.nfs.nfs3.IdUserGroup;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant.NFSPROC3;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
@@ -123,7 +122,10 @@ import org.apache.hadoop.oncrpc.security.SysSecurityHandler;
 import org.apache.hadoop.oncrpc.security.Verifier;
 import org.apache.hadoop.oncrpc.security.VerifierNone;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.IdMappingConstant;
+import org.apache.hadoop.security.IdMappingServiceProvider;
 import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.ShellBasedIdMapping;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.util.JvmPauseMonitor;
@@ -146,7 +148,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
   private final NfsConfiguration config;
   private final WriteManager writeManager;
-  private final IdUserGroup iug;
+  private final IdMappingServiceProvider iug;
   private final DFSClientCache clientCache;
 
   private final NfsExports exports;
@@ -171,7 +173,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     this.config = config;
     config.set(FsPermission.UMASK_LABEL, "000");
-    iug = new IdUserGroup(config);
+    iug = new ShellBasedIdMapping(config,
+        Nfs3Constant.NFS_STATIC_MAPPING_FILE_DEFAULT);
 
     aixCompatMode = config.getBoolean(
         NfsConfigKeys.AIX_COMPAT_MODE_KEY,
@@ -341,9 +344,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     if (updateFields.contains(SetAttrField.UID)
         || updateFields.contains(SetAttrField.GID)) {
       String uname = updateFields.contains(SetAttrField.UID) ? iug.getUserName(
-          newAttr.getUid(), Nfs3Constant.UNKNOWN_USER) : null;
+          newAttr.getUid(), IdMappingConstant.UNKNOWN_USER) : null;
       String gname = updateFields.contains(SetAttrField.GID) ? iug
-          .getGroupName(newAttr.getGid(), Nfs3Constant.UNKNOWN_GROUP) : null;
+          .getGroupName(newAttr.getGid(), IdMappingConstant.UNKNOWN_GROUP) : null;
       dfsClient.setOwner(fileIdPath, uname, gname);
     }
 
@@ -31,7 +31,6 @@ import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.nfs.NfsFileType;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
-import org.apache.hadoop.nfs.nfs3.IdUserGroup;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
@@ -41,6 +40,7 @@ import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
 import org.apache.hadoop.nfs.nfs3.response.WccData;
 import org.apache.hadoop.oncrpc.XDR;
 import org.apache.hadoop.oncrpc.security.VerifierNone;
+import org.apache.hadoop.security.IdMappingServiceProvider;
 import org.jboss.netty.channel.Channel;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -52,7 +52,7 @@ public class WriteManager {
   public static final Log LOG = LogFactory.getLog(WriteManager.class);
 
   private final NfsConfiguration config;
-  private final IdUserGroup iug;
+  private final IdMappingServiceProvider iug;
 
   private AsyncDataService asyncDataService;
   private boolean asyncDataServiceStarted = false;
@@ -80,7 +80,7 @@ public class WriteManager {
     return fileContextCache.put(h, ctx);
   }
 
-  WriteManager(IdUserGroup iug, final NfsConfiguration config,
+  WriteManager(IdMappingServiceProvider iug, final NfsConfiguration config,
       boolean aixCompatMode) {
     this.iug = iug;
     this.config = config;
@@ -315,7 +315,7 @@ public class WriteManager {
    * If the file is in cache, update the size based on the cached data size
    */
   Nfs3FileAttributes getFileAttr(DFSClient client, FileHandle fileHandle,
-      IdUserGroup iug) throws IOException {
+      IdMappingServiceProvider iug) throws IOException {
     String fileIdPath = Nfs3Utils.getFileIdPath(fileHandle);
     Nfs3FileAttributes attr = Nfs3Utils.getFileAttr(client, fileIdPath, iug);
     if (attr != null) {
@@ -28,8 +28,8 @@ import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
-import org.apache.hadoop.nfs.nfs3.IdUserGroup;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
+import org.apache.hadoop.security.ShellBasedIdMapping;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -49,15 +49,15 @@ public class TestOpenFileCtxCache {
     Mockito.when(fos.getPos()).thenReturn((long) 0);
 
     OpenFileCtx context1 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup(new NfsConfiguration()));
+        dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
     OpenFileCtx context2 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup(new NfsConfiguration()));
+        dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
     OpenFileCtx context3 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup(new NfsConfiguration()));
+        dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
     OpenFileCtx context4 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup(new NfsConfiguration()));
+        dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
     OpenFileCtx context5 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup(new NfsConfiguration()));
+        dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
 
     OpenFileCtxCache cache = new OpenFileCtxCache(conf, 10 * 60 * 100);
 
@@ -108,13 +108,13 @@ public class TestOpenFileCtxCache {
     Mockito.when(fos.getPos()).thenReturn((long) 0);
 
     OpenFileCtx context1 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup(new NfsConfiguration()));
+        dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
     OpenFileCtx context2 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup(new NfsConfiguration()));
+        dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
     OpenFileCtx context3 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup(new NfsConfiguration()));
+        dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
     OpenFileCtx context4 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup(new NfsConfiguration()));
+        dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
 
     OpenFileCtxCache cache = new OpenFileCtxCache(conf, 10 * 60 * 100);
 
@@ -89,6 +89,7 @@ import org.apache.hadoop.nfs.nfs3.response.SYMLINK3Response;
 import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
 import org.apache.hadoop.oncrpc.XDR;
 import org.apache.hadoop.oncrpc.security.SecurityHandler;
+import org.apache.hadoop.security.IdMappingConstant;
 import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.jboss.netty.channel.Channel;
@@ -802,7 +803,7 @@ public class TestRpcProgramNfs3 {
         Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY, 0) == 1000);
 
     conf.setInt("hadoop.nfs.userupdate.milly", 10);
-    assertTrue(conf.getInt(Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY, 0) == 10);
+    assertTrue(conf.getInt(IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY, 0) == 10);
 
     conf.set("dfs.nfs3.dump.dir", "/nfs/tmp");
     assertTrue(conf.get(NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY).equals(
@@ -37,7 +37,6 @@ import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
-import org.apache.hadoop.nfs.nfs3.IdUserGroup;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
@@ -50,6 +49,7 @@ import org.apache.hadoop.nfs.nfs3.response.CREATE3Response;
 import org.apache.hadoop.nfs.nfs3.response.READ3Response;
 import org.apache.hadoop.oncrpc.XDR;
 import org.apache.hadoop.oncrpc.security.SecurityHandler;
+import org.apache.hadoop.security.ShellBasedIdMapping;
 import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.jboss.netty.channel.Channel;
@@ -141,7 +141,7 @@ public class TestWrites {
     NfsConfiguration conf = new NfsConfiguration();
     conf.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, false);
     OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
-        new IdUserGroup(conf), false, conf);
+        new ShellBasedIdMapping(conf), false, conf);
 
     COMMIT_STATUS ret;
 
@@ -207,7 +207,7 @@ public class TestWrites {
     NfsConfiguration conf = new NfsConfiguration();
     conf.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, true);
     OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
-        new IdUserGroup(conf), false, conf);
+        new ShellBasedIdMapping(conf), false, conf);
 
     COMMIT_STATUS ret;
 
@@ -273,7 +273,7 @@ public class TestWrites {
     conf.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, false);
     // Enable AIX compatibility mode.
     OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
-        new IdUserGroup(new NfsConfiguration()), true, conf);
+        new ShellBasedIdMapping(new NfsConfiguration()), true, conf);
 
     // Test fall-through to pendingWrites check in the event that commitOffset
     // is greater than the number of bytes we've so far flushed.
@@ -303,11 +303,11 @@ public class TestWrites {
 
     config.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, false);
     OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
-        new IdUserGroup(config), false, config);
+        new ShellBasedIdMapping(config), false, config);
 
     FileHandle h = new FileHandle(1); // fake handle for "/dumpFilePath"
     COMMIT_STATUS ret;
-    WriteManager wm = new WriteManager(new IdUserGroup(config), config, false);
+    WriteManager wm = new WriteManager(new ShellBasedIdMapping(config), config, false);
     assertTrue(wm.addOpenFileStream(h, ctx));
 
     // Test inactive open file context
@@ -372,11 +372,11 @@ public class TestWrites {
 
     config.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, true);
     OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
-        new IdUserGroup(config), false, config);
+        new ShellBasedIdMapping(config), false, config);
 
     FileHandle h = new FileHandle(1); // fake handle for "/dumpFilePath"
     COMMIT_STATUS ret;
-    WriteManager wm = new WriteManager(new IdUserGroup(config), config, false);
+    WriteManager wm = new WriteManager(new ShellBasedIdMapping(config), config, false);
     assertTrue(wm.addOpenFileStream(h, ctx));
 
     // Test inactive open file context
@@ -367,7 +367,7 @@ HDFS NFS Gateway
   file in the event one wishes to access the HDFS NFS Gateway from a system with
   a completely disparate set of UIDs/GIDs. By default this file is located at
   "/etc/nfs.map", but a custom location can be configured by setting the
-  "nfs.static.mapping.file" property to the path of the static mapping file.
+  "static.id.mapping.file" property to the path of the static mapping file.
   The format of the static mapping file is similar to what is described in the
   exports(5) manual page, but roughly it is:
 
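
An illustrative static mapping file (the concrete ids below are made up; see the gateway documentation for the authoritative format):

    # remote-id -> local-id
    uid 10 100   # Map the remote UID 10 to the local UID 100
    gid 11 101   # Map the remote GID 11 to the local GID 101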