HDFS-4755. Fix AccessControlException message and moves "implements LinkedElement" from INode to INodeWithAdditionalFields.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1476009 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2013-04-26 01:05:47 +00:00
parent 43ac0739ce
commit 76b80b48ec
6 changed files with 42 additions and 27 deletions


@@ -305,3 +305,6 @@ Branch-2802 Snapshot (Unreleased)
   HDFS-4742. Fix appending to a renamed file with snapshot. (Jing Zhao via
   szetszwo)
 
+  HDFS-4755. Fix AccessControlException message and moves "implements
+  LinkedElement" from INode to INodeWithAdditionalFields. (szetszwo)


@@ -116,7 +116,7 @@ public class FSDirectory implements Closeable {
   private final int maxComponentLength;
   private final int maxDirItems;
   private final int lsLimit; // max list limit
-  private GSet<INode, INode> inodeMap; // Synchronized by dirLock
+  private GSet<INode, INodeWithAdditionalFields> inodeMap; // Synchronized by dirLock
 
   // lock to protect the directory and BlockMap
   private ReentrantReadWriteLock dirLock;
@@ -181,12 +181,12 @@ public class FSDirectory implements Closeable {
     namesystem = ns;
   }
 
-  @VisibleForTesting
-  static LightWeightGSet<INode, INode> initInodeMap(INodeDirectory rootDir) {
+  private static GSet<INode, INodeWithAdditionalFields> initInodeMap(
+      INodeDirectory rootDir) {
     // Compute the map capacity by allocating 1% of total memory
     int capacity = LightWeightGSet.computeCapacity(1, "INodeMap");
-    LightWeightGSet<INode, INode> map = new LightWeightGSet<INode, INode>(
-        capacity);
+    GSet<INode, INodeWithAdditionalFields> map
+        = new LightWeightGSet<INode, INodeWithAdditionalFields>(capacity);
     map.put(rootDir);
     return map;
   }
@@ -1466,7 +1466,7 @@ public class FSDirectory implements Closeable {
     Preconditions.checkState(hasWriteLock());
 
     oldnode.getParent().replaceChild(oldnode, newnode);
-    inodeMap.put(newnode);
+    addToInodeMapUnprotected(newnode);
     oldnode.clear();
 
     /* Currently oldnode and newnode are assumed to contain the same
@@ -2200,7 +2200,7 @@ public class FSDirectory implements Closeable {
     } else {
       // update parent node
       iip.setINode(pos - 1, child.getParent());
-      inodeMap.put(child);
+      addToInodeMapUnprotected(child);
     }
     return added;
   }
@@ -2232,7 +2232,7 @@ public class FSDirectory implements Closeable {
     }
     if (parent != last.getParent()) {
       // parent is changed
-      inodeMap.put(last.getParent());
+      addToInodeMapUnprotected(last.getParent());
       iip.setINode(-2, last.getParent());
     }
@@ -2278,7 +2278,9 @@ public class FSDirectory implements Closeable {
   /** This method is always called with writeLock held */
   final void addToInodeMapUnprotected(INode inode) {
-    inodeMap.put(inode);
+    if (inode instanceof INodeWithAdditionalFields) {
+      inodeMap.put((INodeWithAdditionalFields)inode);
+    }
   }
 
   /* This method is always called with writeLock held */


@@ -560,9 +560,7 @@ public class FSImageFormat {
     final byte[] localName = FSImageSerialization.readLocalName(in);
     INode inode = loadINode(localName, isSnapshotINode, in);
     if (LayoutVersion.supports(Feature.ADD_INODE_ID, getLayoutVersion())) {
-      if (!inode.isReference()) { // reference node does not have its id
-        namesystem.dir.addToInodeMapUnprotected(inode);
-      }
+      namesystem.dir.addToInodeMapUnprotected(inode);
     }
     return inode;
   }
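The image loader can drop its isReference() check because that filtering now happens inside addToInodeMapUnprotected itself (the FSDirectory hunk above). A toy, self-contained sketch of that shape, with every class and method name invented for illustration rather than taken from HDFS:

import java.util.HashSet;
import java.util.Set;

abstract class Node {}
class StorableNode extends Node {}   // plays the role of INodeWithAdditionalFields
class ReferenceNode extends Node {}  // plays the role of a reference inode

class NodeMap {
  private final Set<StorableNode> stored = new HashSet<StorableNode>();

  void add(Node node) {
    if (node instanceof StorableNode) { // same guard as addToInodeMapUnprotected
      stored.add((StorableNode) node);
    }
  }

  public static void main(String[] args) {
    NodeMap map = new NodeMap();
    map.add(new StorableNode());          // kept
    map.add(new ReferenceNode());         // silently skipped
    System.out.println(map.stored.size()); // prints 1
  }
}

Callers hand over whatever inode they have and the helper decides whether it belongs in the map, so the call sites in FSDirectory and FSImageFormat no longer carry their own checks.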


@@ -42,6 +42,15 @@ import org.apache.hadoop.security.UserGroupInformation;
  */
 class FSPermissionChecker {
   static final Log LOG = LogFactory.getLog(UserGroupInformation.class);
+
+  /** @return a string for throwing {@link AccessControlException} */
+  private static String toAccessControlString(INode inode) {
+    return "\"" + inode.getFullPathName() + "\":"
+        + inode.getUserName() + ":" + inode.getGroupName()
+        + ":" + (inode.isDirectory()? "d": "-") + inode.getFsPermission();
+  }
+
   private final UserGroupInformation ugi;
   private final String user;
   /** A set with group namess. Not synchronized since it is unmodifiable */
@@ -224,7 +233,7 @@ class FSPermissionChecker {
       if (mode.getOtherAction().implies(access)) { return; }
     }
     throw new AccessControlException("Permission denied: user=" + user
-        + ", access=" + access + ", inode=" + inode.getFullPathName());
+        + ", access=" + access + ", inode=" + toAccessControlString(inode));
   }
 
   /** Guarded by {@link FSNamesystem#readLock()} */
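For illustration only (the path, user, group, and permission bits below are invented), toAccessControlString quotes the inode path and appends its owner, group, and mode, so a denial now reads roughly:

    Permission denied: user=alice, access=WRITE, inode="/foo/bar":hdfs:supergroup:drwxr-xr-x

rather than ending at the bare path, which makes it possible to tell from the message alone which owner/group/mode combination rejected the request.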


@@ -40,7 +40,6 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.Diff;
-import org.apache.hadoop.hdfs.util.LightWeightGSet.LinkedElement;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -52,7 +51,7 @@ import com.google.common.base.Preconditions;
  * directory inodes.
  */
 @InterfaceAudience.Private
-public abstract class INode implements Diff.Element<byte[]>, LinkedElement {
+public abstract class INode implements Diff.Element<byte[]> {
   public static final Log LOG = LogFactory.getLog(INode.class);
 
   /** parent is either an {@link INodeDirectory} or an {@link INodeReference}.*/
@@ -110,7 +109,6 @@ public abstract class INode implements Diff.Element<byte[]>, LinkedElement {
    * @return group name
    */
   abstract String getGroupName(Snapshot snapshot);
-  protected LinkedElement next = null;
 
   /** The same as getGroupName(null). */
   public final String getGroupName() {
@@ -742,14 +740,4 @@ public abstract class INode implements Diff.Element<byte[]>, LinkedElement {
       toDeleteList.clear();
     }
   }
-
-  @Override
-  public void setNext(LinkedElement next) {
-    this.next = next;
-  }
-
-  @Override
-  public LinkedElement getNext() {
-    return next;
-  }
 }


@@ -22,6 +22,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.LightWeightGSet.LinkedElement;
 
 import com.google.common.base.Preconditions;
@@ -30,7 +31,8 @@ import com.google.common.base.Preconditions;
  * access time and modification time.
  */
 @InterfaceAudience.Private
-public abstract class INodeWithAdditionalFields extends INode {
+public abstract class INodeWithAdditionalFields extends INode
+    implements LinkedElement {
   private static enum PermissionStatusFormat {
     MODE(0, 16),
     GROUP(MODE.OFFSET + MODE.LENGTH, 25),
@@ -91,6 +93,9 @@ public abstract class INodeWithAdditionalFields extends INode {
   /** The last access time*/
   private long accessTime = 0L;
 
+  /** For implementing {@link LinkedElement}. */
+  private LinkedElement next = null;
+
   private INodeWithAdditionalFields(INode parent, long id, byte[] name,
       long permission, long modificationTime, long accessTime) {
     super(parent);
@@ -114,6 +119,16 @@ public abstract class INodeWithAdditionalFields extends INode {
         other.permission, other.modificationTime, other.accessTime);
   }
 
+  @Override
+  public void setNext(LinkedElement next) {
+    this.next = next;
+  }
+
+  @Override
+  public LinkedElement getNext() {
+    return next;
+  }
+
   /** Get inode id */
   public final long getId() {
     return this.id;