HDFS-5618. NameNode: persist ACLs in fsimage. Contributed by Haohui Mai.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4685@1553221 13f79535-47bb-0310-9956-ffa450edef68
Chris Nauroth 2013-12-23 21:45:03 +00:00
parent 72b0b1881f
commit 25e9d2e895
6 changed files with 68 additions and 11 deletions

View File

@@ -16,6 +16,8 @@ HDFS-4685 (Unreleased)
HDFS-5685. Implement ACL as a INode feature. (Haohui Mai via cnauroth)
HDFS-5618. NameNode: persist ACLs in fsimage. (Haohui Mai via cnauroth)
OPTIMIZATIONS
BUG FIXES

View File

@@ -109,7 +109,8 @@ public static enum Feature {
+ "enable rebuilding retry cache in case of HA failover"),
CACHING(-48, "Support for cache pools and path-based caching"),
ADD_DATANODE_AND_STORAGE_UUIDS(-49, "Replace StorageID with DatanodeUuid."
+ " Use distinct StorageUuid per storage directory.");
+ " Use distinct StorageUuid per storage directory."),
EXTENDED_ACL(-50, "Extended ACL");
final int lv;
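
EXTENDED_ACL takes the next free layout version, -50. Layout versions grow more
negative as features are added, so an fsimage written at -50 or any later (more
negative) version is expected to carry the new ACL section. A minimal sketch of
that check, as a hypothetical standalone helper rather than the real
LayoutVersion.supports implementation:

    // Hedged sketch: mirrors the intent of LayoutVersion.supports for the
    // EXTENDED_ACL feature only, not the actual implementation. Layout
    // versions are negative and decrease over time, so "written at or after
    // the feature was introduced" means numerically less than or equal.
    public class ExtendedAclVersionCheck {
      private static final int EXTENDED_ACL_LV = -50;  // value added by this patch

      public static boolean imageHasExtendedAcl(int imageLayoutVersion) {
        return imageLayoutVersion <= EXTENDED_ACL_LV;
      }

      public static void main(String[] args) {
        System.out.println(imageHasExtendedAcl(-50));  // true: ACL section present
        System.out.println(imageHasExtendedAcl(-49));  // false: pre-ACL image
      }
    }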

View File

@@ -48,6 +48,8 @@
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclFsImageProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -102,6 +104,7 @@
* } when {@link Feature#FSIMAGE_NAME_OPTIMIZATION} is not supported
* replicationFactor: short, modificationTime: long,
* accessTime: long, preferredBlockSize: long,
*
* numberOfBlocks: int (-1 for INodeDirectory, -2 for INodeSymLink),
* {
* nsQuota: long, dsQuota: long,
@@ -109,7 +112,11 @@
* isINodeSnapshottable: byte,
* isINodeWithSnapshot: byte (if isINodeSnapshottable is false)
* } (when {@link Feature#SNAPSHOT} is supported),
* fsPermission: short, PermissionStatus
* fsPermission: short, PermissionStatus,
* AclEntries {
* size: int,
* protobuf encoding of {@link AclFsImageProto}
* }(when {@link Feature#EXTENDED_ACL} is supported),
* } for INodeDirectory
* or
* {
@@ -124,9 +131,12 @@
* {clientName: short + byte[], clientMachine: short + byte[]} (when
* isINodeFileUnderConstructionSnapshot is true),
* } (when {@link Feature#SNAPSHOT} is supported and writing snapshotINode),
* fsPermission: short, PermissionStatus
* } for INodeFile
* }
* fsPermission: short, PermissionStatus,
* AclEntries {
* size: int,
* protobuf encoding of {@link AclFsImageProto}
* }(when {@link Feature#EXTENDED_ACL} is supported),
* } for INodeFile,
*
* INodeDirectoryInfo {
* fullPath of the directory: short + byte[],
@@ -696,10 +706,15 @@ INode loadINode(final byte[] localName, boolean isSnapshotINode,
modificationTime, atime, blocks, replication, blockSize);
if (underConstruction) {
file.toUnderConstruction(clientName, clientMachine, null);
return fileDiffs == null ? file : new INodeFile(file, fileDiffs);
} else {
return fileDiffs == null ? file : new INodeFile(file, fileDiffs);
}
AclFeature aclFeature = loadAclFeature(in, imgVersion);
if (aclFeature != null) {
file.addAclFeature(aclFeature);
}
return fileDiffs == null ? file : new INodeFile(file, fileDiffs);
} else if (numBlocks == -1) {
//directory
@@ -731,6 +746,12 @@ INode loadINode(final byte[] localName, boolean isSnapshotINode,
if (nsQuota >= 0 || dsQuota >= 0) {
dir.addDirectoryWithQuotaFeature(nsQuota, dsQuota);
}
AclFeature aclFeature = loadAclFeature(in, imgVersion);
if (aclFeature != null) {
dir.addAclFeature(aclFeature);
}
if (withSnapshot) {
dir.addSnapshotFeature(null);
}
@@ -771,6 +792,18 @@ INode loadINode(final byte[] localName, boolean isSnapshotINode,
throw new IOException("Unknown inode type: numBlocks=" + numBlocks);
}
private AclFeature loadAclFeature(DataInput in, final int imgVersion)
throws IOException {
AclFeature aclFeature = null;
if (LayoutVersion.supports(Feature.EXTENDED_ACL, imgVersion)) {
AclFsImageProto p = AclFsImageProto
.parseDelimitedFrom((DataInputStream) in);
aclFeature = new AclFeature();
aclFeature.setEntries(PBHelper.convertAclEntry(p.getEntriesList()));
}
return aclFeature;
}
/** Load {@link INodeFileAttributes}. */
public INodeFileAttributes loadINodeFileAttributes(DataInput in)
throws IOException {
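
The format comment and loadAclFeature above describe the new on-disk section:
for images at layout version -50 or newer, each file and directory inode's
PermissionStatus is followed by one length-delimited AclFsImageProto record. A
rough sketch of that round trip in isolation, assuming the branch's generated
AclProtos classes and the PBHelper converters used in this patch are on the
classpath; the byte-array streams and the empty entry list are placeholders for
illustration:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.util.Collections;
    import java.util.List;

    import org.apache.hadoop.fs.permission.AclEntry;
    import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclFsImageProto;
    import org.apache.hadoop.hdfs.protocolPB.PBHelper;

    public class AclImageRoundTrip {
      public static void main(String[] args) throws IOException {
        // Writer side (cf. writeAclFeature below): wrap the ACL entries in an
        // AclFsImageProto and append it as a single length-delimited record.
        List<AclEntry> entries = Collections.<AclEntry>emptyList();  // placeholder
        ByteArrayOutputStream image = new ByteArrayOutputStream();
        AclFsImageProto.newBuilder()
            .addAllEntries(PBHelper.convertAclEntryProto(entries))
            .build()
            .writeDelimitedTo(image);

        // Reader side (cf. loadAclFeature above): parse the same delimited
        // record and convert the protobuf entries back into AclEntry objects.
        AclFsImageProto p = AclFsImageProto.parseDelimitedFrom(
            new ByteArrayInputStream(image.toByteArray()));
        List<AclEntry> loaded = PBHelper.convertAclEntry(p.getEntriesList());
        System.out.println("round-tripped " + loaded.size() + " ACL entries");
      }
    }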

View File

@@ -21,6 +21,7 @@
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -34,6 +35,8 @@
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclFsImageProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
@@ -204,6 +207,7 @@ public static void writeINodeFile(INodeFile file, DataOutput out,
}
writePermissionStatus(file, out);
writeAclFeature(file, out);
}
/** Serialize an {@link INodeFileAttributes}. */
@@ -249,8 +253,9 @@ public static void writeINodeDirectory(INodeDirectory node, DataOutput out)
}
writePermissionStatus(node, out);
writeAclFeature(node, out);
}
/**
* Serialize a {@link INodeDirectory}
* @param a The node to write
@@ -282,7 +287,19 @@ private static void writeINodeSymlink(INodeSymlink node, DataOutput out)
Text.writeString(out, node.getSymlinkString());
writePermissionStatus(node, out);
}
private static void writeAclFeature(INodeWithAdditionalFields node,
DataOutput out) throws IOException {
AclFsImageProto.Builder b = AclFsImageProto.newBuilder();
OutputStream os = (OutputStream) out;
AclFeature feature = node.getAclFeature();
if (feature != null)
b.addAllEntries(PBHelper.convertAclEntryProto(feature.getEntries()));
b.build().writeDelimitedTo(os);
}
/** Serialize a {@link INodeReference} node */
private static void writeINodeReference(INodeReference ref, DataOutput out,
boolean writeUnderConstruction, ReferenceMap referenceMap
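
Note the design of writeAclFeature: an AclFsImageProto record is written for
every file and directory inode, with an empty entry list when the inode has no
AclFeature, so the loader can call parseDelimitedFrom unconditionally on images
at layout version -50 or newer instead of needing a separate presence flag. The
cast of the DataOutput to OutputStream relies on the caller handing in a
DataOutputStream, which the image saver does, mirroring the (DataInputStream)
cast on the read side in loadAclFeature.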

View File

@@ -126,7 +126,7 @@ class ImageLoaderCurrent implements ImageLoader {
new SimpleDateFormat("yyyy-MM-dd HH:mm");
private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
-24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39,
-40, -41, -42, -43, -44, -45, -46, -47, -48, -49 };
-40, -41, -42, -43, -44, -45, -46, -47, -48, -49, -50 };
private int imageVersion = 0;
private final Map<Long, Boolean> subtreeMap = new HashMap<Long, Boolean>();
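
Adding -50 to this versions array registers the new layout version with the
offline image viewer; ImageLoaderCurrent only agrees to load image versions
that appear in this list, so without it the viewer would reject fsimage files
written after this change.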

View File

@@ -60,6 +60,10 @@
repeated AclEntryProto entries = 4;
}
message AclFsImageProto {
repeated AclEntryProto entries = 1;
}
message ModifyAclEntriesRequestProto {
required string src = 1;
repeated AclEntryProto aclSpec = 2;
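
AclFsImageProto carries no information beyond the repeated AclEntryProto list;
it exists because the protobuf delimited read/write calls used above
(writeDelimitedTo / parseDelimitedFrom) operate on whole messages, so the entry
list needs a wrapper message before it can be stored as a single size-prefixed
record inside the fsimage.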