diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4685.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4685.txt
index a49e85ef126..5e2e55050fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4685.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4685.txt
@@ -16,6 +16,8 @@ HDFS-4685 (Unreleased)
     HDFS-5685. Implement ACL as a INode feature. (Haohui Mai via cnauroth)
 
+    HDFS-5618. NameNode: persist ACLs in fsimage. (Haohui Mai via cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
index 094e4793cf2..9cd670a6475 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
@@ -109,7 +109,8 @@ public static enum Feature {
         + "enable rebuilding retry cache in case of HA failover"),
     CACHING(-48, "Support for cache pools and path-based caching"),
     ADD_DATANODE_AND_STORAGE_UUIDS(-49, "Replace StorageID with DatanodeUuid."
-        + " Use distinct StorageUuid per storage directory.");
+        + " Use distinct StorageUuid per storage directory."),
+    EXTENDED_ACL(-50, "Extended ACL");
 
     final int lv;
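The new EXTENDED_ACL(-50) layout version is what gates the extra per-inode record on both the load and save paths below. Layout version numbers grow more negative as features are added, so LayoutVersion.supports(Feature.EXTENDED_ACL, imgVersion) holds for images at -50 or newer and fails for anything older, which keeps pre-ACL images readable. A minimal test-style sketch (the class name is illustrative; LayoutVersion.supports is the existing API the patch calls in loadAclFeature below):

import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;

public class ExtendedAclGateSketch {
  public static void main(String[] args) {
    // An image written at layout -50 (or any more-negative, i.e. newer,
    // version) carries the ACL record after each inode's PermissionStatus.
    System.out.println(LayoutVersion.supports(Feature.EXTENDED_ACL, -50)); // true
    // A -49 image predates the feature, so no ACL record is expected.
    System.out.println(LayoutVersion.supports(Feature.EXTENDED_ACL, -49)); // false
  }
}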
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index fe2929571bc..9aef7a6f0d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -48,6 +48,8 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
+import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclFsImageProto;
+import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -102,6 +104,7 @@
  *   } when {@link Feature#FSIMAGE_NAME_OPTIMIZATION} is not supported
  *   replicationFactor: short, modificationTime: long,
  *   accessTime: long, preferredBlockSize: long,
+ *
  *   numberOfBlocks: int (-1 for INodeDirectory, -2 for INodeSymLink),
  *   {
  *     nsQuota: long, dsQuota: long,
@@ -109,7 +112,11 @@
  *       isINodeSnapshottable: byte,
  *       isINodeWithSnapshot: byte (if isINodeSnapshottable is false)
  *     } (when {@link Feature#SNAPSHOT} is supported),
- *     fsPermission: short, PermissionStatus
+ *     fsPermission: short, PermissionStatus,
+ *     AclEntries {
+ *       size: int,
+ *       protobuf encoding of {@link AclFsImageProto}
+ *     }(when {@link Feature#EXTENDED_ACL} is supported),
  *   } for INodeDirectory
  *   or
  *   {
@@ -124,9 +131,12 @@
  *     {clientName: short + byte[], clientMachine: short + byte[]} (when
  *     isINodeFileUnderConstructionSnapshot is true),
  *   } (when {@link Feature#SNAPSHOT} is supported and writing snapshotINode),
- *   fsPermission: short, PermissionStatus
- * } for INodeFile
- * }
+ *   fsPermission: short, PermissionStatus,
+ *   AclEntries {
+ *     size: int,
+ *     protobuf encoding of {@link AclFsImageProto}
+ *   }(when {@link Feature#EXTENDED_ACL} is supported),
+ * } for INodeFile,
  *
  * INodeDirectoryInfo {
  *   fullPath of the directory: short + byte[],
@@ -696,10 +706,15 @@ INode loadINode(final byte[] localName, boolean isSnapshotINode,
           modificationTime, atime, blocks, replication, blockSize);
       if (underConstruction) {
         file.toUnderConstruction(clientName, clientMachine, null);
-        return fileDiffs == null ? file : new INodeFile(file, fileDiffs);
-      } else {
-        return fileDiffs == null ? file : new INodeFile(file, fileDiffs);
       }
+
+      AclFeature aclFeature = loadAclFeature(in, imgVersion);
+      if (aclFeature != null) {
+        file.addAclFeature(aclFeature);
+      }
+
+      return fileDiffs == null ? file : new INodeFile(file, fileDiffs);
+
     } else if (numBlocks == -1) {
       //directory
@@ -731,6 +746,12 @@ INode loadINode(final byte[] localName, boolean isSnapshotINode,
       if (nsQuota >= 0 || dsQuota >= 0) {
         dir.addDirectoryWithQuotaFeature(nsQuota, dsQuota);
       }
+
+      AclFeature aclFeature = loadAclFeature(in, imgVersion);
+      if (aclFeature != null) {
+        dir.addAclFeature(aclFeature);
+      }
+
       if (withSnapshot) {
         dir.addSnapshotFeature(null);
       }
@@ -771,6 +792,18 @@ INode loadINode(final byte[] localName, boolean isSnapshotINode,
         throw new IOException("Unknown inode type: numBlocks=" + numBlocks);
   }
 
+  private AclFeature loadAclFeature(DataInput in, final int imgVersion)
+      throws IOException {
+    AclFeature aclFeature = null;
+    if (LayoutVersion.supports(Feature.EXTENDED_ACL, imgVersion)) {
+      AclFsImageProto p = AclFsImageProto
+          .parseDelimitedFrom((DataInputStream) in);
+      aclFeature = new AclFeature();
+      aclFeature.setEntries(PBHelper.convertAclEntry(p.getEntriesList()));
+    }
+    return aclFeature;
+  }
+
   /** Load {@link INodeFileAttributes}. */
   public INodeFileAttributes loadINodeFileAttributes(DataInput in)
       throws IOException {
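loadAclFeature pairs with writeAclFeature (in FSImageSerialization below) through protobuf's self-delimiting stream helpers: writeDelimitedTo prefixes the message with a varint-encoded byte length (the "size: int" in the layout comment above), and parseDelimitedFrom consumes that prefix plus exactly that many payload bytes, leaving the stream positioned at the next fsimage field. A round-trip sketch against the generated AclFsImageProto (class name illustrative):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclFsImageProto;

public class AclRecordFramingSketch {
  public static void main(String[] args) throws Exception {
    ByteArrayOutputStream out = new ByteArrayOutputStream();

    // A message with no entries serializes to zero payload bytes, so the
    // delimited record is a single 0x00 length byte. This is what
    // writeAclFeature emits for an inode that has no AclFeature.
    AclFsImageProto.getDefaultInstance().writeDelimitedTo(out);
    System.out.println(out.size()); // 1

    // The reader consumes the varint length, then the payload.
    AclFsImageProto read = AclFsImageProto
        .parseDelimitedFrom(new ByteArrayInputStream(out.toByteArray()));
    System.out.println(read.getEntriesCount()); // 0
  }
}

Because writeAclFeature always writes a record, even an empty one, the loader can unconditionally call parseDelimitedFrom whenever the image's layout version supports EXTENDED_ACL; no per-inode presence flag is needed.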
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
index 9d3fbcb6f7e..0ab480149be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
@@ -21,6 +21,7 @@
 import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.IOException;
+import java.io.OutputStream;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -34,6 +35,8 @@
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
+import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclFsImageProto;
+import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
@@ -204,6 +207,7 @@ public static void writeINodeFile(INodeFile file, DataOutput out,
     }
 
     writePermissionStatus(file, out);
+    writeAclFeature(file, out);
   }
 
   /** Serialize an {@link INodeFileAttributes}. */
@@ -249,8 +253,9 @@ public static void writeINodeDirectory(INodeDirectory node, DataOutput out)
     }
 
     writePermissionStatus(node, out);
+    writeAclFeature(node, out);
   }
-  
+
   /**
    * Serialize a {@link INodeDirectory}
    * @param a The node to write
@@ -282,7 +287,19 @@ private static void writeINodeSymlink(INodeSymlink node, DataOutput out)
     Text.writeString(out, node.getSymlinkString());
     writePermissionStatus(node, out);
   }
-  
+
+  private static void writeAclFeature(INodeWithAdditionalFields node,
+      DataOutput out) throws IOException {
+    AclFsImageProto.Builder b = AclFsImageProto.newBuilder();
+    OutputStream os = (OutputStream) out;
+
+    AclFeature feature = node.getAclFeature();
+    if (feature != null)
+      b.addAllEntries(PBHelper.convertAclEntryProto(feature.getEntries()));
+
+    b.build().writeDelimitedTo(os);
+  }
+
   /** Serialize a {@link INodeReference} node */
   private static void writeINodeReference(INodeReference ref, DataOutput out,
       boolean writeUnderConstruction, ReferenceMap referenceMap
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
index c851cb59a1a..8415c6bc30d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
@@ -126,7 +126,7 @@ class ImageLoaderCurrent implements ImageLoader {
       new SimpleDateFormat("yyyy-MM-dd HH:mm");
   private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
       -24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39,
-      -40, -41, -42, -43, -44, -45, -46, -47, -48, -49 };
+      -40, -41, -42, -43, -44, -45, -46, -47, -48, -49, -50 };
   private int imageVersion = 0;
   private final Map<Long, String> subtreeMap = new HashMap<Long, String>();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/acl.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/acl.proto
index 0d01dd9e634..7c7b2d2e00b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/acl.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/acl.proto
@@ -60,6 +60,10 @@ message AclStatusProto {
   repeated AclEntryProto entries = 4;
 }
 
+message AclFsImageProto {
+  repeated AclEntryProto entries = 1;
+}
+
 message ModifyAclEntriesRequestProto {
   required string src = 1;
   repeated AclEntryProto aclSpec = 2;
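Since AclFsImageProto reuses AclEntryProto, the fsimage path shares PBHelper's existing entry conversions with the RPC path in both directions. A save/load symmetry sketch; the PBHelper calls are the ones the patch uses, while the AclEntry builder calls mirror the public ACL model on this branch and should be treated as illustrative:

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclFsImageProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;

public class AclSaveLoadSymmetrySketch {
  public static void main(String[] args) {
    // Illustrative named-user entry, the kind AclFeature#getEntries holds.
    AclEntry entry = new AclEntry.Builder()
        .setType(AclEntryType.USER)
        .setScope(AclEntryScope.ACCESS)
        .setPermission(FsAction.READ)
        .setName("bruce")
        .build();

    // Save path, as in writeAclFeature: model entries -> protobuf message.
    AclFsImageProto saved = AclFsImageProto.newBuilder()
        .addAllEntries(PBHelper.convertAclEntryProto(Arrays.asList(entry)))
        .build();

    // Load path, as in loadAclFeature: protobuf message -> model entries.
    List<AclEntry> loaded = PBHelper.convertAclEntry(saved.getEntriesList());
    System.out.println(loaded.size()); // 1
  }
}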