HDFS-5618. NameNode: persist ACLs in fsimage. Contributed by Haohui Mai.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4685@1553221 13f79535-47bb-0310-9956-ffa450edef68

commit 25e9d2e895
parent 72b0b1881f
@@ -16,6 +16,8 @@ HDFS-4685 (Unreleased)
     HDFS-5685. Implement ACL as a INode feature. (Haohui Mai via cnauroth)
 
+    HDFS-5618. NameNode: persist ACLs in fsimage. (Haohui Mai via cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES
 
@@ -109,7 +109,8 @@ public class LayoutVersion {
         + "enable rebuilding retry cache in case of HA failover"),
     CACHING(-48, "Support for cache pools and path-based caching"),
     ADD_DATANODE_AND_STORAGE_UUIDS(-49, "Replace StorageID with DatanodeUuid."
-        + " Use distinct StorageUuid per storage directory.");
+        + " Use distinct StorageUuid per storage directory."),
+    EXTENDED_ACL(-50, "Extended ACL");
 
 
     final int lv;
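The new EXTENDED_ACL entry follows the usual LayoutVersion convention: layout versions are negative and decrease as features are added, so an fsimage written at -50 or any more negative version carries the feature. A minimal sketch of the gate, using the same LayoutVersion.supports call this commit relies on (the class name and main method are illustrative only):

import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;

public class LayoutVersionCheck {
  public static void main(String[] args) {
    // An image at layout version -50 (or more negative) has ACLs persisted.
    System.out.println(LayoutVersion.supports(Feature.EXTENDED_ACL, -50)); // true
    // A -49 image predates this commit, so no ACL record is present.
    System.out.println(LayoutVersion.supports(Feature.EXTENDED_ACL, -49)); // false
  }
}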
@@ -48,6 +48,8 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
+import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclFsImageProto;
+import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -102,6 +104,7 @@ import org.apache.hadoop.io.Text;
  * } when {@link Feature#FSIMAGE_NAME_OPTIMIZATION} is not supported
  * replicationFactor: short, modificationTime: long,
  * accessTime: long, preferredBlockSize: long,
+ *
  * numberOfBlocks: int (-1 for INodeDirectory, -2 for INodeSymLink),
  * {
  * nsQuota: long, dsQuota: long,
@@ -109,7 +112,11 @@ import org.apache.hadoop.io.Text;
  * isINodeSnapshottable: byte,
  * isINodeWithSnapshot: byte (if isINodeSnapshottable is false)
  * } (when {@link Feature#SNAPSHOT} is supported),
- * fsPermission: short, PermissionStatus
+ * fsPermission: short, PermissionStatus,
+ * AclEntries {
+ *   size: int,
+ *   protobuf encoding of {@link AclFsImageProto}
+ * }(when {@link Feature#EXTENDED_ACL} is supported),
  * } for INodeDirectory
  * or
  * {
@@ -124,9 +131,12 @@ import org.apache.hadoop.io.Text;
  * {clientName: short + byte[], clientMachine: short + byte[]} (when
  * isINodeFileUnderConstructionSnapshot is true),
  * } (when {@link Feature#SNAPSHOT} is supported and writing snapshotINode),
- * fsPermission: short, PermissionStatus
- * } for INodeFile
- * }
+ * fsPermission: short, PermissionStatus,
+ * AclEntries {
+ *   size: int,
+ *   protobuf encoding of {@link AclFsImageProto}
+ * }(when {@link Feature#EXTENDED_ACL} is supported),
+ * } for INodeFile,
 *
  * INodeDirectoryInfo {
  * fullPath of the directory: short + byte[],
@@ -696,10 +706,15 @@ public class FSImageFormat {
           modificationTime, atime, blocks, replication, blockSize);
       if (underConstruction) {
         file.toUnderConstruction(clientName, clientMachine, null);
-        return fileDiffs == null ? file : new INodeFile(file, fileDiffs);
-      } else {
-        return fileDiffs == null ? file : new INodeFile(file, fileDiffs);
       }
+
+      AclFeature aclFeature = loadAclFeature(in, imgVersion);
+      if (aclFeature != null) {
+        file.addAclFeature(aclFeature);
+      }
+
+      return fileDiffs == null ? file : new INodeFile(file, fileDiffs);
     } else if (numBlocks == -1) {
       //directory
 
@@ -731,6 +746,12 @@ public class FSImageFormat {
       if (nsQuota >= 0 || dsQuota >= 0) {
         dir.addDirectoryWithQuotaFeature(nsQuota, dsQuota);
       }
+
+      AclFeature aclFeature = loadAclFeature(in, imgVersion);
+      if (aclFeature != null) {
+        dir.addAclFeature(aclFeature);
+      }
+
       if (withSnapshot) {
         dir.addSnapshotFeature(null);
       }
@@ -771,6 +792,18 @@ public class FSImageFormat {
       throw new IOException("Unknown inode type: numBlocks=" + numBlocks);
     }
 
+    private AclFeature loadAclFeature(DataInput in, final int imgVersion)
+        throws IOException {
+      AclFeature aclFeature = null;
+      if (LayoutVersion.supports(Feature.EXTENDED_ACL, imgVersion)) {
+        AclFsImageProto p = AclFsImageProto
+            .parseDelimitedFrom((DataInputStream) in);
+        aclFeature = new AclFeature();
+        aclFeature.setEntries(PBHelper.convertAclEntry(p.getEntriesList()));
+      }
+      return aclFeature;
+    }
+
     /** Load {@link INodeFileAttributes}. */
     public INodeFileAttributes loadINodeFileAttributes(DataInput in)
         throws IOException {
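loadAclFeature reads exactly one length-delimited AclFsImageProto per inode when the image supports EXTENDED_ACL, and returns null without touching the stream for older images, so pre-ACL fsimages load unchanged. The framing is standard protobuf-java: writeDelimitedTo emits a varint length prefix followed by the message bytes, and parseDelimitedFrom consumes exactly that much, leaving the stream positioned at the next fsimage field. A minimal round-trip sketch (the in-memory streams are illustrative; the real code reads from the fsimage DataInputStream):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclFsImageProto;

public class AclFramingDemo {
  public static void main(String[] args) throws Exception {
    // Writer side: varint length prefix + message bytes.
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    AclFsImageProto.newBuilder().build().writeDelimitedTo(out);

    // Reader side: consumes exactly one framed message and no more.
    AclFsImageProto p = AclFsImageProto
        .parseDelimitedFrom(new ByteArrayInputStream(out.toByteArray()));
    System.out.println(p.getEntriesCount()); // 0
  }
}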
@@ -21,6 +21,7 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.IOException;
+import java.io.OutputStream;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -34,6 +35,8 @@ import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
+import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclFsImageProto;
+import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
@@ -204,6 +207,7 @@ public class FSImageSerialization {
     }
 
     writePermissionStatus(file, out);
+    writeAclFeature(file, out);
   }
 
   /** Serialize an {@link INodeFileAttributes}. */
@@ -249,6 +253,7 @@ public class FSImageSerialization {
     }
 
     writePermissionStatus(node, out);
+    writeAclFeature(node, out);
   }
 
   /**
@@ -283,6 +288,18 @@ public class FSImageSerialization {
     writePermissionStatus(node, out);
   }
 
+  private static void writeAclFeature(INodeWithAdditionalFields node,
+      DataOutput out) throws IOException {
+    AclFsImageProto.Builder b = AclFsImageProto.newBuilder();
+    OutputStream os = (OutputStream) out;
+
+    AclFeature feature = node.getAclFeature();
+    if (feature != null)
+      b.addAllEntries(PBHelper.convertAclEntryProto(feature.getEntries()));
+
+    b.build().writeDelimitedTo(os);
+  }
+
   /** Serialize a {@link INodeReference} node */
   private static void writeINodeReference(INodeReference ref, DataOutput out,
       boolean writeUnderConstruction, ReferenceMap referenceMap
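Note that writeAclFeature writes a (possibly empty) AclFsImageProto for every inode, not only those carrying an AclFeature, which keeps the writer in lockstep with loadAclFeature: the reader can unconditionally parse one record per inode. A hedged sketch of the entry conversion feeding the builder, assuming the AclEntry builder API from org.apache.hadoop.fs.permission alongside the PBHelper.convertAclEntryProto helper named above (the entry values are illustrative):

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclFsImageProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;

public class AclEntryConversion {
  public static void main(String[] args) {
    // One named-user ACCESS entry, as an AclFeature would hold it.
    List<AclEntry> entries = Arrays.asList(new AclEntry.Builder()
        .setScope(AclEntryScope.ACCESS)
        .setType(AclEntryType.USER)
        .setName("bruce")
        .setPermission(FsAction.READ_WRITE)
        .build());

    // The same conversion writeAclFeature performs before framing.
    AclFsImageProto proto = AclFsImageProto.newBuilder()
        .addAllEntries(PBHelper.convertAclEntryProto(entries))
        .build();
    System.out.println(proto.getEntriesCount()); // 1
  }
}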
|
@ -126,7 +126,7 @@ class ImageLoaderCurrent implements ImageLoader {
|
||||||
new SimpleDateFormat("yyyy-MM-dd HH:mm");
|
new SimpleDateFormat("yyyy-MM-dd HH:mm");
|
||||||
private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
|
private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
|
||||||
-24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39,
|
-24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39,
|
||||||
-40, -41, -42, -43, -44, -45, -46, -47, -48, -49 };
|
-40, -41, -42, -43, -44, -45, -46, -47, -48, -49, -50 };
|
||||||
private int imageVersion = 0;
|
private int imageVersion = 0;
|
||||||
|
|
||||||
private final Map<Long, Boolean> subtreeMap = new HashMap<Long, Boolean>();
|
private final Map<Long, Boolean> subtreeMap = new HashMap<Long, Boolean>();
|
||||||
|
|
|
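The offline image viewer whitelists every layout version it can parse, so each new layout version must be appended to this array or the viewer rejects the image outright. A sketch of the shape of that check (illustrative only, not the actual ImageLoaderCurrent source):

public class VersionCheckSketch {
  // Sketch only: the viewer accepts an image only if its layout
  // version appears in the supported list.
  private static final int[] VERSIONS = { -48, -49, -50 };

  public static boolean canLoadVersion(int version) {
    for (int v : VERSIONS) {
      if (v == version) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    System.out.println(canLoadVersion(-50)); // true after this commit
    System.out.println(canLoadVersion(-51)); // false until added
  }
}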
@@ -60,6 +60,10 @@ message AclStatusProto {
   repeated AclEntryProto entries = 4;
 }
 
+message AclFsImageProto {
+  repeated AclEntryProto entries = 1;
+}
+
 message ModifyAclEntriesRequestProto {
   required string src = 1;
   repeated AclEntryProto aclSpec = 2;
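AclFsImageProto is a deliberately thin wrapper: putting the repeated entries inside a message is what lets the fsimage writer frame all of an inode's ACL entries as a single length-delimited record. An empty message serializes to zero payload bytes, so an inode with no ACLs costs one varint length byte in the image (illustrative sketch):

import java.io.ByteArrayOutputStream;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclFsImageProto;

public class AclProtoSizeDemo {
  public static void main(String[] args) throws Exception {
    AclFsImageProto empty = AclFsImageProto.newBuilder().build();
    System.out.println(empty.getSerializedSize()); // 0 bytes of payload

    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    empty.writeDelimitedTo(bos);
    System.out.println(bos.size()); // 1: just the 0x00 varint length prefix
  }
}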