diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cd167c8a3d5..e533f427bbf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -237,6 +237,20 @@ Trunk (Unreleased)
     HDFS-3934. duplicative dfs_hosts entries handled wrong. (Colin Patrick
     McCabe)
 
+Release 2.2.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+    HDFS-4908. Reduce snapshot inode memory usage. (szetszwo)
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.1.0-beta - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
index 58667060c54..95f6282e31e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
@@ -98,7 +98,13 @@ public class LayoutVersion {
         "add OP_UPDATE_BLOCKS"),
     RESERVED_REL1_2_0(-41, -32, "Reserved for release 1.2.0", true, CONCAT),
     ADD_INODE_ID(-42, -40, "Assign a unique inode id for each inode", false),
-    SNAPSHOT(-43, "Support for snapshot feature");
+    SNAPSHOT(-43, "Support for snapshot feature"),
+    RESERVED_REL1_3_0(-44, -41,
+        "Reserved for release 1.3.0", true, ADD_INODE_ID, SNAPSHOT),
+    OPTIMIZE_SNAPSHOT_INODES(-45, -43,
+        "Reduce snapshot inode memory footprint", false);
+
+
 
     final int lv;
     final int ancestorLV;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index 1421ba21ffe..a3d02380236 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -694,6 +694,50 @@ public class FSImageFormat {
       throw new IOException("Unknown inode type: numBlocks=" + numBlocks);
     }
 
+    /** Load {@link INodeFileAttributes}.
*/ + public INodeFileAttributes loadINodeFileAttributes(DataInput in) + throws IOException { + final int layoutVersion = getLayoutVersion(); + + if (!LayoutVersion.supports(Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) { + return loadINodeWithLocalName(true, in, false).asFile(); + } + + final byte[] name = FSImageSerialization.readLocalName(in); + final PermissionStatus permissions = PermissionStatus.read(in); + final long modificationTime = in.readLong(); + final long accessTime = in.readLong(); + + final short replication = namesystem.getBlockManager().adjustReplication( + in.readShort()); + final long preferredBlockSize = in.readLong(); + + return new INodeFileAttributes.SnapshotCopy(name, permissions, modificationTime, + accessTime, replication, preferredBlockSize); + } + + public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in) + throws IOException { + final int layoutVersion = getLayoutVersion(); + + if (!LayoutVersion.supports(Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) { + return loadINodeWithLocalName(true, in, false).asDirectory(); + } + + final byte[] name = FSImageSerialization.readLocalName(in); + final PermissionStatus permissions = PermissionStatus.read(in); + final long modificationTime = in.readLong(); + + //read quotas + final long nsQuota = in.readLong(); + final long dsQuota = in.readLong(); + + return nsQuota == -1L && dsQuota == -1L? + new INodeDirectoryAttributes.SnapshotCopy(name, permissions, modificationTime) + : new INodeDirectoryAttributes.CopyWithQuota(name, permissions, + modificationTime, nsQuota, dsQuota); + } + private void loadFilesUnderConstruction(DataInput in, boolean supportSnapshot) throws IOException { FSDirectory fsDir = namesystem.dir; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java index 63800f4803a..366bb18255b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java @@ -84,7 +84,7 @@ public class FSImageSerialization { final FsPermission FILE_PERM = new FsPermission((short) 0); } - private static void writePermissionStatus(INodeWithAdditionalFields inode, + private static void writePermissionStatus(INodeAttributes inode, DataOutput out) throws IOException { final FsPermission p = TL_DATA.get().FILE_PERM; p.fromShort(inode.getFsPermissionShort()); @@ -205,6 +205,18 @@ public class FSImageSerialization { writePermissionStatus(file, out); } + /** Serialize an {@link INodeFileAttributes}. 
*/ + public static void writeINodeFileAttributes(INodeFileAttributes file, + DataOutput out) throws IOException { + writeLocalName(file, out); + writePermissionStatus(file, out); + out.writeLong(file.getModificationTime()); + out.writeLong(file.getAccessTime()); + + out.writeShort(file.getFileReplication()); + out.writeLong(file.getPreferredBlockSize()); + } + /** * Serialize a {@link INodeDirectory} * @param node The node to write @@ -232,6 +244,21 @@ public class FSImageSerialization { writePermissionStatus(node, out); } + /** + * Serialize a {@link INodeDirectory} + * @param a The node to write + * @param out The {@link DataOutput} where the fields are written + */ + public static void writeINodeDirectoryAttributes( + INodeDirectoryAttributes a, DataOutput out) throws IOException { + writeLocalName(a, out); + writePermissionStatus(a, out); + out.writeLong(a.getModificationTime()); + + out.writeLong(a.getNsQuota()); + out.writeLong(a.getDsQuota()); + } + /** * Serialize a {@link INodeSymlink} node * @param node The node to write @@ -384,7 +411,7 @@ public class FSImageSerialization { return createdNodeName; } - private static void writeLocalName(INode inode, DataOutput out) + private static void writeLocalName(INodeAttributes inode, DataOutput out) throws IOException { final byte[] name = inode.getLocalNameBytes(); out.writeShort(name.length); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index fd48d6b5847..db9069b8aa6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -179,8 +179,6 @@ import org.apache.hadoop.hdfs.server.namenode.ha.HAState; import org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer; import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean; import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; -import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot; -import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable.SnapshotDiffInfo; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java index 6c6531c3f2b..94ad7a8479c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java @@ -50,7 +50,7 @@ import com.google.common.base.Preconditions; * directory inodes. */ @InterfaceAudience.Private -public abstract class INode implements Diff.Element { +public abstract class INode implements INodeAttributes, Diff.Element { public static final Log LOG = LogFactory.getLog(INode.class); /** parent is either an {@link INodeDirectory} or an {@link INodeReference}.*/ @@ -87,6 +87,7 @@ public abstract class INode implements Diff.Element { abstract String getUserName(Snapshot snapshot); /** The same as getUserName(null). 
*/ + @Override public final String getUserName() { return getUserName(null); } @@ -110,6 +111,7 @@ public abstract class INode implements Diff.Element { abstract String getGroupName(Snapshot snapshot); /** The same as getGroupName(null). */ + @Override public final String getGroupName() { return getGroupName(null); } @@ -134,6 +136,7 @@ public abstract class INode implements Diff.Element { abstract FsPermission getFsPermission(Snapshot snapshot); /** The same as getFsPermission(null). */ + @Override public final FsPermission getFsPermission() { return getFsPermission(null); } @@ -153,7 +156,7 @@ public abstract class INode implements Diff.Element { * @return if the given snapshot is null, return this; * otherwise return the corresponding snapshot inode. */ - public INode getSnapshotINode(final Snapshot snapshot) { + public INodeAttributes getSnapshotINode(final Snapshot snapshot) { return this; } @@ -464,12 +467,6 @@ public abstract class INode implements Diff.Element { return name == null? null: DFSUtil.bytes2String(name); } - /** - * @return null if the local name is null; - * otherwise, return the local name byte array. - */ - public abstract byte[] getLocalNameBytes(); - @Override public final byte[] getKey() { return getLocalNameBytes(); @@ -555,6 +552,7 @@ public abstract class INode implements Diff.Element { abstract long getModificationTime(Snapshot snapshot); /** The same as getModificationTime(null). */ + @Override public final long getModificationTime() { return getModificationTime(null); } @@ -583,6 +581,7 @@ public abstract class INode implements Diff.Element { abstract long getAccessTime(Snapshot snapshot); /** The same as getAccessTime(null). */ + @Override public final long getAccessTime() { return getAccessTime(null); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java new file mode 100644 index 00000000000..90ee39f5da3 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java @@ -0,0 +1,121 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.hdfs.server.namenode.INodeWithAdditionalFields.PermissionStatusFormat; + +/** + * The attributes of an inode. + */ +@InterfaceAudience.Private +public interface INodeAttributes { + /** + * @return null if the local name is null; + * otherwise, return the local name byte array. 
+ */ + public byte[] getLocalNameBytes(); + + /** @return the user name. */ + public String getUserName(); + + /** @return the group name. */ + public String getGroupName(); + + /** @return the permission. */ + public FsPermission getFsPermission(); + + /** @return the permission as a short. */ + public short getFsPermissionShort(); + + /** @return the permission information as a long. */ + public long getPermissionLong(); + + /** @return the modification time. */ + public long getModificationTime(); + + /** @return the access time. */ + public long getAccessTime(); + + /** A read-only copy of the inode attributes. */ + public static abstract class SnapshotCopy implements INodeAttributes { + private final byte[] name; + private final long permission; + private final long modificationTime; + private final long accessTime; + + SnapshotCopy(byte[] name, PermissionStatus permissions, + long modificationTime, long accessTime) { + this.name = name; + this.permission = PermissionStatusFormat.toLong(permissions); + this.modificationTime = modificationTime; + this.accessTime = accessTime; + } + + SnapshotCopy(INode inode) { + this.name = inode.getLocalNameBytes(); + this.permission = inode.getPermissionLong(); + this.modificationTime = inode.getModificationTime(); + this.accessTime = inode.getAccessTime(); + } + + @Override + public final byte[] getLocalNameBytes() { + return name; + } + + @Override + public final String getUserName() { + final int n = (int)PermissionStatusFormat.USER.retrieve(permission); + return SerialNumberManager.INSTANCE.getUser(n); + } + + @Override + public final String getGroupName() { + final int n = (int)PermissionStatusFormat.GROUP.retrieve(permission); + return SerialNumberManager.INSTANCE.getGroup(n); + } + + @Override + public final FsPermission getFsPermission() { + return new FsPermission(getFsPermissionShort()); + } + + @Override + public final short getFsPermissionShort() { + return (short)PermissionStatusFormat.MODE.retrieve(permission); + } + + @Override + public long getPermissionLong() { + return permission; + } + + @Override + public final long getModificationTime() { + return modificationTime; + } + + @Override + public final long getAccessTime() { + return accessTime; + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java index 7e72fa32286..f2e5a1207d3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java @@ -45,7 +45,8 @@ import com.google.common.base.Preconditions; /** * Directory INode class. */ -public class INodeDirectory extends INodeWithAdditionalFields { +public class INodeDirectory extends INodeWithAdditionalFields + implements INodeDirectoryAttributes { /** Cast INode to INodeDirectory. 
*/ public static INodeDirectory valueOf(INode inode, Object path ) throws FileNotFoundException, PathIsNotDirectoryException { @@ -558,12 +559,12 @@ public class INodeDirectory extends INodeWithAdditionalFields { /** * Compare the metadata with another INodeDirectory */ - public boolean metadataEquals(INodeDirectory other) { - return other != null && getNsQuota() == other.getNsQuota() + @Override + public boolean metadataEquals(INodeDirectoryAttributes other) { + return other != null + && getNsQuota() == other.getNsQuota() && getDsQuota() == other.getDsQuota() - && getUserName().equals(other.getUserName()) - && getGroupName().equals(other.getGroupName()) - && getFsPermission().equals(other.getFsPermission()); + && getPermissionLong() == other.getPermissionLong(); } /* diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java new file mode 100644 index 00000000000..5bff5cdaf51 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java @@ -0,0 +1,95 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.permission.PermissionStatus; + +import com.google.common.base.Preconditions; + +/** + * The attributes of an inode. 
+ */ +@InterfaceAudience.Private +public interface INodeDirectoryAttributes extends INodeAttributes { + public long getNsQuota(); + + public long getDsQuota(); + + public boolean metadataEquals(INodeDirectoryAttributes other); + + /** A copy of the inode directory attributes */ + public static class SnapshotCopy extends INodeAttributes.SnapshotCopy + implements INodeDirectoryAttributes { + public SnapshotCopy(byte[] name, PermissionStatus permissions, + long modificationTime) { + super(name, permissions, modificationTime, 0L); + } + + public SnapshotCopy(INodeDirectory dir) { + super(dir); + } + + @Override + public long getNsQuota() { + return -1; + } + + @Override + public long getDsQuota() { + return -1; + } + + @Override + public boolean metadataEquals(INodeDirectoryAttributes other) { + return other != null + && getNsQuota() == other.getNsQuota() + && getDsQuota() == other.getDsQuota() + && getPermissionLong() == other.getPermissionLong(); + } + } + + public static class CopyWithQuota extends INodeDirectoryAttributes.SnapshotCopy { + private final long nsQuota; + private final long dsQuota; + + public CopyWithQuota(byte[] name, PermissionStatus permissions, + long modificationTime, long nsQuota, long dsQuota) { + super(name, permissions, modificationTime); + this.nsQuota = nsQuota; + this.dsQuota = dsQuota; + } + + public CopyWithQuota(INodeDirectory dir) { + super(dir); + Preconditions.checkArgument(dir.isQuotaSet()); + this.nsQuota = dir.getNsQuota(); + this.dsQuota = dir.getDsQuota(); + } + + @Override + public final long getNsQuota() { + return nsQuota; + } + + @Override + public final long getDsQuota() { + return dsQuota; + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index 57f7fefecd8..455d808a37f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -41,7 +41,8 @@ import com.google.common.base.Preconditions; /** I-node for closed file. */ @InterfaceAudience.Private -public class INodeFile extends INodeWithAdditionalFields implements BlockCollection { +public class INodeFile extends INodeWithAdditionalFields + implements INodeFileAttributes, BlockCollection { /** The same as valueOf(inode, path, false). */ public static INodeFile valueOf(INode inode, String path ) throws FileNotFoundException { @@ -65,7 +66,7 @@ public class INodeFile extends INodeWithAdditionalFields implements BlockCollect } /** Format: [16 bits for replication][48 bits for PreferredBlockSize] */ - private static class HeaderFormat { + static class HeaderFormat { /** Number of bits for Block size */ static final int BLOCKBITS = 48; /** Header mask 64-bit representation */ @@ -146,7 +147,7 @@ public class INodeFile extends INodeWithAdditionalFields implements BlockCollect } @Override - public INodeFile getSnapshotINode(final Snapshot snapshot) { + public INodeFileAttributes getSnapshotINode(final Snapshot snapshot) { return this; } @@ -173,6 +174,7 @@ public class INodeFile extends INodeWithAdditionalFields implements BlockCollect } /** The same as getFileReplication(null). 
*/ + @Override public final short getFileReplication() { return getFileReplication(null); } @@ -203,6 +205,11 @@ public class INodeFile extends INodeWithAdditionalFields implements BlockCollect return HeaderFormat.getPreferredBlockSize(header); } + @Override + public long getHeaderLong() { + return header; + } + /** @return the diskspace required for a full block. */ final long getBlockDiskspace() { return getPreferredBlockSize() * getBlockReplication(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java new file mode 100644 index 00000000000..e9e2e872063 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java @@ -0,0 +1,72 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.hdfs.server.namenode.INodeFile.HeaderFormat; + +/** + * The attributes of a file. + */ +@InterfaceAudience.Private +public interface INodeFileAttributes extends INodeAttributes { + /** @return the file replication. */ + public short getFileReplication(); + + /** @return preferred block size in bytes */ + public long getPreferredBlockSize(); + + /** @return the header as a long. 
*/ + public long getHeaderLong(); + + /** A copy of the inode file attributes */ + public static class SnapshotCopy extends INodeAttributes.SnapshotCopy + implements INodeFileAttributes { + private final long header; + + public SnapshotCopy(byte[] name, PermissionStatus permissions, + long modificationTime, long accessTime, + short replication, long preferredBlockSize) { + super(name, permissions, modificationTime, accessTime); + + final long h = HeaderFormat.combineReplication(0L, replication); + header = HeaderFormat.combinePreferredBlockSize(h, preferredBlockSize); + } + + public SnapshotCopy(INodeFile file) { + super(file); + this.header = file.getHeaderLong(); + } + + @Override + public short getFileReplication() { + return HeaderFormat.getReplication(header); + } + + @Override + public long getPreferredBlockSize() { + return HeaderFormat.getPreferredBlockSize(header); + } + + @Override + public long getHeaderLong() { + return header; + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java index 22d0a487e0a..fcc4b9e7fa2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java @@ -212,12 +212,21 @@ public abstract class INodeReference extends INode { public final FsPermission getFsPermission(Snapshot snapshot) { return referred.getFsPermission(snapshot); } + @Override + public final short getFsPermissionShort() { + return referred.getFsPermissionShort(); + } @Override void setPermission(FsPermission permission) { referred.setPermission(permission); } - + + @Override + public long getPermissionLong() { + return referred.getPermissionLong(); + } + @Override public final long getModificationTime(Snapshot snapshot) { return referred.getModificationTime(snapshot); @@ -280,7 +289,7 @@ public abstract class INodeReference extends INode { } @Override - public final INode getSnapshotINode(Snapshot snapshot) { + public final INodeAttributes getSnapshotINode(Snapshot snapshot) { return referred.getSnapshotINode(snapshot); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java index 99d30582221..2ea5f354ff3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java @@ -33,7 +33,7 @@ import com.google.common.base.Preconditions; @InterfaceAudience.Private public abstract class INodeWithAdditionalFields extends INode implements LinkedElement { - private static enum PermissionStatusFormat { + static enum PermissionStatusFormat { MODE(0, 16), GROUP(MODE.OFFSET + MODE.LENGTH, 25), USER(GROUP.OFFSET + GROUP.LENGTH, 23); @@ -197,11 +197,11 @@ public abstract class INodeWithAdditionalFields extends INode return getSnapshotINode(snapshot).getFsPermission(); } - return new FsPermission( - (short)PermissionStatusFormat.MODE.retrieve(permission)); + return new FsPermission(getFsPermissionShort()); } - final short getFsPermissionShort() { + @Override + public final short 
getFsPermissionShort() { return (short)PermissionStatusFormat.MODE.retrieve(permission); } @Override @@ -210,10 +210,15 @@ public abstract class INodeWithAdditionalFields extends INode updatePermissionStatus(PermissionStatusFormat.MODE, mode); } + @Override + public long getPermissionLong() { + return permission; + } + @Override final long getModificationTime(Snapshot snapshot) { if (snapshot != null) { - return getSnapshotINode(snapshot).getModificationTime(null); + return getSnapshotINode(snapshot).getModificationTime(); } return this.modificationTime; @@ -242,7 +247,7 @@ public abstract class INodeWithAdditionalFields extends INode @Override final long getAccessTime(Snapshot snapshot) { if (snapshot != null) { - return getSnapshotINode(snapshot).getAccessTime(null); + return getSnapshotINode(snapshot).getAccessTime(); } return accessTime; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java index e6a41509891..5ebb56050ed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.util.List; import org.apache.hadoop.hdfs.server.namenode.INode; +import org.apache.hadoop.hdfs.server.namenode.INodeAttributes; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.Quota; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap; @@ -47,13 +48,14 @@ import com.google.common.base.Preconditions; * */ abstract class AbstractINodeDiff> + A extends INodeAttributes, + D extends AbstractINodeDiff> implements Comparable { /** The snapshot will be obtained after this diff is applied. */ Snapshot snapshot; /** The snapshot inode data. It is null when there is no change. */ - N snapshotINode; + A snapshotINode; /** * Posterior diff is the diff happened after this diff. 
* The posterior diff should be first applied to obtain the posterior @@ -62,7 +64,7 @@ abstract class AbstractINodeDiff d = this; ; d = d.posteriorDiff) { + for(AbstractINodeDiff d = this; ; d = d.posteriorDiff) { if (d.snapshotINode != null) { return d.snapshotINode; } else if (d.posteriorDiff == null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java index 23ca4ea9e73..b032c5d2eb8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java @@ -25,6 +25,7 @@ import java.util.List; import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.server.namenode.INode; +import org.apache.hadoop.hdfs.server.namenode.INodeAttributes; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.Quota; @@ -35,7 +36,8 @@ import org.apache.hadoop.hdfs.server.namenode.Quota; * @param The diff type, which must extend {@link AbstractINodeDiff}. */ abstract class AbstractINodeDiffList> + A extends INodeAttributes, + D extends AbstractINodeDiff> implements Iterable { /** Diff list sorted by snapshot IDs, i.e. in chronological order. */ private final List diffs = new ArrayList(); @@ -54,7 +56,7 @@ abstract class AbstractINodeDiffList 0) { - final AbstractINodeDiff previous = diffs.get(snapshotIndex - 1); + final AbstractINodeDiff previous = diffs.get(snapshotIndex - 1); if (!previous.getSnapshot().equals(prior)) { diffs.get(snapshotIndex).setSnapshot(prior); } else { @@ -106,9 +108,8 @@ abstract class AbstractINodeDiffList last = getLast(); + final AbstractINodeDiff last = getLast(); return last == null? null: last.getSnapshot(); } @@ -270,9 +271,9 @@ abstract class AbstractINodeDiffList { + public static class FileDiff extends AbstractINodeDiff { /** The file size at snapshot creation time. */ private final long fileSize; @@ -49,7 +49,7 @@ public interface FileWithSnapshot { } /** Constructor used by FSImage loading */ - FileDiff(Snapshot snapshot, INodeFile snapshotINode, + FileDiff(Snapshot snapshot, INodeFileAttributes snapshotINode, FileDiff posteriorDiff, long fileSize) { super(snapshot, snapshotINode, posteriorDiff); this.fileSize = fileSize; @@ -104,7 +104,7 @@ public interface FileWithSnapshot { // write snapshotINode if (snapshotINode != null) { out.writeBoolean(true); - FSImageSerialization.writeINodeFile(snapshotINode, out, true); + FSImageSerialization.writeINodeFileAttributes(snapshotINode, out); } else { out.writeBoolean(false); } @@ -120,7 +120,7 @@ public interface FileWithSnapshot { /** A list of FileDiffs for storing snapshot data. 
*/ public static class FileDiffList - extends AbstractINodeDiffList { + extends AbstractINodeDiffList { @Override FileDiff createDiff(Snapshot snapshot, INodeFile file) { @@ -128,21 +128,8 @@ public interface FileWithSnapshot { } @Override - INodeFile createSnapshotCopy(INodeFile currentINode) { - if (currentINode instanceof INodeFileUnderConstructionWithSnapshot) { - final INodeFileUnderConstruction uc = - (INodeFileUnderConstruction) currentINode; - - final INodeFileUnderConstruction copy = new INodeFileUnderConstruction( - uc, uc.getClientName(), uc.getClientMachine(), uc.getClientNode()); - - copy.setBlocks(null); - return copy; - } else { - final INodeFile copy = new INodeFile(currentINode); - copy.setBlocks(null); - return copy; - } + INodeFileAttributes createSnapshotCopy(INodeFile currentINode) { + return new INodeFileAttributes.SnapshotCopy(currentINode); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java index 994f9bd481a..808ac75c629 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.server.namenode.Content; import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization; import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; +import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes; import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota; import org.apache.hadoop.hdfs.server.namenode.INodeMap; import org.apache.hadoop.hdfs.server.namenode.INodeReference; @@ -224,7 +225,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota { * The difference of an {@link INodeDirectory} between two snapshots. */ public static class DirectoryDiff extends - AbstractINodeDiff { + AbstractINodeDiff { /** The size of the children list at snapshot creation time. */ private final int childrenSize; /** The children list diff. */ @@ -238,7 +239,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota { } /** Constructor used by FSImage loading */ - DirectoryDiff(Snapshot snapshot, INodeDirectory snapshotINode, + DirectoryDiff(Snapshot snapshot, INodeDirectoryAttributes snapshotINode, DirectoryDiff posteriorDiff, int childrenSize, List createdList, List deletedList) { super(snapshot, snapshotINode, posteriorDiff); @@ -352,7 +353,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota { out.writeBoolean(false); if (snapshotINode != null) { out.writeBoolean(true); - FSImageSerialization.writeINodeDirectory(snapshotINode, out); + FSImageSerialization.writeINodeDirectoryAttributes(snapshotINode, out); } else { out.writeBoolean(false); } @@ -373,7 +374,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota { /** A list of directory diffs. 
*/ public static class DirectoryDiffList - extends AbstractINodeDiffList { + extends AbstractINodeDiffList { @Override DirectoryDiff createDiff(Snapshot snapshot, INodeDirectory currentDir) { @@ -381,13 +382,10 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota { } @Override - INodeDirectory createSnapshotCopy(INodeDirectory currentDir) { - final INodeDirectory copy = currentDir.isQuotaSet()? - new INodeDirectoryWithQuota(currentDir, false, - currentDir.getNsQuota(), currentDir.getDsQuota()) - : new INodeDirectory(currentDir, false); - copy.clearChildren(); - return copy; + INodeDirectoryAttributes createSnapshotCopy(INodeDirectory currentDir) { + return currentDir.isQuotaSet()? + new INodeDirectoryAttributes.CopyWithQuota(currentDir) + : new INodeDirectoryAttributes.SnapshotCopy(currentDir); } /** Replace the given child in the created/deleted list, if there is any. */ @@ -454,7 +452,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota { : laterDiffIndex; boolean dirMetadataChanged = false; - INodeDirectory dirCopy = null; + INodeDirectoryAttributes dirCopy = null; for (int i = earlierDiffIndex; i < laterDiffIndex; i++) { DirectoryDiff sdiff = difflist.get(i); diff.combinePosterior(sdiff.diff, null); @@ -506,7 +504,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota { } @Override - public INodeDirectory getSnapshotINode(Snapshot snapshot) { + public INodeDirectoryAttributes getSnapshotINode(Snapshot snapshot) { return diffs.getSnapshotINode(snapshot, this); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java index 28f33123b56..05077a6c2c7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INodeFile; +import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes; import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction; import org.apache.hadoop.hdfs.server.namenode.INodeMap; import org.apache.hadoop.hdfs.server.namenode.Quota; @@ -78,7 +79,7 @@ public class INodeFileUnderConstructionWithSnapshot } @Override - public INodeFile getSnapshotINode(Snapshot snapshot) { + public INodeFileAttributes getSnapshotINode(Snapshot snapshot) { return diffs.getSnapshotINode(snapshot, this); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java index 7f37666f880..59a7fc96ba6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java @@ -24,6 +24,7 @@ import 
org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INodeFile; +import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes; import org.apache.hadoop.hdfs.server.namenode.INodeMap; import org.apache.hadoop.hdfs.server.namenode.Quota; @@ -66,7 +67,7 @@ public class INodeFileWithSnapshot extends INodeFile } @Override - public INodeFile getSnapshotINode(Snapshot snapshot) { + public INodeFileAttributes getSnapshotINode(Snapshot snapshot) { return diffs.getSnapshotINode(snapshot, this); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java index 9761b7b2017..3a46023ab1b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java @@ -30,8 +30,11 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageFormat; import org.apache.hadoop.hdfs.server.namenode.FSImageFormat.Loader; import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization; import org.apache.hadoop.hdfs.server.namenode.INode; +import org.apache.hadoop.hdfs.server.namenode.INodeAttributes; import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; +import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes; import org.apache.hadoop.hdfs.server.namenode.INodeFile; +import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes; import org.apache.hadoop.hdfs.server.namenode.INodeReference; import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff; import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList; @@ -70,8 +73,8 @@ public class SnapshotFSImageFormat { * @param sNode The directory that the SnapshotDiff list belongs to. * @param out The {@link DataOutput} to write. */ - private static > - void saveINodeDiffs(final AbstractINodeDiffList diffs, + private static > + void saveINodeDiffs(final AbstractINodeDiffList diffs, final DataOutput out, ReferenceMap referenceMap) throws IOException { // Record the diffs in reversed order, so that we can find the correct // reference for INodes in the created list when loading the FSImage @@ -126,8 +129,8 @@ public class SnapshotFSImageFormat { final long fileSize = in.readLong(); // 3. Load snapshotINode - final INodeFile snapshotINode = in.readBoolean()? - loader.loadINodeWithLocalName(true, in, false).asFile(): null; + final INodeFileAttributes snapshotINode = in.readBoolean()? + loader.loadINodeFileAttributes(in): null; return new FileDiff(snapshot, snapshotINode, posterior, fileSize); } @@ -253,7 +256,7 @@ public class SnapshotFSImageFormat { * using. * @return The snapshotINode. 
*/ - private static INodeDirectory loadSnapshotINodeInDirectoryDiff( + private static INodeDirectoryAttributes loadSnapshotINodeInDirectoryDiff( Snapshot snapshot, DataInput in, FSImageFormat.Loader loader) throws IOException { // read the boolean indicating whether snapshotINode == Snapshot.Root @@ -262,8 +265,7 @@ public class SnapshotFSImageFormat { return snapshot.getRoot(); } else { // another boolean is used to indicate whether snapshotINode is non-null - return in.readBoolean()? - loader.loadINodeWithLocalName(true, in, false).asDirectory(): null; + return in.readBoolean()? loader.loadINodeDirectoryAttributes(in): null; } } @@ -285,8 +287,8 @@ public class SnapshotFSImageFormat { int childrenSize = in.readInt(); // 3. Load DirectoryDiff#snapshotINode - INodeDirectory snapshotINode = loadSnapshotINodeInDirectoryDiff(snapshot, - in, loader); + INodeDirectoryAttributes snapshotINode = loadSnapshotINodeInDirectoryDiff( + snapshot, in, loader); // 4. Load the created list in SnapshotDiff#Diff List createdList = loadCreatedList(parent, in); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java index 1ee70ffd522..9e47a52d15a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java @@ -126,7 +126,7 @@ class ImageLoaderCurrent implements ImageLoader { new SimpleDateFormat("yyyy-MM-dd HH:mm"); private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23, -24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39, - -40, -41, -42, -43}; + -40, -41, -42, -43, -44, -45}; private int imageVersion = 0; private final Map subtreeMap = new HashMap(); @@ -531,8 +531,12 @@ class ImageLoaderCurrent implements ImageLoader { boolean useRoot = in.readBoolean(); if (!useRoot) { if (in.readBoolean()) { - v.visitEnclosingElement(ImageElement.SNAPSHOT_DIFF_SNAPSHOTINODE); - processINode(in, v, true, currentINodeName, true); + v.visitEnclosingElement(ImageElement.SNAPSHOT_INODE_DIRECTORY_ATTRIBUTES); + if (LayoutVersion.supports(Feature.OPTIMIZE_SNAPSHOT_INODES, imageVersion)) { + processINodeDirectoryAttributes(in, v, currentINodeName); + } else { + processINode(in, v, true, currentINodeName, true); + } v.leaveEnclosingElement(); } } @@ -559,7 +563,18 @@ class ImageLoaderCurrent implements ImageLoader { v.leaveEnclosingElement(); v.leaveEnclosingElement(); } - + + private void processINodeDirectoryAttributes(DataInputStream in, ImageVisitor v, + String parentName) throws IOException { + final String pathName = readINodePath(in, parentName); + v.visit(ImageElement.INODE_PATH, pathName); + processPermission(in, v); + v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong())); + + v.visit(ImageElement.NS_QUOTA, in.readLong()); + v.visit(ImageElement.DS_QUOTA, in.readLong()); + } + /** Process children under a directory */ private int processChildren(DataInputStream in, ImageVisitor v, boolean skipBlocks, String parentName) throws IOException { @@ -586,6 +601,18 @@ class ImageLoaderCurrent implements ImageLoader { } } + private String readINodePath(DataInputStream in, String parentName) + throws IOException { + String pathName = FSImageSerialization.readString(in); + if (parentName != null) { 
// local name + pathName = "/" + pathName; + if (!"/".equals(parentName)) { // children of non-root directory + pathName = parentName + pathName; + } + } + return pathName; + } + /** * Process an INode * @@ -605,16 +632,10 @@ class ImageLoaderCurrent implements ImageLoader { LayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion); v.visitEnclosingElement(ImageElement.INODE); - String pathName = FSImageSerialization.readString(in); - if (parentName != null) { // local name - pathName = "/" + pathName; - if (!"/".equals(parentName)) { // children of non-root directory - pathName = parentName + pathName; - } - } + final String pathName = readINodePath(in, parentName); + v.visit(ImageElement.INODE_PATH, pathName); long inodeId = INodeId.GRANDFATHER_INODE_ID; - v.visit(ImageElement.INODE_PATH, pathName); if (supportInodeId) { inodeId = in.readLong(); v.visit(ImageElement.INODE_ID, inodeId); @@ -683,6 +704,20 @@ class ImageLoaderCurrent implements ImageLoader { v.leaveEnclosingElement(); // INode } + + private void processINodeFileAttributes(DataInputStream in, ImageVisitor v, + String parentName) throws IOException { + final String pathName = readINodePath(in, parentName); + v.visit(ImageElement.INODE_PATH, pathName); + processPermission(in, v); + v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong())); + if(LayoutVersion.supports(Feature.FILE_ACCESS_TIME, imageVersion)) { + v.visit(ImageElement.ACCESS_TIME, formatDate(in.readLong())); + } + + v.visit(ImageElement.REPLICATION, in.readShort()); + v.visit(ImageElement.BLOCK_SIZE, in.readLong()); + } private void processFileDiffList(DataInputStream in, ImageVisitor v, String currentINodeName) throws IOException { @@ -704,8 +739,12 @@ class ImageLoaderCurrent implements ImageLoader { ImageElement.SNAPSHOT_DIFF_SNAPSHOTID, snapshotId); v.visit(ImageElement.SNAPSHOT_FILE_SIZE, in.readLong()); if (in.readBoolean()) { - v.visitEnclosingElement(ImageElement.SNAPSHOT_DIFF_SNAPSHOTINODE); - processINode(in, v, true, currentINodeName, true); + v.visitEnclosingElement(ImageElement.SNAPSHOT_INODE_FILE_ATTRIBUTES); + if (LayoutVersion.supports(Feature.OPTIMIZE_SNAPSHOT_INODES, imageVersion)) { + processINodeFileAttributes(in, v, currentINodeName); + } else { + processINode(in, v, true, currentINodeName, true); + } v.leaveEnclosingElement(); } v.leaveEnclosingElement(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java index a3a7781b01c..dc25428218d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java @@ -97,7 +97,8 @@ abstract class ImageVisitor { SNAPSHOT_DIR_DIFF, SNAPSHOT_DIFF_SNAPSHOTID, SNAPSHOT_DIR_DIFF_CHILDREN_SIZE, - SNAPSHOT_DIFF_SNAPSHOTINODE, + SNAPSHOT_INODE_FILE_ATTRIBUTES, + SNAPSHOT_INODE_DIRECTORY_ATTRIBUTES, SNAPSHOT_DIR_DIFF_CREATEDLIST, SNAPSHOT_DIR_DIFF_CREATEDLIST_SIZE, SNAPSHOT_DIR_DIFF_CREATED_INODE, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored index 56e48ff1a45..9c116f64531 100644 Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored and b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored differ diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
index e3b0eb53ff6..c7ce97f86e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <EDITS>
-  <EDITS_VERSION>-43</EDITS_VERSION>
+  <EDITS_VERSION>-45</EDITS_VERSION>
   <RECORD>
     <OPCODE>OP_START_LOG_SEGMENT</OPCODE>
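
Note appended after the patch (not part of the commit): the memory saving in HDFS-4908 comes from storing a snapshot's copy-on-write metadata as small immutable attribute objects (INodeFileAttributes.SnapshotCopy, INodeDirectoryAttributes.SnapshotCopy / CopyWithQuota) instead of full INodeFile/INodeDirectory copies carrying empty block and children lists. The sketch below is a self-contained, simplified illustration of the packing idea behind the patch's INodeFile.HeaderFormat comment, "[16 bits for replication][48 bits for PreferredBlockSize]", folded into one long. All class, field, and method names in the sketch (SnapshotFileAttributesSketch, FileAttributesCopy, combine, etc.) are invented for the example; it is not the Hadoop code itself.

// Illustrative sketch only -- not Hadoop code. It mirrors the documented layout
// of the patch's INodeFile.HeaderFormat: high 16 bits hold the replication
// factor, low 48 bits hold the preferred block size.
public class SnapshotFileAttributesSketch {

  static final int BLOCK_BITS = 48;                          // low 48 bits: preferred block size
  static final long BLOCK_SIZE_MASK = (1L << BLOCK_BITS) - 1;

  /** Pack replication and preferred block size into a single 64-bit header. */
  static long combine(short replication, long preferredBlockSize) {
    if (replication < 0 || preferredBlockSize < 0
        || preferredBlockSize > BLOCK_SIZE_MASK) {
      throw new IllegalArgumentException("value out of range");
    }
    return ((long) replication << BLOCK_BITS) | preferredBlockSize;
  }

  static short getReplication(long header) {
    return (short) (header >>> BLOCK_BITS);                  // high 16 bits
  }

  static long getPreferredBlockSize(long header) {
    return header & BLOCK_SIZE_MASK;                         // low 48 bits
  }

  /**
   * Immutable snapshot copy of file metadata, in the spirit of
   * INodeFileAttributes.SnapshotCopy: a name, a packed permission word,
   * two timestamps and the packed header -- no block list, no parent pointer.
   */
  static final class FileAttributesCopy {
    final byte[] name;
    final long permission;           // user/group/mode packed into one long elsewhere
    final long modificationTime;
    final long accessTime;
    final long header;               // replication + preferred block size

    FileAttributesCopy(byte[] name, long permission, long modificationTime,
        long accessTime, short replication, long preferredBlockSize) {
      this.name = name;
      this.permission = permission;
      this.modificationTime = modificationTime;
      this.accessTime = accessTime;
      this.header = combine(replication, preferredBlockSize);
    }

    short getFileReplication() {
      return getReplication(header);
    }

    long preferredBlockSize() {
      return getPreferredBlockSize(header);
    }
  }

  public static void main(String[] args) {
    FileAttributesCopy copy = new FileAttributesCopy("f1".getBytes(), 0L, 0L, 0L,
        (short) 3, 128L * 1024 * 1024);
    System.out.println(copy.getFileReplication());    // prints 3
    System.out.println(copy.preferredBlockSize());    // prints 134217728
  }
}

The directory-side analogue in the patch is the SnapshotCopy / CopyWithQuota split: a snapshot copy of a directory only carries the two quota longs when a quota was actually set (the loader checks nsQuota == -1L && dsQuota == -1L), so the common unquoted case stays at the smaller SnapshotCopy footprint.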