svn merge -c 1494858 from trunk for HDFS-4908. Reduce snapshot inode memory usage.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1494859 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Tsz-wo Sze 2013-06-20 05:12:42 +00:00
parent 0536aec248
commit 619f293bca
24 changed files with 524 additions and 106 deletions

View File

@ -20,6 +20,8 @@ Release 2.2.0 - UNRELEASED
IMPROVEMENTS IMPROVEMENTS
HDFS-4908. Reduce snapshot inode memory usage. (szetszwo)
OPTIMIZATIONS OPTIMIZATIONS
BUG FIXES BUG FIXES

View File

@ -98,7 +98,13 @@ public static enum Feature {
"add OP_UPDATE_BLOCKS"), "add OP_UPDATE_BLOCKS"),
RESERVED_REL1_2_0(-41, -32, "Reserved for release 1.2.0", true, CONCAT), RESERVED_REL1_2_0(-41, -32, "Reserved for release 1.2.0", true, CONCAT),
ADD_INODE_ID(-42, -40, "Assign a unique inode id for each inode", false), ADD_INODE_ID(-42, -40, "Assign a unique inode id for each inode", false),
SNAPSHOT(-43, "Support for snapshot feature"); SNAPSHOT(-43, "Support for snapshot feature"),
RESERVED_REL1_3_0(-44, -41,
"Reserved for release 1.3.0", true, ADD_INODE_ID, SNAPSHOT),
OPTIMIZE_SNAPSHOT_INODES(-45, -43,
"Reduce snapshot inode memory footprint", false);
final int lv; final int lv;
final int ancestorLV; final int ancestorLV;

View File

@ -694,6 +694,50 @@ INode loadINode(final byte[] localName, boolean isSnapshotINode,
throw new IOException("Unknown inode type: numBlocks=" + numBlocks); throw new IOException("Unknown inode type: numBlocks=" + numBlocks);
} }
/**
 * Load {@link INodeFileAttributes} from the fsimage stream.
 *
 * @param in the image stream, positioned at a serialized file-attributes record
 * @return the file attributes read from the stream
 * @throws IOException if the stream cannot be read
 */
public INodeFileAttributes loadINodeFileAttributes(DataInput in)
throws IOException {
final int layoutVersion = getLayoutVersion();
// Images older than OPTIMIZE_SNAPSHOT_INODES stored the snapshot copy as a
// complete inode; read it that way and view it as a file.
if (!LayoutVersion.supports(Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
return loadINodeWithLocalName(true, in, false).asFile();
}
// Newer images store only the attribute fields.  The read order below must
// match the write order in FSImageSerialization#writeINodeFileAttributes.
final byte[] name = FSImageSerialization.readLocalName(in);
final PermissionStatus permissions = PermissionStatus.read(in);
final long modificationTime = in.readLong();
final long accessTime = in.readLong();
// NOTE(review): presumably adjustReplication bounds the stored replication
// to the configured limits — confirm against BlockManager.
final short replication = namesystem.getBlockManager().adjustReplication(
in.readShort());
final long preferredBlockSize = in.readLong();
return new INodeFileAttributes.SnapshotCopy(name, permissions, modificationTime,
accessTime, replication, preferredBlockSize);
}
/**
 * Load {@link INodeDirectoryAttributes} from the fsimage stream.
 *
 * @param in the image stream, positioned at a serialized directory-attributes
 *           record
 * @return the directory attributes read from the stream
 * @throws IOException if the stream cannot be read
 */
public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)
throws IOException {
final int layoutVersion = getLayoutVersion();
// Images older than OPTIMIZE_SNAPSHOT_INODES stored the snapshot copy as a
// complete inode; read it that way and view it as a directory.
if (!LayoutVersion.supports(Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
return loadINodeWithLocalName(true, in, false).asDirectory();
}
// Read order must match the write order in
// FSImageSerialization#writeINodeDirectoryAttributes.
final byte[] name = FSImageSerialization.readLocalName(in);
final PermissionStatus permissions = PermissionStatus.read(in);
final long modificationTime = in.readLong();
//read quotas
final long nsQuota = in.readLong();
final long dsQuota = in.readLong();
// -1 for both quotas means no quota was set: use the plain SnapshotCopy;
// otherwise keep the explicit quota values in a CopyWithQuota.
return nsQuota == -1L && dsQuota == -1L?
new INodeDirectoryAttributes.SnapshotCopy(name, permissions, modificationTime)
: new INodeDirectoryAttributes.CopyWithQuota(name, permissions,
modificationTime, nsQuota, dsQuota);
}
private void loadFilesUnderConstruction(DataInput in, private void loadFilesUnderConstruction(DataInput in,
boolean supportSnapshot) throws IOException { boolean supportSnapshot) throws IOException {
FSDirectory fsDir = namesystem.dir; FSDirectory fsDir = namesystem.dir;

View File

@ -84,7 +84,7 @@ static private final class TLData {
final FsPermission FILE_PERM = new FsPermission((short) 0); final FsPermission FILE_PERM = new FsPermission((short) 0);
} }
private static void writePermissionStatus(INodeWithAdditionalFields inode, private static void writePermissionStatus(INodeAttributes inode,
DataOutput out) throws IOException { DataOutput out) throws IOException {
final FsPermission p = TL_DATA.get().FILE_PERM; final FsPermission p = TL_DATA.get().FILE_PERM;
p.fromShort(inode.getFsPermissionShort()); p.fromShort(inode.getFsPermissionShort());
@ -205,6 +205,18 @@ public static void writeINodeFile(INodeFile file, DataOutput out,
writePermissionStatus(file, out); writePermissionStatus(file, out);
} }
/**
 * Serialize an {@link INodeFileAttributes}.
 *
 * The field order (name, permission, modification time, access time,
 * replication, preferred block size) must match the read order in
 * FSImageFormat.Loader#loadINodeFileAttributes.
 *
 * @param file the file attributes to write
 * @param out the {@link DataOutput} where the fields are written
 */
public static void writeINodeFileAttributes(INodeFileAttributes file,
DataOutput out) throws IOException {
writeLocalName(file, out);
writePermissionStatus(file, out);
out.writeLong(file.getModificationTime());
out.writeLong(file.getAccessTime());
out.writeShort(file.getFileReplication());
out.writeLong(file.getPreferredBlockSize());
}
/** /**
* Serialize a {@link INodeDirectory} * Serialize a {@link INodeDirectory}
* @param node The node to write * @param node The node to write
@ -232,6 +244,21 @@ public static void writeINodeDirectory(INodeDirectory node, DataOutput out)
writePermissionStatus(node, out); writePermissionStatus(node, out);
} }
/**
 * Serialize an {@link INodeDirectoryAttributes}.
 *
 * The field order (name, permission, modification time, quotas) must match
 * the read order in FSImageFormat.Loader#loadINodeDirectoryAttributes.
 *
 * @param a The attributes to write
 * @param out The {@link DataOutput} where the fields are written
 */
public static void writeINodeDirectoryAttributes(
INodeDirectoryAttributes a, DataOutput out) throws IOException {
writeLocalName(a, out);
writePermissionStatus(a, out);
out.writeLong(a.getModificationTime());
// Both quotas are -1 when no quota is set (see
// INodeDirectoryAttributes.SnapshotCopy).
out.writeLong(a.getNsQuota());
out.writeLong(a.getDsQuota());
}
/** /**
* Serialize a {@link INodeSymlink} node * Serialize a {@link INodeSymlink} node
* @param node The node to write * @param node The node to write
@ -384,7 +411,7 @@ public static byte[] readLocalName(DataInput in) throws IOException {
return createdNodeName; return createdNodeName;
} }
private static void writeLocalName(INode inode, DataOutput out) private static void writeLocalName(INodeAttributes inode, DataOutput out)
throws IOException { throws IOException {
final byte[] name = inode.getLocalNameBytes(); final byte[] name = inode.getLocalNameBytes();
out.writeShort(name.length); out.writeShort(name.length);

View File

@ -179,8 +179,6 @@
import org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer; import org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer;
import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean; import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable.SnapshotDiffInfo; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable.SnapshotDiffInfo;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;

View File

@ -50,7 +50,7 @@
* directory inodes. * directory inodes.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public abstract class INode implements Diff.Element<byte[]> { public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
public static final Log LOG = LogFactory.getLog(INode.class); public static final Log LOG = LogFactory.getLog(INode.class);
/** parent is either an {@link INodeDirectory} or an {@link INodeReference}.*/ /** parent is either an {@link INodeDirectory} or an {@link INodeReference}.*/
@ -87,6 +87,7 @@ final PermissionStatus getPermissionStatus() {
abstract String getUserName(Snapshot snapshot); abstract String getUserName(Snapshot snapshot);
/** The same as getUserName(null). */ /** The same as getUserName(null). */
@Override
public final String getUserName() { public final String getUserName() {
return getUserName(null); return getUserName(null);
} }
@ -110,6 +111,7 @@ final INode setUser(String user, Snapshot latest, INodeMap inodeMap)
abstract String getGroupName(Snapshot snapshot); abstract String getGroupName(Snapshot snapshot);
/** The same as getGroupName(null). */ /** The same as getGroupName(null). */
@Override
public final String getGroupName() { public final String getGroupName() {
return getGroupName(null); return getGroupName(null);
} }
@ -134,6 +136,7 @@ final INode setGroup(String group, Snapshot latest, INodeMap inodeMap)
abstract FsPermission getFsPermission(Snapshot snapshot); abstract FsPermission getFsPermission(Snapshot snapshot);
/** The same as getFsPermission(null). */ /** The same as getFsPermission(null). */
@Override
public final FsPermission getFsPermission() { public final FsPermission getFsPermission() {
return getFsPermission(null); return getFsPermission(null);
} }
@ -153,7 +156,7 @@ INode setPermission(FsPermission permission, Snapshot latest,
* @return if the given snapshot is null, return this; * @return if the given snapshot is null, return this;
* otherwise return the corresponding snapshot inode. * otherwise return the corresponding snapshot inode.
*/ */
public INode getSnapshotINode(final Snapshot snapshot) { public INodeAttributes getSnapshotINode(final Snapshot snapshot) {
return this; return this;
} }
@ -464,12 +467,6 @@ public final String getLocalName() {
return name == null? null: DFSUtil.bytes2String(name); return name == null? null: DFSUtil.bytes2String(name);
} }
/**
* @return null if the local name is null;
* otherwise, return the local name byte array.
*/
public abstract byte[] getLocalNameBytes();
@Override @Override
public final byte[] getKey() { public final byte[] getKey() {
return getLocalNameBytes(); return getLocalNameBytes();
@ -555,6 +552,7 @@ public void clear() {
abstract long getModificationTime(Snapshot snapshot); abstract long getModificationTime(Snapshot snapshot);
/** The same as getModificationTime(null). */ /** The same as getModificationTime(null). */
@Override
public final long getModificationTime() { public final long getModificationTime() {
return getModificationTime(null); return getModificationTime(null);
} }
@ -583,6 +581,7 @@ public final INode setModificationTime(long modificationTime,
abstract long getAccessTime(Snapshot snapshot); abstract long getAccessTime(Snapshot snapshot);
/** The same as getAccessTime(null). */ /** The same as getAccessTime(null). */
@Override
public final long getAccessTime() { public final long getAccessTime() {
return getAccessTime(null); return getAccessTime(null);
} }

View File

@ -0,0 +1,121 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.server.namenode.INodeWithAdditionalFields.PermissionStatusFormat;
/**
 * A read-only view of the attributes common to every inode: the local name,
 * the packed permission information (user, group and mode), the modification
 * time and the access time.
 */
@InterfaceAudience.Private
public interface INodeAttributes {
  /**
   * @return null if the local name is null;
   *         otherwise, return the local name byte array.
   */
  public byte[] getLocalNameBytes();

  /** @return the user name. */
  public String getUserName();

  /** @return the group name. */
  public String getGroupName();

  /** @return the permission. */
  public FsPermission getFsPermission();

  /** @return the permission as a short. */
  public short getFsPermissionShort();

  /** @return the permission information as a long. */
  public long getPermissionLong();

  /** @return the modification time. */
  public long getModificationTime();

  /** @return the access time. */
  public long getAccessTime();

  /**
   * An immutable copy of the common inode attributes.  The permission
   * information is stored in the packed long format defined by
   * {@link INodeWithAdditionalFields.PermissionStatusFormat}.
   */
  public static abstract class SnapshotCopy implements INodeAttributes {
    private final byte[] localName;
    private final long packedPermission;
    private final long mtime;
    private final long atime;

    SnapshotCopy(byte[] name, PermissionStatus permissions,
        long modificationTime, long accessTime) {
      this.localName = name;
      this.packedPermission = PermissionStatusFormat.toLong(permissions);
      this.mtime = modificationTime;
      this.atime = accessTime;
    }

    /** Copy the attribute values out of the given inode. */
    SnapshotCopy(INode inode) {
      this.localName = inode.getLocalNameBytes();
      this.packedPermission = inode.getPermissionLong();
      this.mtime = inode.getModificationTime();
      this.atime = inode.getAccessTime();
    }

    @Override
    public final byte[] getLocalNameBytes() {
      return localName;
    }

    @Override
    public final String getUserName() {
      final int id = (int)PermissionStatusFormat.USER.retrieve(packedPermission);
      return SerialNumberManager.INSTANCE.getUser(id);
    }

    @Override
    public final String getGroupName() {
      final int id = (int)PermissionStatusFormat.GROUP.retrieve(packedPermission);
      return SerialNumberManager.INSTANCE.getGroup(id);
    }

    @Override
    public final FsPermission getFsPermission() {
      return new FsPermission(getFsPermissionShort());
    }

    @Override
    public final short getFsPermissionShort() {
      return (short)PermissionStatusFormat.MODE.retrieve(packedPermission);
    }

    @Override
    public long getPermissionLong() {
      return packedPermission;
    }

    @Override
    public final long getModificationTime() {
      return mtime;
    }

    @Override
    public final long getAccessTime() {
      return atime;
    }
  }
}

View File

@ -45,7 +45,8 @@
/** /**
* Directory INode class. * Directory INode class.
*/ */
public class INodeDirectory extends INodeWithAdditionalFields { public class INodeDirectory extends INodeWithAdditionalFields
implements INodeDirectoryAttributes {
/** Cast INode to INodeDirectory. */ /** Cast INode to INodeDirectory. */
public static INodeDirectory valueOf(INode inode, Object path public static INodeDirectory valueOf(INode inode, Object path
) throws FileNotFoundException, PathIsNotDirectoryException { ) throws FileNotFoundException, PathIsNotDirectoryException {
@ -558,12 +559,12 @@ public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
/** /**
* Compare the metadata with another INodeDirectory * Compare the metadata with another INodeDirectory
*/ */
public boolean metadataEquals(INodeDirectory other) { @Override
return other != null && getNsQuota() == other.getNsQuota() public boolean metadataEquals(INodeDirectoryAttributes other) {
return other != null
&& getNsQuota() == other.getNsQuota()
&& getDsQuota() == other.getDsQuota() && getDsQuota() == other.getDsQuota()
&& getUserName().equals(other.getUserName()) && getPermissionLong() == other.getPermissionLong();
&& getGroupName().equals(other.getGroupName())
&& getFsPermission().equals(other.getFsPermission());
} }
/* /*

View File

@ -0,0 +1,95 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.PermissionStatus;
import com.google.common.base.Preconditions;
/**
 * The attributes of a directory inode: the common inode attributes plus the
 * namespace and diskspace quotas.
 */
@InterfaceAudience.Private
public interface INodeDirectoryAttributes extends INodeAttributes {
  public long getNsQuota();

  public long getDsQuota();

  public boolean metadataEquals(INodeDirectoryAttributes other);

  /** A copy of the inode directory attributes */
  public static class SnapshotCopy extends INodeAttributes.SnapshotCopy
      implements INodeDirectoryAttributes {
    public SnapshotCopy(byte[] name, PermissionStatus permissions,
        long modificationTime) {
      // No access time is recorded for directory copies; 0 is stored.
      super(name, permissions, modificationTime, 0L);
    }

    public SnapshotCopy(INodeDirectory dir) {
      super(dir);
    }

    @Override
    public long getNsQuota() {
      return -1;
    }

    @Override
    public long getDsQuota() {
      return -1;
    }

    @Override
    public boolean metadataEquals(INodeDirectoryAttributes other) {
      // Only quotas and the packed permission word are compared.
      if (other == null) {
        return false;
      }
      return getNsQuota() == other.getNsQuota()
          && getDsQuota() == other.getDsQuota()
          && getPermissionLong() == other.getPermissionLong();
    }
  }

  /** Directory attributes that also carry explicitly set quotas. */
  public static class CopyWithQuota extends INodeDirectoryAttributes.SnapshotCopy {
    private final long namespaceQuota;
    private final long diskspaceQuota;

    public CopyWithQuota(byte[] name, PermissionStatus permissions,
        long modificationTime, long nsQuota, long dsQuota) {
      super(name, permissions, modificationTime);
      this.namespaceQuota = nsQuota;
      this.diskspaceQuota = dsQuota;
    }

    public CopyWithQuota(INodeDirectory dir) {
      super(dir);
      Preconditions.checkArgument(dir.isQuotaSet());
      this.namespaceQuota = dir.getNsQuota();
      this.diskspaceQuota = dir.getDsQuota();
    }

    @Override
    public final long getNsQuota() {
      return namespaceQuota;
    }

    @Override
    public final long getDsQuota() {
      return diskspaceQuota;
    }
  }
}

View File

@ -43,7 +43,8 @@
/** I-node for closed file. */ /** I-node for closed file. */
@InterfaceAudience.Private @InterfaceAudience.Private
public class INodeFile extends INodeWithAdditionalFields implements BlockCollection { public class INodeFile extends INodeWithAdditionalFields
implements INodeFileAttributes, BlockCollection {
/** The same as valueOf(inode, path, false). */ /** The same as valueOf(inode, path, false). */
public static INodeFile valueOf(INode inode, String path public static INodeFile valueOf(INode inode, String path
) throws FileNotFoundException { ) throws FileNotFoundException {
@ -67,7 +68,7 @@ public static INodeFile valueOf(INode inode, String path, boolean acceptNull)
} }
/** Format: [16 bits for replication][48 bits for PreferredBlockSize] */ /** Format: [16 bits for replication][48 bits for PreferredBlockSize] */
private static class HeaderFormat { static class HeaderFormat {
/** Number of bits for Block size */ /** Number of bits for Block size */
static final int BLOCKBITS = 48; static final int BLOCKBITS = 48;
/** Header mask 64-bit representation */ /** Header mask 64-bit representation */
@ -148,7 +149,7 @@ public INodeFileUnderConstruction toUnderConstruction(
} }
@Override @Override
public INodeFile getSnapshotINode(final Snapshot snapshot) { public INodeFileAttributes getSnapshotINode(final Snapshot snapshot) {
return this; return this;
} }
@ -175,6 +176,7 @@ public final short getFileReplication(Snapshot snapshot) {
} }
/** The same as getFileReplication(null). */ /** The same as getFileReplication(null). */
@Override
public final short getFileReplication() { public final short getFileReplication() {
return getFileReplication(null); return getFileReplication(null);
} }
@ -205,6 +207,11 @@ public long getPreferredBlockSize() {
return HeaderFormat.getPreferredBlockSize(header); return HeaderFormat.getPreferredBlockSize(header);
} }
@Override
public long getHeaderLong() {
return header;
}
/** @return the diskspace required for a full block. */ /** @return the diskspace required for a full block. */
final long getBlockDiskspace() { final long getBlockDiskspace() {
return getPreferredBlockSize() * getBlockReplication(); return getPreferredBlockSize() * getBlockReplication();

View File

@ -0,0 +1,72 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.server.namenode.INodeFile.HeaderFormat;
/**
 * The attributes of a file inode: the common inode attributes plus the
 * replication factor and the preferred block size, packed into one header
 * long.
 */
@InterfaceAudience.Private
public interface INodeFileAttributes extends INodeAttributes {
  /** @return the file replication. */
  public short getFileReplication();

  /** @return preferred block size in bytes */
  public long getPreferredBlockSize();

  /** @return the header as a long. */
  public long getHeaderLong();

  /** A copy of the inode file attributes */
  public static class SnapshotCopy extends INodeAttributes.SnapshotCopy
      implements INodeFileAttributes {
    /** Replication and preferred block size packed per {@link HeaderFormat}. */
    private final long headerBits;

    public SnapshotCopy(byte[] name, PermissionStatus permissions,
        long modificationTime, long accessTime,
        short replication, long preferredBlockSize) {
      super(name, permissions, modificationTime, accessTime);
      long packed = HeaderFormat.combineReplication(0L, replication);
      packed = HeaderFormat.combinePreferredBlockSize(packed, preferredBlockSize);
      this.headerBits = packed;
    }

    public SnapshotCopy(INodeFile file) {
      super(file);
      this.headerBits = file.getHeaderLong();
    }

    @Override
    public short getFileReplication() {
      return HeaderFormat.getReplication(headerBits);
    }

    @Override
    public long getPreferredBlockSize() {
      return HeaderFormat.getPreferredBlockSize(headerBits);
    }

    @Override
    public long getHeaderLong() {
      return headerBits;
    }
  }
}

View File

@ -212,12 +212,21 @@ final void setGroup(String group) {
public final FsPermission getFsPermission(Snapshot snapshot) { public final FsPermission getFsPermission(Snapshot snapshot) {
return referred.getFsPermission(snapshot); return referred.getFsPermission(snapshot);
} }
@Override
public final short getFsPermissionShort() {
return referred.getFsPermissionShort();
}
@Override @Override
void setPermission(FsPermission permission) { void setPermission(FsPermission permission) {
referred.setPermission(permission); referred.setPermission(permission);
} }
@Override
public long getPermissionLong() {
return referred.getPermissionLong();
}
@Override @Override
public final long getModificationTime(Snapshot snapshot) { public final long getModificationTime(Snapshot snapshot) {
return referred.getModificationTime(snapshot); return referred.getModificationTime(snapshot);
@ -280,7 +289,7 @@ public Quota.Counts computeQuotaUsage(Quota.Counts counts, boolean useCache,
} }
@Override @Override
public final INode getSnapshotINode(Snapshot snapshot) { public final INodeAttributes getSnapshotINode(Snapshot snapshot) {
return referred.getSnapshotINode(snapshot); return referred.getSnapshotINode(snapshot);
} }

View File

@ -33,7 +33,7 @@
@InterfaceAudience.Private @InterfaceAudience.Private
public abstract class INodeWithAdditionalFields extends INode public abstract class INodeWithAdditionalFields extends INode
implements LinkedElement { implements LinkedElement {
private static enum PermissionStatusFormat { static enum PermissionStatusFormat {
MODE(0, 16), MODE(0, 16),
GROUP(MODE.OFFSET + MODE.LENGTH, 25), GROUP(MODE.OFFSET + MODE.LENGTH, 25),
USER(GROUP.OFFSET + GROUP.LENGTH, 23); USER(GROUP.OFFSET + GROUP.LENGTH, 23);
@ -197,11 +197,11 @@ final FsPermission getFsPermission(Snapshot snapshot) {
return getSnapshotINode(snapshot).getFsPermission(); return getSnapshotINode(snapshot).getFsPermission();
} }
return new FsPermission( return new FsPermission(getFsPermissionShort());
(short)PermissionStatusFormat.MODE.retrieve(permission));
} }
final short getFsPermissionShort() { @Override
public final short getFsPermissionShort() {
return (short)PermissionStatusFormat.MODE.retrieve(permission); return (short)PermissionStatusFormat.MODE.retrieve(permission);
} }
@Override @Override
@ -210,10 +210,15 @@ void setPermission(FsPermission permission) {
updatePermissionStatus(PermissionStatusFormat.MODE, mode); updatePermissionStatus(PermissionStatusFormat.MODE, mode);
} }
@Override
public long getPermissionLong() {
return permission;
}
@Override @Override
final long getModificationTime(Snapshot snapshot) { final long getModificationTime(Snapshot snapshot) {
if (snapshot != null) { if (snapshot != null) {
return getSnapshotINode(snapshot).getModificationTime(null); return getSnapshotINode(snapshot).getModificationTime();
} }
return this.modificationTime; return this.modificationTime;
@ -242,7 +247,7 @@ public final void setModificationTime(long modificationTime) {
@Override @Override
final long getAccessTime(Snapshot snapshot) { final long getAccessTime(Snapshot snapshot) {
if (snapshot != null) { if (snapshot != null) {
return getSnapshotINode(snapshot).getAccessTime(null); return getSnapshotINode(snapshot).getAccessTime();
} }
return accessTime; return accessTime;

View File

@ -22,6 +22,7 @@
import java.util.List; import java.util.List;
import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.Quota; import org.apache.hadoop.hdfs.server.namenode.Quota;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
@ -47,13 +48,14 @@
* </pre> * </pre>
*/ */
abstract class AbstractINodeDiff<N extends INode, abstract class AbstractINodeDiff<N extends INode,
D extends AbstractINodeDiff<N, D>> A extends INodeAttributes,
D extends AbstractINodeDiff<N, A, D>>
implements Comparable<Integer> { implements Comparable<Integer> {
/** The snapshot will be obtained after this diff is applied. */ /** The snapshot will be obtained after this diff is applied. */
Snapshot snapshot; Snapshot snapshot;
/** The snapshot inode data. It is null when there is no change. */ /** The snapshot inode data. It is null when there is no change. */
N snapshotINode; A snapshotINode;
/** /**
* Posterior diff is the diff happened after this diff. * Posterior diff is the diff happened after this diff.
* The posterior diff should be first applied to obtain the posterior * The posterior diff should be first applied to obtain the posterior
@ -62,7 +64,7 @@ abstract class AbstractINodeDiff<N extends INode,
*/ */
private D posteriorDiff; private D posteriorDiff;
AbstractINodeDiff(Snapshot snapshot, N snapshotINode, D posteriorDiff) { AbstractINodeDiff(Snapshot snapshot, A snapshotINode, D posteriorDiff) {
Preconditions.checkNotNull(snapshot, "snapshot is null"); Preconditions.checkNotNull(snapshot, "snapshot is null");
this.snapshot = snapshot; this.snapshot = snapshot;
@ -96,16 +98,16 @@ final void setPosterior(D posterior) {
} }
/** Save the INode state to the snapshot if it is not done already. */ /** Save the INode state to the snapshot if it is not done already. */
void saveSnapshotCopy(N snapshotCopy, N currentINode) { void saveSnapshotCopy(A snapshotCopy, N currentINode) {
Preconditions.checkState(snapshotINode == null, "Expected snapshotINode to be null"); Preconditions.checkState(snapshotINode == null, "Expected snapshotINode to be null");
snapshotINode = snapshotCopy; snapshotINode = snapshotCopy;
} }
/** @return the inode corresponding to the snapshot. */ /** @return the inode corresponding to the snapshot. */
N getSnapshotINode() { A getSnapshotINode() {
// get from this diff, then the posterior diff // get from this diff, then the posterior diff
// and then null for the current inode // and then null for the current inode
for(AbstractINodeDiff<N, D> d = this; ; d = d.posteriorDiff) { for(AbstractINodeDiff<N, A, D> d = this; ; d = d.posteriorDiff) {
if (d.snapshotINode != null) { if (d.snapshotINode != null) {
return d.snapshotINode; return d.snapshotINode;
} else if (d.posteriorDiff == null) { } else if (d.posteriorDiff == null) {

View File

@ -25,6 +25,7 @@
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException; import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.Quota; import org.apache.hadoop.hdfs.server.namenode.Quota;
@ -35,7 +36,8 @@
* @param <D> The diff type, which must extend {@link AbstractINodeDiff}. * @param <D> The diff type, which must extend {@link AbstractINodeDiff}.
*/ */
abstract class AbstractINodeDiffList<N extends INode, abstract class AbstractINodeDiffList<N extends INode,
D extends AbstractINodeDiff<N, D>> A extends INodeAttributes,
D extends AbstractINodeDiff<N, A, D>>
implements Iterable<D> { implements Iterable<D> {
/** Diff list sorted by snapshot IDs, i.e. in chronological order. */ /** Diff list sorted by snapshot IDs, i.e. in chronological order. */
private final List<D> diffs = new ArrayList<D>(); private final List<D> diffs = new ArrayList<D>();
@ -54,7 +56,7 @@ public void clear() {
abstract D createDiff(Snapshot snapshot, N currentINode); abstract D createDiff(Snapshot snapshot, N currentINode);
/** @return a snapshot copy of the current inode. */ /** @return a snapshot copy of the current inode. */
abstract N createSnapshotCopy(N currentINode); abstract A createSnapshotCopy(N currentINode);
/** /**
* Delete a snapshot. The synchronization of the diff list will be done * Delete a snapshot. The synchronization of the diff list will be done
@ -93,7 +95,7 @@ public final Quota.Counts deleteSnapshotDiff(final Snapshot snapshot,
collectedBlocks, removedINodes)); collectedBlocks, removedINodes));
} }
} else if (snapshotIndex > 0) { } else if (snapshotIndex > 0) {
final AbstractINodeDiff<N, D> previous = diffs.get(snapshotIndex - 1); final AbstractINodeDiff<N, A, D> previous = diffs.get(snapshotIndex - 1);
if (!previous.getSnapshot().equals(prior)) { if (!previous.getSnapshot().equals(prior)) {
diffs.get(snapshotIndex).setSnapshot(prior); diffs.get(snapshotIndex).setSnapshot(prior);
} else { } else {
@ -106,9 +108,8 @@ public final Quota.Counts deleteSnapshotDiff(final Snapshot snapshot,
} }
if (previous.snapshotINode == null) { if (previous.snapshotINode == null) {
previous.snapshotINode = removed.snapshotINode; previous.snapshotINode = removed.snapshotINode;
} else if (removed.snapshotINode != null) {
removed.snapshotINode.clear();
} }
counts.add(previous.combinePosteriorAndCollectBlocks( counts.add(previous.combinePosteriorAndCollectBlocks(
currentINode, removed, collectedBlocks, removedINodes)); currentINode, removed, collectedBlocks, removedINodes));
previous.setPosterior(removed.getPosterior()); previous.setPosterior(removed.getPosterior());
@ -150,7 +151,7 @@ public final D getLast() {
/** @return the last snapshot. */ /** @return the last snapshot. */
public final Snapshot getLastSnapshot() { public final Snapshot getLastSnapshot() {
final AbstractINodeDiff<N, D> last = getLast(); final AbstractINodeDiff<N, A, D> last = getLast();
return last == null? null: last.getSnapshot(); return last == null? null: last.getSnapshot();
} }
@ -270,9 +271,9 @@ final boolean changedBetweenSnapshots(Snapshot earlier, Snapshot later) {
* Note that the current inode is returned if there is no change * Note that the current inode is returned if there is no change
* between the given snapshot and the current state. * between the given snapshot and the current state.
*/ */
N getSnapshotINode(final Snapshot snapshot, final N currentINode) { A getSnapshotINode(final Snapshot snapshot, final A currentINode) {
final D diff = getDiff(snapshot); final D diff = getDiff(snapshot);
final N inode = diff == null? null: diff.getSnapshotINode(); final A inode = diff == null? null: diff.getSnapshotINode();
return inode == null? currentINode: inode; return inode == null? currentINode: inode;
} }
@ -297,7 +298,7 @@ final D checkAndAddLatestSnapshotDiff(Snapshot latest, N currentINode)
} }
/** Save the snapshot copy to the latest snapshot. */ /** Save the snapshot copy to the latest snapshot. */
public void saveSelf2Snapshot(Snapshot latest, N currentINode, N snapshotCopy) public void saveSelf2Snapshot(Snapshot latest, N currentINode, A snapshotCopy)
throws QuotaExceededException { throws QuotaExceededException {
if (latest != null) { if (latest != null) {
D diff = checkAndAddLatestSnapshotDiff(latest, currentINode); D diff = checkAndAddLatestSnapshotDiff(latest, currentINode);

View File

@ -24,10 +24,10 @@
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization; import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction; import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
import org.apache.hadoop.hdfs.server.namenode.Quota; import org.apache.hadoop.hdfs.server.namenode.Quota;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
@ -39,7 +39,7 @@ public interface FileWithSnapshot {
/** /**
* The difference of an {@link INodeFile} between two snapshots. * The difference of an {@link INodeFile} between two snapshots.
*/ */
public static class FileDiff extends AbstractINodeDiff<INodeFile, FileDiff> { public static class FileDiff extends AbstractINodeDiff<INodeFile, INodeFileAttributes, FileDiff> {
/** The file size at snapshot creation time. */ /** The file size at snapshot creation time. */
private final long fileSize; private final long fileSize;
@ -49,7 +49,7 @@ private FileDiff(Snapshot snapshot, INodeFile file) {
} }
/** Constructor used by FSImage loading */ /** Constructor used by FSImage loading */
FileDiff(Snapshot snapshot, INodeFile snapshotINode, FileDiff(Snapshot snapshot, INodeFileAttributes snapshotINode,
FileDiff posteriorDiff, long fileSize) { FileDiff posteriorDiff, long fileSize) {
super(snapshot, snapshotINode, posteriorDiff); super(snapshot, snapshotINode, posteriorDiff);
this.fileSize = fileSize; this.fileSize = fileSize;
@ -104,7 +104,7 @@ void write(DataOutput out, ReferenceMap referenceMap) throws IOException {
// write snapshotINode // write snapshotINode
if (snapshotINode != null) { if (snapshotINode != null) {
out.writeBoolean(true); out.writeBoolean(true);
FSImageSerialization.writeINodeFile(snapshotINode, out, true); FSImageSerialization.writeINodeFileAttributes(snapshotINode, out);
} else { } else {
out.writeBoolean(false); out.writeBoolean(false);
} }
@ -120,7 +120,7 @@ Quota.Counts destroyDiffAndCollectBlocks(INodeFile currentINode,
/** A list of FileDiffs for storing snapshot data. */ /** A list of FileDiffs for storing snapshot data. */
public static class FileDiffList public static class FileDiffList
extends AbstractINodeDiffList<INodeFile, FileDiff> { extends AbstractINodeDiffList<INodeFile, INodeFileAttributes, FileDiff> {
@Override @Override
FileDiff createDiff(Snapshot snapshot, INodeFile file) { FileDiff createDiff(Snapshot snapshot, INodeFile file) {
@ -128,21 +128,8 @@ FileDiff createDiff(Snapshot snapshot, INodeFile file) {
} }
@Override @Override
INodeFile createSnapshotCopy(INodeFile currentINode) { INodeFileAttributes createSnapshotCopy(INodeFile currentINode) {
if (currentINode instanceof INodeFileUnderConstructionWithSnapshot) { return new INodeFileAttributes.SnapshotCopy(currentINode);
final INodeFileUnderConstruction uc =
(INodeFileUnderConstruction) currentINode;
final INodeFileUnderConstruction copy = new INodeFileUnderConstruction(
uc, uc.getClientName(), uc.getClientMachine(), uc.getClientNode());
copy.setBlocks(null);
return copy;
} else {
final INodeFile copy = new INodeFile(currentINode);
copy.setBlocks(null);
return copy;
}
} }
} }

View File

@ -35,6 +35,7 @@
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization; import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota; import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
import org.apache.hadoop.hdfs.server.namenode.INodeMap; import org.apache.hadoop.hdfs.server.namenode.INodeMap;
import org.apache.hadoop.hdfs.server.namenode.INodeReference; import org.apache.hadoop.hdfs.server.namenode.INodeReference;
@ -224,7 +225,7 @@ public List<DiffReportEntry> generateReport(byte[][] parentPath,
* The difference of an {@link INodeDirectory} between two snapshots. * The difference of an {@link INodeDirectory} between two snapshots.
*/ */
public static class DirectoryDiff extends public static class DirectoryDiff extends
AbstractINodeDiff<INodeDirectory, DirectoryDiff> { AbstractINodeDiff<INodeDirectory, INodeDirectoryAttributes, DirectoryDiff> {
/** The size of the children list at snapshot creation time. */ /** The size of the children list at snapshot creation time. */
private final int childrenSize; private final int childrenSize;
/** The children list diff. */ /** The children list diff. */
@ -238,7 +239,7 @@ private DirectoryDiff(Snapshot snapshot, INodeDirectory dir) {
} }
/** Constructor used by FSImage loading */ /** Constructor used by FSImage loading */
DirectoryDiff(Snapshot snapshot, INodeDirectory snapshotINode, DirectoryDiff(Snapshot snapshot, INodeDirectoryAttributes snapshotINode,
DirectoryDiff posteriorDiff, int childrenSize, DirectoryDiff posteriorDiff, int childrenSize,
List<INode> createdList, List<INode> deletedList) { List<INode> createdList, List<INode> deletedList) {
super(snapshot, snapshotINode, posteriorDiff); super(snapshot, snapshotINode, posteriorDiff);
@ -352,7 +353,7 @@ void write(DataOutput out, ReferenceMap referenceMap) throws IOException {
out.writeBoolean(false); out.writeBoolean(false);
if (snapshotINode != null) { if (snapshotINode != null) {
out.writeBoolean(true); out.writeBoolean(true);
FSImageSerialization.writeINodeDirectory(snapshotINode, out); FSImageSerialization.writeINodeDirectoryAttributes(snapshotINode, out);
} else { } else {
out.writeBoolean(false); out.writeBoolean(false);
} }
@ -373,7 +374,7 @@ Quota.Counts destroyDiffAndCollectBlocks(INodeDirectory currentINode,
/** A list of directory diffs. */ /** A list of directory diffs. */
public static class DirectoryDiffList public static class DirectoryDiffList
extends AbstractINodeDiffList<INodeDirectory, DirectoryDiff> { extends AbstractINodeDiffList<INodeDirectory, INodeDirectoryAttributes, DirectoryDiff> {
@Override @Override
DirectoryDiff createDiff(Snapshot snapshot, INodeDirectory currentDir) { DirectoryDiff createDiff(Snapshot snapshot, INodeDirectory currentDir) {
@ -381,13 +382,10 @@ DirectoryDiff createDiff(Snapshot snapshot, INodeDirectory currentDir) {
} }
@Override @Override
INodeDirectory createSnapshotCopy(INodeDirectory currentDir) { INodeDirectoryAttributes createSnapshotCopy(INodeDirectory currentDir) {
final INodeDirectory copy = currentDir.isQuotaSet()? return currentDir.isQuotaSet()?
new INodeDirectoryWithQuota(currentDir, false, new INodeDirectoryAttributes.CopyWithQuota(currentDir)
currentDir.getNsQuota(), currentDir.getDsQuota()) : new INodeDirectoryAttributes.SnapshotCopy(currentDir);
: new INodeDirectory(currentDir, false);
copy.clearChildren();
return copy;
} }
/** Replace the given child in the created/deleted list, if there is any. */ /** Replace the given child in the created/deleted list, if there is any. */
@ -454,7 +452,7 @@ boolean computeDiffBetweenSnapshots(Snapshot fromSnapshot,
: laterDiffIndex; : laterDiffIndex;
boolean dirMetadataChanged = false; boolean dirMetadataChanged = false;
INodeDirectory dirCopy = null; INodeDirectoryAttributes dirCopy = null;
for (int i = earlierDiffIndex; i < laterDiffIndex; i++) { for (int i = earlierDiffIndex; i < laterDiffIndex; i++) {
DirectoryDiff sdiff = difflist.get(i); DirectoryDiff sdiff = difflist.get(i);
diff.combinePosterior(sdiff.diff, null); diff.combinePosterior(sdiff.diff, null);
@ -506,7 +504,7 @@ public DirectoryDiffList getDiffs() {
} }
@Override @Override
public INodeDirectory getSnapshotINode(Snapshot snapshot) { public INodeDirectoryAttributes getSnapshotINode(Snapshot snapshot) {
return diffs.getSnapshotINode(snapshot, this); return diffs.getSnapshotINode(snapshot, this);
} }

View File

@ -24,6 +24,7 @@
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction; import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
import org.apache.hadoop.hdfs.server.namenode.INodeMap; import org.apache.hadoop.hdfs.server.namenode.INodeMap;
import org.apache.hadoop.hdfs.server.namenode.Quota; import org.apache.hadoop.hdfs.server.namenode.Quota;
@ -78,7 +79,7 @@ public void deleteCurrentFile() {
} }
@Override @Override
public INodeFile getSnapshotINode(Snapshot snapshot) { public INodeFileAttributes getSnapshotINode(Snapshot snapshot) {
return diffs.getSnapshotINode(snapshot, this); return diffs.getSnapshotINode(snapshot, this);
} }

View File

@ -24,6 +24,7 @@
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeMap; import org.apache.hadoop.hdfs.server.namenode.INodeMap;
import org.apache.hadoop.hdfs.server.namenode.Quota; import org.apache.hadoop.hdfs.server.namenode.Quota;
@ -66,7 +67,7 @@ public void deleteCurrentFile() {
} }
@Override @Override
public INodeFile getSnapshotINode(Snapshot snapshot) { public INodeFileAttributes getSnapshotINode(Snapshot snapshot) {
return diffs.getSnapshotINode(snapshot, this); return diffs.getSnapshotINode(snapshot, this);
} }

View File

@ -30,8 +30,11 @@
import org.apache.hadoop.hdfs.server.namenode.FSImageFormat.Loader; import org.apache.hadoop.hdfs.server.namenode.FSImageFormat.Loader;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization; import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
import org.apache.hadoop.hdfs.server.namenode.INodeReference; import org.apache.hadoop.hdfs.server.namenode.INodeReference;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff; import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList; import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList;
@ -70,8 +73,8 @@ public static void saveSnapshots(INodeDirectorySnapshottable current,
* @param sNode The directory that the SnapshotDiff list belongs to. * @param sNode The directory that the SnapshotDiff list belongs to.
* @param out The {@link DataOutput} to write. * @param out The {@link DataOutput} to write.
*/ */
private static <N extends INode, D extends AbstractINodeDiff<N, D>> private static <N extends INode, A extends INodeAttributes, D extends AbstractINodeDiff<N, A, D>>
void saveINodeDiffs(final AbstractINodeDiffList<N, D> diffs, void saveINodeDiffs(final AbstractINodeDiffList<N, A, D> diffs,
final DataOutput out, ReferenceMap referenceMap) throws IOException { final DataOutput out, ReferenceMap referenceMap) throws IOException {
// Record the diffs in reversed order, so that we can find the correct // Record the diffs in reversed order, so that we can find the correct
// reference for INodes in the created list when loading the FSImage // reference for INodes in the created list when loading the FSImage
@ -126,8 +129,8 @@ private static FileDiff loadFileDiff(FileDiff posterior, DataInput in,
final long fileSize = in.readLong(); final long fileSize = in.readLong();
// 3. Load snapshotINode // 3. Load snapshotINode
final INodeFile snapshotINode = in.readBoolean()? final INodeFileAttributes snapshotINode = in.readBoolean()?
loader.loadINodeWithLocalName(true, in, false).asFile(): null; loader.loadINodeFileAttributes(in): null;
return new FileDiff(snapshot, snapshotINode, posterior, fileSize); return new FileDiff(snapshot, snapshotINode, posterior, fileSize);
} }
@ -253,7 +256,7 @@ public static void loadDirectoryDiffList(INodeDirectory dir,
* using. * using.
* @return The snapshotINode. * @return The snapshotINode.
*/ */
private static INodeDirectory loadSnapshotINodeInDirectoryDiff( private static INodeDirectoryAttributes loadSnapshotINodeInDirectoryDiff(
Snapshot snapshot, DataInput in, FSImageFormat.Loader loader) Snapshot snapshot, DataInput in, FSImageFormat.Loader loader)
throws IOException { throws IOException {
// read the boolean indicating whether snapshotINode == Snapshot.Root // read the boolean indicating whether snapshotINode == Snapshot.Root
@ -262,8 +265,7 @@ private static INodeDirectory loadSnapshotINodeInDirectoryDiff(
return snapshot.getRoot(); return snapshot.getRoot();
} else { } else {
// another boolean is used to indicate whether snapshotINode is non-null // another boolean is used to indicate whether snapshotINode is non-null
return in.readBoolean()? return in.readBoolean()? loader.loadINodeDirectoryAttributes(in): null;
loader.loadINodeWithLocalName(true, in, false).asDirectory(): null;
} }
} }
@ -285,8 +287,8 @@ private static DirectoryDiff loadDirectoryDiff(
int childrenSize = in.readInt(); int childrenSize = in.readInt();
// 3. Load DirectoryDiff#snapshotINode // 3. Load DirectoryDiff#snapshotINode
INodeDirectory snapshotINode = loadSnapshotINodeInDirectoryDiff(snapshot, INodeDirectoryAttributes snapshotINode = loadSnapshotINodeInDirectoryDiff(
in, loader); snapshot, in, loader);
// 4. Load the created list in SnapshotDiff#Diff // 4. Load the created list in SnapshotDiff#Diff
List<INode> createdList = loadCreatedList(parent, in); List<INode> createdList = loadCreatedList(parent, in);

View File

@ -126,7 +126,7 @@ class ImageLoaderCurrent implements ImageLoader {
new SimpleDateFormat("yyyy-MM-dd HH:mm"); new SimpleDateFormat("yyyy-MM-dd HH:mm");
private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23, private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
-24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39, -24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39,
-40, -41, -42, -43}; -40, -41, -42, -43, -44, -45};
private int imageVersion = 0; private int imageVersion = 0;
private final Map<Long, String> subtreeMap = new HashMap<Long, String>(); private final Map<Long, String> subtreeMap = new HashMap<Long, String>();
@ -531,8 +531,12 @@ private void processDirectoryDiff(DataInputStream in, ImageVisitor v,
boolean useRoot = in.readBoolean(); boolean useRoot = in.readBoolean();
if (!useRoot) { if (!useRoot) {
if (in.readBoolean()) { if (in.readBoolean()) {
v.visitEnclosingElement(ImageElement.SNAPSHOT_DIFF_SNAPSHOTINODE); v.visitEnclosingElement(ImageElement.SNAPSHOT_INODE_DIRECTORY_ATTRIBUTES);
processINode(in, v, true, currentINodeName, true); if (LayoutVersion.supports(Feature.OPTIMIZE_SNAPSHOT_INODES, imageVersion)) {
processINodeDirectoryAttributes(in, v, currentINodeName);
} else {
processINode(in, v, true, currentINodeName, true);
}
v.leaveEnclosingElement(); v.leaveEnclosingElement();
} }
} }
@ -560,6 +564,17 @@ private void processDirectoryDiff(DataInputStream in, ImageVisitor v,
v.leaveEnclosingElement(); v.leaveEnclosingElement();
} }
private void processINodeDirectoryAttributes(DataInputStream in, ImageVisitor v,
String parentName) throws IOException {
final String pathName = readINodePath(in, parentName);
v.visit(ImageElement.INODE_PATH, pathName);
processPermission(in, v);
v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
v.visit(ImageElement.NS_QUOTA, in.readLong());
v.visit(ImageElement.DS_QUOTA, in.readLong());
}
/** Process children under a directory */ /** Process children under a directory */
private int processChildren(DataInputStream in, ImageVisitor v, private int processChildren(DataInputStream in, ImageVisitor v,
boolean skipBlocks, String parentName) throws IOException { boolean skipBlocks, String parentName) throws IOException {
@ -586,6 +601,18 @@ private void processFullNameINodes(DataInputStream in, ImageVisitor v,
} }
} }
private String readINodePath(DataInputStream in, String parentName)
throws IOException {
String pathName = FSImageSerialization.readString(in);
if (parentName != null) { // local name
pathName = "/" + pathName;
if (!"/".equals(parentName)) { // children of non-root directory
pathName = parentName + pathName;
}
}
return pathName;
}
/** /**
* Process an INode * Process an INode
* *
@ -605,16 +632,10 @@ private void processINode(DataInputStream in, ImageVisitor v,
LayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion); LayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion);
v.visitEnclosingElement(ImageElement.INODE); v.visitEnclosingElement(ImageElement.INODE);
String pathName = FSImageSerialization.readString(in); final String pathName = readINodePath(in, parentName);
if (parentName != null) { // local name v.visit(ImageElement.INODE_PATH, pathName);
pathName = "/" + pathName;
if (!"/".equals(parentName)) { // children of non-root directory
pathName = parentName + pathName;
}
}
long inodeId = INodeId.GRANDFATHER_INODE_ID; long inodeId = INodeId.GRANDFATHER_INODE_ID;
v.visit(ImageElement.INODE_PATH, pathName);
if (supportInodeId) { if (supportInodeId) {
inodeId = in.readLong(); inodeId = in.readLong();
v.visit(ImageElement.INODE_ID, inodeId); v.visit(ImageElement.INODE_ID, inodeId);
@ -684,6 +705,20 @@ private void processINode(DataInputStream in, ImageVisitor v,
v.leaveEnclosingElement(); // INode v.leaveEnclosingElement(); // INode
} }
private void processINodeFileAttributes(DataInputStream in, ImageVisitor v,
String parentName) throws IOException {
final String pathName = readINodePath(in, parentName);
v.visit(ImageElement.INODE_PATH, pathName);
processPermission(in, v);
v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
if(LayoutVersion.supports(Feature.FILE_ACCESS_TIME, imageVersion)) {
v.visit(ImageElement.ACCESS_TIME, formatDate(in.readLong()));
}
v.visit(ImageElement.REPLICATION, in.readShort());
v.visit(ImageElement.BLOCK_SIZE, in.readLong());
}
private void processFileDiffList(DataInputStream in, ImageVisitor v, private void processFileDiffList(DataInputStream in, ImageVisitor v,
String currentINodeName) throws IOException { String currentINodeName) throws IOException {
final int size = in.readInt(); final int size = in.readInt();
@ -704,8 +739,12 @@ private void processFileDiff(DataInputStream in, ImageVisitor v,
ImageElement.SNAPSHOT_DIFF_SNAPSHOTID, snapshotId); ImageElement.SNAPSHOT_DIFF_SNAPSHOTID, snapshotId);
v.visit(ImageElement.SNAPSHOT_FILE_SIZE, in.readLong()); v.visit(ImageElement.SNAPSHOT_FILE_SIZE, in.readLong());
if (in.readBoolean()) { if (in.readBoolean()) {
v.visitEnclosingElement(ImageElement.SNAPSHOT_DIFF_SNAPSHOTINODE); v.visitEnclosingElement(ImageElement.SNAPSHOT_INODE_FILE_ATTRIBUTES);
processINode(in, v, true, currentINodeName, true); if (LayoutVersion.supports(Feature.OPTIMIZE_SNAPSHOT_INODES, imageVersion)) {
processINodeFileAttributes(in, v, currentINodeName);
} else {
processINode(in, v, true, currentINodeName, true);
}
v.leaveEnclosingElement(); v.leaveEnclosingElement();
} }
v.leaveEnclosingElement(); v.leaveEnclosingElement();

View File

@ -97,7 +97,8 @@ public enum ImageElement {
SNAPSHOT_DIR_DIFF, SNAPSHOT_DIR_DIFF,
SNAPSHOT_DIFF_SNAPSHOTID, SNAPSHOT_DIFF_SNAPSHOTID,
SNAPSHOT_DIR_DIFF_CHILDREN_SIZE, SNAPSHOT_DIR_DIFF_CHILDREN_SIZE,
SNAPSHOT_DIFF_SNAPSHOTINODE, SNAPSHOT_INODE_FILE_ATTRIBUTES,
SNAPSHOT_INODE_DIRECTORY_ATTRIBUTES,
SNAPSHOT_DIR_DIFF_CREATEDLIST, SNAPSHOT_DIR_DIFF_CREATEDLIST,
SNAPSHOT_DIR_DIFF_CREATEDLIST_SIZE, SNAPSHOT_DIR_DIFF_CREATEDLIST_SIZE,
SNAPSHOT_DIR_DIFF_CREATED_INODE, SNAPSHOT_DIR_DIFF_CREATED_INODE,

View File

@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?> <?xml version="1.0" encoding="UTF-8"?>
<EDITS> <EDITS>
<EDITS_VERSION>-43</EDITS_VERSION> <EDITS_VERSION>-45</EDITS_VERSION>
<RECORD> <RECORD>
<OPCODE>OP_START_LOG_SEGMENT</OPCODE> <OPCODE>OP_START_LOG_SEGMENT</OPCODE>
<DATA> <DATA>