HDFS-5537. Merge change r1546184 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1568204 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao 2014-02-14 07:18:58 +00:00
parent ccdeeb39e1
commit 418e152347
15 changed files with 280 additions and 289 deletions
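
At a high level, the patch folds the FileWithSnapshot interface (and its nested FileDiff, FileDiffList and Util members) into INodeFileWithSnapshot, promoting FileDiff and FileDiffList to top-level classes. The caller-side effect is that instanceof checks and casts now target the concrete class instead of the removed interface. Below is a minimal self-contained sketch of that pattern, using hypothetical Sketch* stand-ins rather than the real HDFS types:

    import java.util.ArrayList;
    import java.util.List;

    class SketchINodeFile {                                        // stand-in for INodeFile
    }

    class SketchFileDiff {                                         // stand-in for FileDiff
      final long fileSize;
      SketchFileDiff(long fileSize) { this.fileSize = fileSize; }
    }

    class SketchINodeFileWithSnapshot extends SketchINodeFile {    // stand-in for INodeFileWithSnapshot
      private final List<SketchFileDiff> diffs = new ArrayList<>();
      List<SketchFileDiff> getDiffs() { return diffs; }
    }

    public class RefactoringSketch {
      // After the patch, callers test for the concrete subclass rather than the
      // removed FileWithSnapshot interface before reading the diff list.
      static long sizeInLatestSnapshot(SketchINodeFile inode) {
        if (inode instanceof SketchINodeFileWithSnapshot) {
          List<SketchFileDiff> diffs = ((SketchINodeFileWithSnapshot) inode).getDiffs();
          return diffs.isEmpty() ? -1 : diffs.get(diffs.size() - 1).fileSize;
        }
        return -1;   // a plain file has no snapshot diffs
      }

      public static void main(String[] args) {
        SketchINodeFileWithSnapshot f = new SketchINodeFileWithSnapshot();
        f.getDiffs().add(new SketchFileDiff(1024));
        System.out.println(sizeInLatestSnapshot(f));   // prints 1024
      }
    }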

View File

@@ -46,6 +46,8 @@ Release 2.4.0 - UNRELEASED
     HDFS-5286. Flatten INodeDirectory hierarchy: Replace INodeDirectoryWithQuota
     with DirectoryWithQuotaFeature. (szetszwo)

+    HDFS-5537. Remove FileWithSnapshot interface. (jing9 via szetszwo)
+
   OPTIMIZATIONS

     HDFS-5790. LeaseManager.findPath is very slow when many leases need recovery

View File

@@ -475,8 +475,8 @@ public class FSDirectory implements Closeable {
   boolean unprotectedRemoveBlock(String path,
       INodeFile fileNode, Block block) throws IOException {
+    Preconditions.checkArgument(fileNode.isUnderConstruction());
     // modify file-> block and blocksMap
-    // fileNode should be under construction
     boolean removed = fileNode.removeLastBlock(block);
     if (!removed) {
       return false;

View File

@ -57,7 +57,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList; import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;

View File

@@ -26,7 +26,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;

 /**
- * I-node for file being written.
+ * Feature for under-construction file.
  */
 @InterfaceAudience.Private
 public class FileUnderConstructionFeature extends INodeFile.Feature {

View File

@@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ChunkedArrayList;
@@ -315,7 +314,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
    * 1.2.2 Else do nothing with the current INode. Recursively clean its
    * children.
    *
-   * 1.3 The current inode is a {@link FileWithSnapshot}.
+   * 1.3 The current inode is a file with snapshot.
    * Call recordModification(..) to capture the current states.
    * Mark the INode as deleted.
    *
@@ -328,7 +327,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
    * 2. When deleting a snapshot.
    * 2.1 To clean {@link INodeFile}: do nothing.
    * 2.2 To clean {@link INodeDirectory}: recursively clean its children.
-   * 2.3 To clean {@link FileWithSnapshot}: delete the corresponding snapshot in
+   * 2.3 To clean INodeFile with snapshot: delete the corresponding snapshot in
    * its diff list.
    * 2.4 To clean {@link INodeDirectoryWithSnapshot}: delete the corresponding
    * snapshot in its diff list. Recursively clean its children.

View File

@@ -33,10 +33,8 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.Util;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
@@ -189,7 +187,7 @@ public class INodeFile extends INodeWithAdditionalFields
   INodeFile toUnderConstruction(String clientName, String clientMachine,
       DatanodeDescriptor clientNode) {
     Preconditions.checkState(!isUnderConstruction(),
-        "file is already an INodeFileUnderConstruction");
+        "file is already under construction");
     FileUnderConstructionFeature uc = new FileUnderConstructionFeature(
         clientName, clientMachine, clientNode);
     addFeature(uc);
@@ -201,6 +199,8 @@ public class INodeFile extends INodeWithAdditionalFields
    * feature.
    */
   public INodeFile toCompleteFile(long mtime) {
+    Preconditions.checkState(isUnderConstruction(),
+        "file is no longer under construction");
     FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
     if (uc != null) {
       assertAllBlocksComplete();
@@ -222,15 +222,16 @@ public class INodeFile extends INodeWithAdditionalFields
     }
   }

-  @Override //BlockCollection
+  @Override // BlockCollection
   public void setBlock(int index, BlockInfo blk) {
     this.blocks[index] = blk;
   }

-  @Override // BlockCollection
+  @Override // BlockCollection, the file should be under construction
   public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
       DatanodeStorageInfo[] locations) throws IOException {
-    Preconditions.checkState(isUnderConstruction());
+    Preconditions.checkState(isUnderConstruction(),
+        "file is no longer under construction");

     if (numBlocks() == 0) {
       throw new IOException("Failed to set last block: File is empty.");
@@ -248,6 +249,8 @@ public class INodeFile extends INodeWithAdditionalFields
    * the last one on the list.
    */
   boolean removeLastBlock(Block oldblock) {
+    Preconditions.checkState(isUnderConstruction(),
+        "file is no longer under construction");
     if (blocks == null || blocks.length == 0) {
       return false;
     }
@@ -299,10 +302,8 @@ public class INodeFile extends INodeWithAdditionalFields
   }

   @Override
-  public final short getBlockReplication() {
-    return this instanceof FileWithSnapshot?
-        Util.getBlockReplication((FileWithSnapshot)this)
-        : getFileReplication(null);
+  public short getBlockReplication() {
+    return getFileReplication(null);
   }

   /** Set the replication factor of this file. */
@@ -422,8 +423,8 @@ public class INodeFile extends INodeWithAdditionalFields
     clear();
     removedINodes.add(this);

-    if (this instanceof FileWithSnapshot) {
-      ((FileWithSnapshot) this).getDiffs().clear();
+    if (this instanceof INodeFileWithSnapshot) {
+      ((INodeFileWithSnapshot) this).getDiffs().clear();
     }
   }
@@ -438,8 +439,8 @@ public class INodeFile extends INodeWithAdditionalFields
       boolean useCache, int lastSnapshotId) {
     long nsDelta = 1;
     final long dsDelta;
-    if (this instanceof FileWithSnapshot) {
-      FileDiffList fileDiffList = ((FileWithSnapshot) this).getDiffs();
+    if (this instanceof INodeFileWithSnapshot) {
+      FileDiffList fileDiffList = ((INodeFileWithSnapshot) this).getDiffs();
       Snapshot last = fileDiffList.getLastSnapshot();
       List<FileDiff> diffs = fileDiffList.asList();
@@ -471,8 +472,8 @@ public class INodeFile extends INodeWithAdditionalFields
   private void computeContentSummary4Snapshot(final Content.Counts counts) {
     // file length and diskspace only counted for the latest state of the file
     // i.e. either the current state or the last snapshot
-    if (this instanceof FileWithSnapshot) {
-      final FileWithSnapshot withSnapshot = (FileWithSnapshot)this;
+    if (this instanceof INodeFileWithSnapshot) {
+      final INodeFileWithSnapshot withSnapshot = (INodeFileWithSnapshot) this;
       final FileDiffList diffs = withSnapshot.getDiffs();
       final int n = diffs.asList().size();
       counts.add(Content.FILE, n);
@@ -488,8 +489,8 @@ public class INodeFile extends INodeWithAdditionalFields
   }

   private void computeContentSummary4Current(final Content.Counts counts) {
-    if (this instanceof FileWithSnapshot
-        && ((FileWithSnapshot)this).isCurrentFileDeleted()) {
+    if (this instanceof INodeFileWithSnapshot
+        && ((INodeFileWithSnapshot) this).isCurrentFileDeleted()) {
       return;
     }
@@ -508,8 +509,9 @@ public class INodeFile extends INodeWithAdditionalFields
    * otherwise, get the file size from the given snapshot.
    */
   public final long computeFileSize(Snapshot snapshot) {
-    if (snapshot != null && this instanceof FileWithSnapshot) {
-      final FileDiff d = ((FileWithSnapshot)this).getDiffs().getDiff(snapshot);
+    if (snapshot != null && this instanceof INodeFileWithSnapshot) {
+      final FileDiff d = ((INodeFileWithSnapshot) this).getDiffs().getDiff(
+          snapshot);
       if (d != null) {
         return d.getFileSize();
       }
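
The getBlockReplication() hunk above replaces an instanceof check plus a static Util helper with plain virtual dispatch: the base class now returns its own file replication, and INodeFileWithSnapshot (later in this commit) overrides the method to take the maximum over its diff list. A small self-contained model of that dispatch, with hypothetical class names rather than the real HDFS types:

    class PlainFile {                         // hypothetical stand-in for INodeFile
      protected short fileReplication = 3;

      // Base behaviour: just the file's own replication, like getFileReplication(null).
      public short getBlockReplication() {
        return fileReplication;
      }
    }

    class SnapshotFile extends PlainFile {    // stand-in for INodeFileWithSnapshot
      private final short[] snapshotReplications;

      SnapshotFile(short... snapshotReplications) {
        this.snapshotReplications = snapshotReplications;
      }

      // Override: the max of the current replication and every replication
      // recorded in a snapshot diff, mirroring the new subclass override.
      @Override
      public short getBlockReplication() {
        short max = fileReplication;
        for (short r : snapshotReplications) {
          if (r > max) {
            max = r;
          }
        }
        return max;
      }
    }

    public class ReplicationDispatchDemo {
      public static void main(String[] args) {
        PlainFile f = new SnapshotFile((short) 5, (short) 2);
        // Virtual dispatch picks the subclass override; no instanceof at the call site.
        System.out.println(f.getBlockReplication());   // prints 5
      }
    }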

View File

@@ -26,8 +26,8 @@ import java.util.List;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;

 import com.google.common.base.Preconditions;
@@ -102,8 +102,8 @@ public abstract class INodeReference extends INode {
       }
       if (wn != null) {
         INode referred = wc.getReferredINode();
-        if (referred instanceof FileWithSnapshot) {
-          return ((FileWithSnapshot) referred).getDiffs().getPrior(
+        if (referred instanceof INodeFileWithSnapshot) {
+          return ((INodeFileWithSnapshot) referred).getDiffs().getPrior(
              wn.lastSnapshotId);
         } else if (referred instanceof INodeDirectoryWithSnapshot) {
           return ((INodeDirectoryWithSnapshot) referred).getDiffs().getPrior(
@@ -547,8 +547,8 @@ public abstract class INodeReference extends INode {
     private Snapshot getSelfSnapshot() {
       INode referred = getReferredINode().asReference().getReferredINode();
       Snapshot snapshot = null;
-      if (referred instanceof FileWithSnapshot) {
-        snapshot = ((FileWithSnapshot) referred).getDiffs().getPrior(
+      if (referred instanceof INodeFileWithSnapshot) {
+        snapshot = ((INodeFileWithSnapshot) referred).getDiffs().getPrior(
             lastSnapshotId);
       } else if (referred instanceof INodeDirectoryWithSnapshot) {
         snapshot = ((INodeDirectoryWithSnapshot) referred).getDiffs().getPrior(
@@ -637,10 +637,10 @@ public abstract class INodeReference extends INode {
       Snapshot snapshot = getSelfSnapshot(prior);

       INode referred = getReferredINode().asReference().getReferredINode();
-      if (referred instanceof FileWithSnapshot) {
+      if (referred instanceof INodeFileWithSnapshot) {
         // if referred is a file, it must be a FileWithSnapshot since we did
         // recordModification before the rename
-        FileWithSnapshot sfile = (FileWithSnapshot) referred;
+        INodeFileWithSnapshot sfile = (INodeFileWithSnapshot) referred;
         // make sure we mark the file as deleted
         sfile.deleteCurrentFile();
         try {
@@ -671,8 +671,8 @@ public abstract class INodeReference extends INode {
       WithCount wc = (WithCount) getReferredINode().asReference();
       INode referred = wc.getReferredINode();
       Snapshot lastSnapshot = null;
-      if (referred instanceof FileWithSnapshot) {
-        lastSnapshot = ((FileWithSnapshot) referred).getDiffs()
+      if (referred instanceof INodeFileWithSnapshot) {
+        lastSnapshot = ((INodeFileWithSnapshot) referred).getDiffs()
             .getLastSnapshot();
       } else if (referred instanceof INodeDirectoryWithSnapshot) {
         lastSnapshot = ((INodeDirectoryWithSnapshot) referred)

View File

@@ -0,0 +1,92 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode.snapshot;

import java.io.DataOutput;
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
import org.apache.hadoop.hdfs.server.namenode.Quota;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;

/**
 * The difference of an {@link INodeFile} between two snapshots.
 */
public class FileDiff extends
    AbstractINodeDiff<INodeFileWithSnapshot, INodeFileAttributes, FileDiff> {

  /** The file size at snapshot creation time. */
  private final long fileSize;

  FileDiff(Snapshot snapshot, INodeFile file) {
    super(snapshot, null, null);
    fileSize = file.computeFileSize();
  }

  /** Constructor used by FSImage loading */
  FileDiff(Snapshot snapshot, INodeFileAttributes snapshotINode,
      FileDiff posteriorDiff, long fileSize) {
    super(snapshot, snapshotINode, posteriorDiff);
    this.fileSize = fileSize;
  }

  /** @return the file size in the snapshot. */
  public long getFileSize() {
    return fileSize;
  }

  @Override
  Quota.Counts combinePosteriorAndCollectBlocks(
      INodeFileWithSnapshot currentINode, FileDiff posterior,
      BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
    return currentINode.updateQuotaAndCollectBlocks(posterior, collectedBlocks,
        removedINodes);
  }

  @Override
  public String toString() {
    return super.toString() + " fileSize=" + fileSize + ", rep="
        + (snapshotINode == null? "?": snapshotINode.getFileReplication());
  }

  @Override
  void write(DataOutput out, ReferenceMap referenceMap) throws IOException {
    writeSnapshot(out);
    out.writeLong(fileSize);

    // write snapshotINode
    if (snapshotINode != null) {
      out.writeBoolean(true);
      FSImageSerialization.writeINodeFileAttributes(snapshotINode, out);
    } else {
      out.writeBoolean(false);
    }
  }

  @Override
  Quota.Counts destroyDiffAndCollectBlocks(INodeFileWithSnapshot currentINode,
      BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
    return currentINode.updateQuotaAndCollectBlocks(this, collectedBlocks,
        removedINodes);
  }
}
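
For context, the fileSize recorded by the new top-level FileDiff is what keeps reads through a snapshot bounded at the length the file had when the snapshot was taken, even if the live file keeps growing. A toy model of that idea, with hypothetical names and none of the quota or serialization machinery:

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class FileDiffIdeaDemo {
      private long liveLength = 0;
      // Snapshot name -> file length recorded at snapshot creation time.
      private final Map<String, Long> snapshotLength = new LinkedHashMap<>();

      void append(long bytes) { liveLength += bytes; }

      void createSnapshot(String name) { snapshotLength.put(name, liveLength); }

      long lengthInSnapshot(String name) {
        Long recorded = snapshotLength.get(name);
        return recorded != null ? recorded : liveLength;
      }

      public static void main(String[] args) {
        FileDiffIdeaDemo f = new FileDiffIdeaDemo();
        f.append(100);
        f.createSnapshot("s1");   // s1 remembers 100 bytes
        f.append(50);             // the live file is now 150 bytes
        System.out.println(f.lengthInSnapshot("s1")); // prints 100
        System.out.println(f.liveLength);             // prints 150
      }
    }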

View File

@@ -0,0 +1,35 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode.snapshot;

import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;

/** A list of FileDiffs for storing snapshot data. */
public class FileDiffList extends
    AbstractINodeDiffList<INodeFileWithSnapshot, INodeFileAttributes, FileDiff> {

  @Override
  FileDiff createDiff(Snapshot snapshot, INodeFileWithSnapshot file) {
    return new FileDiff(snapshot, file);
  }

  @Override
  INodeFileAttributes createSnapshotCopy(INodeFileWithSnapshot currentINode) {
    return new INodeFileAttributes.SnapshotCopy(currentINode);
  }
}

View File

@@ -1,227 +0,0 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode.snapshot;

import java.io.DataOutput;
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
import org.apache.hadoop.hdfs.server.namenode.Quota;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;

/**
 * An interface for {@link INodeFile} to support snapshot.
 */
@InterfaceAudience.Private
public interface FileWithSnapshot {
  /**
   * The difference of an {@link INodeFile} between two snapshots.
   */
  public static class FileDiff extends AbstractINodeDiff<INodeFile, INodeFileAttributes, FileDiff> {
    /** The file size at snapshot creation time. */
    private final long fileSize;

    private FileDiff(Snapshot snapshot, INodeFile file) {
      super(snapshot, null, null);
      fileSize = file.computeFileSize();
    }

    /** Constructor used by FSImage loading */
    FileDiff(Snapshot snapshot, INodeFileAttributes snapshotINode,
        FileDiff posteriorDiff, long fileSize) {
      super(snapshot, snapshotINode, posteriorDiff);
      this.fileSize = fileSize;
    }

    /** @return the file size in the snapshot. */
    public long getFileSize() {
      return fileSize;
    }

    private static Quota.Counts updateQuotaAndCollectBlocks(
        INodeFile currentINode, FileDiff removed,
        BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
      FileWithSnapshot sFile = (FileWithSnapshot) currentINode;
      long oldDiskspace = currentINode.diskspaceConsumed();
      if (removed.snapshotINode != null) {
        short replication = removed.snapshotINode.getFileReplication();
        short currentRepl = currentINode.getBlockReplication();
        if (currentRepl == 0) {
          oldDiskspace = currentINode.computeFileSize(true, true) * replication;
        } else if (replication > currentRepl) {
          oldDiskspace = oldDiskspace / currentINode.getBlockReplication()
              * replication;
        }
      }

      Util.collectBlocksAndClear(sFile, collectedBlocks, removedINodes);

      long dsDelta = oldDiskspace - currentINode.diskspaceConsumed();
      return Quota.Counts.newInstance(0, dsDelta);
    }

    @Override
    Quota.Counts combinePosteriorAndCollectBlocks(INodeFile currentINode,
        FileDiff posterior, BlocksMapUpdateInfo collectedBlocks,
        final List<INode> removedINodes) {
      return updateQuotaAndCollectBlocks(currentINode, posterior,
          collectedBlocks, removedINodes);
    }

    @Override
    public String toString() {
      return super.toString() + " fileSize=" + fileSize + ", rep="
          + (snapshotINode == null? "?": snapshotINode.getFileReplication());
    }

    @Override
    void write(DataOutput out, ReferenceMap referenceMap) throws IOException {
      writeSnapshot(out);
      out.writeLong(fileSize);

      // write snapshotINode
      if (snapshotINode != null) {
        out.writeBoolean(true);
        FSImageSerialization.writeINodeFileAttributes(snapshotINode, out);
      } else {
        out.writeBoolean(false);
      }
    }

    @Override
    Quota.Counts destroyDiffAndCollectBlocks(INodeFile currentINode,
        BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
      return updateQuotaAndCollectBlocks(currentINode, this,
          collectedBlocks, removedINodes);
    }
  }

  /** A list of FileDiffs for storing snapshot data. */
  public static class FileDiffList
      extends AbstractINodeDiffList<INodeFile, INodeFileAttributes, FileDiff> {
    @Override
    FileDiff createDiff(Snapshot snapshot, INodeFile file) {
      return new FileDiff(snapshot, file);
    }

    @Override
    INodeFileAttributes createSnapshotCopy(INodeFile currentINode) {
      return new INodeFileAttributes.SnapshotCopy(currentINode);
    }
  }

  /** @return the {@link INodeFile} view of this object. */
  public INodeFile asINodeFile();

  /** @return the file diff list. */
  public FileDiffList getDiffs();

  /** Is the current file deleted? */
  public boolean isCurrentFileDeleted();

  /** Delete the file from the current tree */
  public void deleteCurrentFile();

  /** Utility methods for the classes which implement the interface. */
  public static class Util {
    /**
     * @return block replication, which is the max file replication among
     *         the file and the diff list.
     */
    public static short getBlockReplication(final FileWithSnapshot file) {
      short max = file.isCurrentFileDeleted()? 0
          : file.asINodeFile().getFileReplication();
      for(FileDiff d : file.getDiffs()) {
        if (d.snapshotINode != null) {
          final short replication = d.snapshotINode.getFileReplication();
          if (replication > max) {
            max = replication;
          }
        }
      }
      return max;
    }

    /**
     * If some blocks at the end of the block list no longer belongs to
     * any inode, collect them and update the block list.
     */
    static void collectBlocksAndClear(final FileWithSnapshot file,
        final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
      // check if everything is deleted.
      if (file.isCurrentFileDeleted()
          && file.getDiffs().asList().isEmpty()) {
        file.asINodeFile().destroyAndCollectBlocks(info, removedINodes);
        return;
      }

      // find max file size.
      final long max;
      if (file.isCurrentFileDeleted()) {
        final FileDiff last = file.getDiffs().getLast();
        max = last == null? 0: last.fileSize;
      } else {
        max = file.asINodeFile().computeFileSize();
      }

      collectBlocksBeyondMax(file, max, info);
    }

    private static void collectBlocksBeyondMax(final FileWithSnapshot file,
        final long max, final BlocksMapUpdateInfo collectedBlocks) {
      final BlockInfo[] oldBlocks = file.asINodeFile().getBlocks();
      if (oldBlocks != null) {
        //find the minimum n such that the size of the first n blocks > max
        int n = 0;
        for(long size = 0; n < oldBlocks.length && max > size; n++) {
          size += oldBlocks[n].getNumBytes();
        }

        // starting from block n, the data is beyond max.
        if (n < oldBlocks.length) {
          // resize the array.
          final BlockInfo[] newBlocks;
          if (n == 0) {
            newBlocks = null;
          } else {
            newBlocks = new BlockInfo[n];
            System.arraycopy(oldBlocks, 0, newBlocks, 0, n);
          }

          // set new blocks
          file.asINodeFile().setBlocks(newBlocks);

          // collect the blocks beyond max.
          if (collectedBlocks != null) {
            for(; n < oldBlocks.length; n++) {
              collectedBlocks.addDeleteBlock(oldBlocks[n]);
            }
          }
        }
      }
    }
  }
}

View File

@@ -432,8 +432,8 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
           parentPath.remove(parentPath.size() - 1);
         }
       }
-    } else if (node.isFile() && node.asFile() instanceof FileWithSnapshot) {
-      FileWithSnapshot file = (FileWithSnapshot) node.asFile();
+    } else if (node.isFile() && node.asFile() instanceof INodeFileWithSnapshot) {
+      INodeFileWithSnapshot file = (INodeFileWithSnapshot) node.asFile();
       Snapshot earlierSnapshot = diffReport.isFromEarlier() ? diffReport.from
           : diffReport.to;
       Snapshot laterSnapshot = diffReport.isFromEarlier() ? diffReport.to
@@ -441,7 +441,7 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
       boolean change = file.getDiffs().changedBetweenSnapshots(earlierSnapshot,
           laterSnapshot);
       if (change) {
-        diffReport.addFileDiff(file.asINodeFile(), relativePath);
+        diffReport.addFileDiff(file, relativePath);
       }
     }
   }
} }

View File

@@ -804,10 +804,10 @@ public class INodeDirectoryWithSnapshot extends INodeDirectory {
         // For DstReference node, since the node is not in the created list of
         // prior, we should treat it as regular file/dir
       } else if (topNode.isFile()
-          && topNode.asFile() instanceof FileWithSnapshot) {
-        FileWithSnapshot fs = (FileWithSnapshot) topNode.asFile();
-        counts.add(fs.getDiffs().deleteSnapshotDiff(post, prior,
-            topNode.asFile(), collectedBlocks, removedINodes, countDiffChange));
+          && topNode.asFile() instanceof INodeFileWithSnapshot) {
+        INodeFileWithSnapshot fs = (INodeFileWithSnapshot) topNode.asFile();
+        counts.add(fs.getDiffs().deleteSnapshotDiff(post, prior, fs,
+            collectedBlocks, removedINodes, countDiffChange));
       } else if (topNode.isDirectory()) {
         INodeDirectory dir = topNode.asDirectory();
         ChildrenDiff priorChildrenDiff = null;

View File

@@ -21,6 +21,7 @@ import java.util.List;

 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
@@ -31,14 +32,13 @@ import org.apache.hadoop.hdfs.server.namenode.Quota;
  * Represent an {@link INodeFile} that is snapshotted.
  */
 @InterfaceAudience.Private
-public class INodeFileWithSnapshot extends INodeFile
-    implements FileWithSnapshot {
+public class INodeFileWithSnapshot extends INodeFile {
   private final FileDiffList diffs;
   private boolean isCurrentFileDeleted = false;

   public INodeFileWithSnapshot(INodeFile f) {
-    this(f, f instanceof FileWithSnapshot?
-        ((FileWithSnapshot)f).getDiffs(): null);
+    this(f, f instanceof INodeFileWithSnapshot ?
+        ((INodeFileWithSnapshot) f).getDiffs() : null);
   }

   public INodeFileWithSnapshot(INodeFile f, FileDiffList diffs) {
@@ -46,12 +46,12 @@ public class INodeFileWithSnapshot extends INodeFile
     this.diffs = diffs != null? diffs: new FileDiffList();
   }

-  @Override
+  /** Is the current file deleted? */
   public boolean isCurrentFileDeleted() {
     return isCurrentFileDeleted;
   }

-  @Override
+  /** Delete the file from the current tree */
   public void deleteCurrentFile() {
     isCurrentFileDeleted = true;
   }
@@ -70,12 +70,7 @@ public class INodeFileWithSnapshot extends INodeFile
     return this;
   }

-  @Override
-  public INodeFile asINodeFile() {
-    return this;
-  }
-
-  @Override
+  /** @return the file diff list. */
   public FileDiffList getDiffs() {
     return diffs;
   }
@@ -90,7 +85,7 @@ public class INodeFileWithSnapshot extends INodeFile
         recordModification(prior, null);
         deleteCurrentFile();
       }
-      Util.collectBlocksAndClear(this, collectedBlocks, removedINodes);
+      this.collectBlocksAndClear(collectedBlocks, removedINodes);
       return Quota.Counts.newInstance();
     } else { // delete a snapshot
       prior = getDiffs().updatePrior(snapshot, prior);
@@ -104,4 +99,100 @@ public class INodeFileWithSnapshot extends INodeFile
     return super.toDetailString()
         + (isCurrentFileDeleted()? "(DELETED), ": ", ") + diffs;
   }
+
+  /**
+   * @return block replication, which is the max file replication among
+   *         the file and the diff list.
+   */
+  @Override
+  public short getBlockReplication() {
+    short max = isCurrentFileDeleted() ? 0 : getFileReplication();
+    for(FileDiff d : getDiffs()) {
+      if (d.snapshotINode != null) {
+        final short replication = d.snapshotINode.getFileReplication();
+        if (replication > max) {
+          max = replication;
+        }
+      }
+    }
+    return max;
+  }
+
+  /**
+   * If some blocks at the end of the block list no longer belongs to
+   * any inode, collect them and update the block list.
+   */
+  void collectBlocksAndClear(final BlocksMapUpdateInfo info,
+      final List<INode> removedINodes) {
+    // check if everything is deleted.
+    if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
+      destroyAndCollectBlocks(info, removedINodes);
+      return;
+    }
+
+    // find max file size.
+    final long max;
+    if (isCurrentFileDeleted()) {
+      final FileDiff last = getDiffs().getLast();
+      max = last == null? 0: last.getFileSize();
+    } else {
+      max = computeFileSize();
+    }
+
+    collectBlocksBeyondMax(max, info);
+  }
+
+  private void collectBlocksBeyondMax(final long max,
+      final BlocksMapUpdateInfo collectedBlocks) {
+    final BlockInfo[] oldBlocks = getBlocks();
+    if (oldBlocks != null) {
+      //find the minimum n such that the size of the first n blocks > max
+      int n = 0;
+      for(long size = 0; n < oldBlocks.length && max > size; n++) {
+        size += oldBlocks[n].getNumBytes();
+      }
+
+      // starting from block n, the data is beyond max.
+      if (n < oldBlocks.length) {
+        // resize the array.
+        final BlockInfo[] newBlocks;
+        if (n == 0) {
+          newBlocks = null;
+        } else {
+          newBlocks = new BlockInfo[n];
+          System.arraycopy(oldBlocks, 0, newBlocks, 0, n);
+        }

+        // set new blocks
+        setBlocks(newBlocks);
+
+        // collect the blocks beyond max.
+        if (collectedBlocks != null) {
+          for(; n < oldBlocks.length; n++) {
+            collectedBlocks.addDeleteBlock(oldBlocks[n]);
+          }
+        }
+      }
+    }
+  }
+
+  Quota.Counts updateQuotaAndCollectBlocks(FileDiff removed,
+      BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
+    long oldDiskspace = this.diskspaceConsumed();
+    if (removed.snapshotINode != null) {
+      short replication = removed.snapshotINode.getFileReplication();
+      short currentRepl = getBlockReplication();
+      if (currentRepl == 0) {
+        oldDiskspace = computeFileSize(true, true) * replication;
+      } else if (replication > currentRepl) {
+        oldDiskspace = oldDiskspace / getBlockReplication()
+            * replication;
+      }
+    }
+
+    this.collectBlocksAndClear(collectedBlocks, removedINodes);
+
+    long dsDelta = oldDiskspace - diskspaceConsumed();
+    return Quota.Counts.newInstance(0, dsDelta);
+  }
 }
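
The collectBlocksAndClear()/collectBlocksBeyondMax() methods moved in above keep only the shortest prefix of blocks needed to cover the largest surviving file size and hand everything after it back for deletion. A standalone sketch of that prefix computation, using plain sizes instead of BlockInfo and hypothetical names:

    import java.util.ArrayList;
    import java.util.List;

    public class TruncateBeyondMaxDemo {
      /** @return the block sizes that fall entirely beyond {@code max} and can be deleted. */
      static List<Long> blocksBeyondMax(long[] blockSizes, long max) {
        // Find the minimum n such that the first n blocks already cover max bytes.
        int n = 0;
        for (long size = 0; n < blockSizes.length && max > size; n++) {
          size += blockSizes[n];
        }
        // Blocks n..end hold only data beyond max, so they can be collected.
        List<Long> collected = new ArrayList<>();
        for (int i = n; i < blockSizes.length; i++) {
          collected.add(blockSizes[i]);
        }
        return collected;
      }

      public static void main(String[] args) {
        long[] blocks = {64, 64, 64, 64};                  // four 64-byte blocks
        System.out.println(blocksBeyondMax(blocks, 100));  // [64, 64]: the first two blocks cover 100
        System.out.println(blocksBeyondMax(blocks, 0));    // all four blocks lie beyond an empty max
      }
    }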

View File

@@ -36,8 +36,6 @@ import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiffList;
 import org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff;
@@ -99,8 +97,8 @@ public class SnapshotFSImageFormat {
   public static void saveFileDiffList(final INodeFile file,
       final DataOutput out) throws IOException {
-    saveINodeDiffs(file instanceof FileWithSnapshot?
-        ((FileWithSnapshot)file).getDiffs(): null, out, null);
+    saveINodeDiffs(file instanceof INodeFileWithSnapshot?
+        ((INodeFileWithSnapshot) file).getDiffs(): null, out, null);
   }

   public static FileDiffList loadFileDiffList(DataInput in,

View File

@@ -63,7 +63,6 @@ import org.apache.hadoop.hdfs.server.namenode.INodeMap;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
 import org.apache.hadoop.hdfs.server.namenode.Quota;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.ChildrenDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
 import org.apache.hadoop.hdfs.util.Diff.ListType;