HDFS-4529. Disallow concat when one of the src files is in some snapshot.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1468667 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2013-04-16 23:16:02 +00:00
parent 6bda1f20ad
commit 38bd7061c1
7 changed files with 39 additions and 15 deletions

View File

@@ -244,3 +244,6 @@ Branch-2802 Snapshot (Unreleased)
   HDFS-4700. Fix the undo section of rename with snapshots. (Jing Zhao via
   szetszwo)
 
+  HDFS-4529. Disallow concat when one of the src files is in some snapshot.
+  (szetszwo)
+

View File

@@ -68,6 +68,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotAccessControlException;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotException;
 import org.apache.hadoop.hdfs.util.ByteArray;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
@@ -1099,9 +1100,9 @@ public class FSDirectory implements Closeable {
   /**
    * Concat all the blocks from srcs to trg and delete the srcs files
    */
-  public void concat(String target, String [] srcs)
+  void concat(String target, String [] srcs)
       throws UnresolvedLinkException, QuotaExceededException,
-      SnapshotAccessControlException {
+      SnapshotAccessControlException, SnapshotException {
     writeLock();
     try {
       // actual move
@@ -1122,7 +1123,7 @@
    */
   void unprotectedConcat(String target, String [] srcs, long timestamp)
       throws UnresolvedLinkException, QuotaExceededException,
-      SnapshotAccessControlException {
+      SnapshotAccessControlException, SnapshotException {
     assert hasWriteLock();
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* FSNamesystem.concat to "+target);
@@ -1137,7 +1138,24 @@
     final INodeFile [] allSrcInodes = new INodeFile[srcs.length];
     for(int i = 0; i < srcs.length; i++) {
-      allSrcInodes[i] = getINode4Write(srcs[i]).asFile();
+      final INodesInPath iip = getINodesInPath4Write(srcs[i]);
+      final Snapshot latest = iip.getLatestSnapshot();
+      final INode inode = iip.getLastINode();
+
+      // check if the file is in the latest snapshot
+      if (inode.isInLatestSnapshot(latest)) {
+        throw new SnapshotException("Concat: the source file " + srcs[i]
+            + " is in snapshot " + latest);
+      }
+
+      // check if the file has other references.
+      if (inode.isReference() && ((INodeReference.WithCount)
+          inode.asReference().getReferredINode()).getReferenceCount() > 1) {
+        throw new SnapshotException("Concat: the source file " + srcs[i]
+            + " is referred by some other reference in some snapshot.");
+      }
+
+      allSrcInodes[i] = inode.asFile();
     }
     trgInode.concatBlocks(allSrcInodes);
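
Seen from a client, the new check means that as soon as any source file is captured by a snapshot, concat is refused with a SnapshotException instead of folding snapshotted blocks into the target. Below is a minimal test-style sketch of that behaviour, assuming a MiniDFSCluster and the DistributedFileSystem snapshot/concat APIs of this branch; the class name, the /foo paths and the snapshot name are illustrative only, and the block size is matched to the file length so that concat's pre-existing preconditions (full blocks, sources in the same directory as the target) are met before the new snapshot check is reached.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class ConcatWithSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Use a tiny block size so the 512-byte files below consist of full
    // blocks, keeping concat's older preconditions out of the way.
    conf.setLong("dfs.blocksize", 512);
    conf.setInt("dfs.namenode.fs-limits.min-block-size", 0);
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      DistributedFileSystem dfs = cluster.getFileSystem();
      Path dir = new Path("/foo");
      Path target = new Path(dir, "target");
      Path src = new Path(dir, "src");
      dfs.mkdirs(dir);
      DFSTestUtil.createFile(dfs, target, 512L, (short) 1, 0L);
      DFSTestUtil.createFile(dfs, src, 512L, (short) 1, 0L);

      // Once /foo is snapshotted, /foo/src is "in some snapshot".
      dfs.allowSnapshot(dir);
      dfs.createSnapshot(dir, "s0");

      try {
        dfs.concat(target, new Path[] { src });
        System.out.println("unexpected: concat succeeded");
      } catch (Exception e) {
        // The namenode-side SnapshotException surfaces here, wrapped in a
        // RemoteException by the RPC layer.
        System.out.println("concat rejected: " + e.getMessage());
      }
    } finally {
      cluster.shutdown();
    }
  }
}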

View File

@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.util.Diff;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import com.google.common.primitives.SignedBytes;
 
 /**
@@ -185,25 +186,27 @@ public abstract class INode implements Diff.Element<byte[]> {
   }
 
   /**
-   * Called by {@link INode#recordModification}. For a reference node and its
-   * subtree, the function tells which snapshot the modification should be
+   * When {@link #recordModification} is called on a referred node,
+   * this method tells which snapshot the modification should be
    * associated with: the snapshot that belongs to the SRC tree of the rename
    * operation, or the snapshot belonging to the DST tree.
    *
-   * @param latest
+   * @param latestInDst
    *          the latest snapshot in the DST tree above the reference node
   * @return True: the modification should be recorded in the snapshot that
    *         belongs to the SRC tree. False: the modification should be
    *         recorded in the snapshot that belongs to the DST tree.
    */
-  public final boolean isInSrcSnapshot(final Snapshot latest) {
-    if (latest == null) {
+  public final boolean shouldRecordInSrcSnapshot(final Snapshot latestInDst) {
+    Preconditions.checkState(!isReference());
+
+    if (latestInDst == null) {
       return true;
     }
     INodeReference withCount = getParentReference();
     if (withCount != null) {
       int dstSnapshotId = withCount.getParentReference().getDstSnapshotId();
-      if (dstSnapshotId >= latest.getId()) {
+      if (dstSnapshotId >= latestInDst.getId()) {
         return true;
       }
     }
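
The renamed method is easiest to read against the rename-across-snapshot-roots scenario its javadoc describes: an inode moved out of a snapshotted SRC tree into a snapshotted DST tree is reached through a reference, and a later modification has to be charged either to a SRC-tree snapshot or to a DST-tree snapshot. A minimal client-level sketch of that scenario, assuming a MiniDFSCluster and the snapshot APIs of this branch; the paths, the snapshot names and the comments mapping each step onto shouldRecordInSrcSnapshot() are illustrative, not part of this commit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class RenameAcrossSnapshotRootsSketch {
  public static void main(String[] args) throws Exception {
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
    try {
      DistributedFileSystem dfs = cluster.getFileSystem();
      Path srcDir = new Path("/src");
      Path dstDir = new Path("/dst");
      dfs.mkdirs(srcDir);
      dfs.mkdirs(dstDir);
      DFSTestUtil.createFile(dfs, new Path(srcDir, "file"), 512L, (short) 1, 0L);

      dfs.allowSnapshot(srcDir);
      dfs.allowSnapshot(dstDir);
      dfs.createSnapshot(srcDir, "s1"); // the file is captured in the SRC tree
      dfs.createSnapshot(dstDir, "d1"); // latest DST snapshot before the rename

      // The rename turns the file into a reference node; the DST-side
      // reference remembers d1 as its dstSnapshotId.
      dfs.rename(new Path(srcDir, "file"), new Path(dstDir, "file"));

      // While d1 is still the latest DST snapshot, dstSnapshotId >= latestInDst,
      // so shouldRecordInSrcSnapshot() returns true and this modification is
      // recorded against the SRC-tree snapshot.
      dfs.setReplication(new Path(dstDir, "file"), (short) 2);

      // After a newer DST snapshot, the same kind of modification is recorded
      // in the DST tree instead (shouldRecordInSrcSnapshot() returns false).
      dfs.createSnapshot(dstDir, "d2");
      dfs.setReplication(new Path(dstDir, "file"), (short) 3);
    } finally {
      cluster.shutdown();
    }
  }
}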

View File

@@ -564,7 +564,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
   @Override
   public INodeDirectoryWithSnapshot recordModification(final Snapshot latest)
       throws QuotaExceededException {
-    if (isInLatestSnapshot(latest) && !isInSrcSnapshot(latest)) {
+    if (isInLatestSnapshot(latest) && !shouldRecordInSrcSnapshot(latest)) {
       return saveSelf2Snapshot(latest, null);
     }
     return this;

View File

@@ -95,7 +95,7 @@ public class INodeFileUnderConstructionWithSnapshot
   @Override
   public INodeFileUnderConstructionWithSnapshot recordModification(
       final Snapshot latest) throws QuotaExceededException {
-    if (isInLatestSnapshot(latest) && !isInSrcSnapshot(latest)) {
+    if (isInLatestSnapshot(latest) && !shouldRecordInSrcSnapshot(latest)) {
       diffs.saveSelf2Snapshot(latest, this, null);
     }
     return this;

View File

@@ -66,7 +66,7 @@ public class INodeFileWithSnapshot extends INodeFile
   @Override
   public INodeFileWithSnapshot recordModification(final Snapshot latest)
       throws QuotaExceededException {
-    if (isInLatestSnapshot(latest) && !isInSrcSnapshot(latest)) {
+    if (isInLatestSnapshot(latest) && !shouldRecordInSrcSnapshot(latest)) {
       diffs.saveSelf2Snapshot(latest, this, null);
     }
     return this;

View File

@@ -23,11 +23,11 @@ import java.io.IOException;
 public class SnapshotException extends IOException {
   private static final long serialVersionUID = 1L;
 
-  SnapshotException(final String message) {
+  public SnapshotException(final String message) {
     super(message);
   }
 
-  SnapshotException(final Throwable cause) {
+  public SnapshotException(final Throwable cause) {
     super(cause);
   }
 }