diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
index 37e6ca13749..f5275070868 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
@@ -18,3 +18,5 @@ Branch-2802 Snapshot (Unreleased)
   HDFS-4079. Add SnapshotManager which maintains a list for all the
   snapshottable directories and supports snapshot methods such as setting a
   directory to snapshottable and creating a snapshot. (szetszwo)
+
+  HDFS-4078. Handle replication in snapshots. (szetszwo)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index f4dfc6e94a8..6c52a70b908 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -315,9 +315,19 @@ public class FSDirectory implements Closeable {
       //add destination snaplink
       snapshot = addNode(dstPath, snapshot, UNKNOWN_DISK_SPACE);
 
-      if (snapshot != null && src.getClass() == INodeFile.class) {
-        //created a snapshot and the source is an INodeFile, replace the source.
-        replaceNode(srcPath, src, new INodeFileWithLink(src));
+      final INodeFileWithLink srcWithLink;
+      if (snapshot != null) {
+        //added snapshot node successfully, check source type,
+        if (src instanceof INodeFileWithLink) {
+          srcWithLink = (INodeFileWithLink)src;
+        } else {
+          //source is an INodeFile, replace the source.
+          srcWithLink = new INodeFileWithLink(src);
+          replaceNode(srcPath, src, srcWithLink);
+        }
+
+        //insert the snapshot to src's linked list.
+        srcWithLink.insert(snapshot);
       }
     } finally {
       writeUnlock();
@@ -384,13 +394,13 @@ public class FSDirectory implements Closeable {
 
       // check quota limits and updated space consumed
       updateCount(inodes, inodes.length-1, 0,
-          fileINode.getPreferredBlockSize()*fileINode.getBlockReplication(), true);
+          fileINode.getPreferredBlockSize()*fileINode.getFileReplication(), true);
 
       // associate new last block for the file
       BlockInfoUnderConstruction blockInfo =
         new BlockInfoUnderConstruction(
             block,
-            fileINode.getBlockReplication(),
+            fileINode.getFileReplication(),
             BlockUCState.UNDER_CONSTRUCTION,
             targets);
       getBlockManager().addBlockCollection(blockInfo, fileINode);
@@ -481,7 +491,7 @@ public class FSDirectory implements Closeable {
     // update space consumed
     INode[] pathINodes = getExistingPathINodes(path);
     updateCount(pathINodes, pathINodes.length-1, 0,
-        -fileNode.getPreferredBlockSize()*fileNode.getBlockReplication(), true);
+        -fileNode.getPreferredBlockSize()*fileNode.getFileReplication(), true);
   }
 
   /**
@@ -860,13 +870,13 @@ public class FSDirectory implements Closeable {
       return null;
     }
     INodeFile fileNode = (INodeFile)inode;
-    final short oldRepl = fileNode.getBlockReplication();
+    final short oldRepl = fileNode.getFileReplication();
 
     // check disk quota
     long dsDelta = (replication - oldRepl) * (fileNode.diskspaceConsumed()/oldRepl);
     updateCount(inodes, inodes.length-1, 0, dsDelta, true);
 
-    fileNode.setReplication(replication);
+    fileNode.setFileReplication(replication);
 
     if (oldReplication != null) {
       oldReplication[0] = oldRepl;
@@ -2124,7 +2134,7 @@ public class FSDirectory implements Closeable {
     if (node instanceof INodeFile) {
       INodeFile fileNode = (INodeFile)node;
       size = fileNode.computeFileSize(true);
-      replication = fileNode.getBlockReplication();
+      replication = fileNode.getFileReplication();
       blocksize = fileNode.getPreferredBlockSize();
     }
     return new HdfsFileStatus(
@@ -2154,7 +2164,7 @@ public class FSDirectory implements Closeable {
     if (node instanceof INodeFile) {
       INodeFile fileNode = (INodeFile)node;
       size = fileNode.computeFileSize(true);
-      replication = fileNode.getBlockReplication();
+      replication = fileNode.getFileReplication();
       blocksize = fileNode.getPreferredBlockSize();
       loc = getFSNamesystem().getBlockManager().createLocatedBlocks(
           fileNode.getBlocks(), fileNode.computeFileSize(false),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index f7ccb552d91..0c5344d04e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -661,7 +661,7 @@ public class FSEditLog implements LogsPurgeable {
   public void logOpenFile(String path, INodeFileUnderConstruction newNode) {
     AddOp op = AddOp.getInstance(cache.get())
       .setPath(path)
-      .setReplication(newNode.getBlockReplication())
+      .setReplication(newNode.getFileReplication())
       .setModificationTime(newNode.getModificationTime())
       .setAccessTime(newNode.getAccessTime())
       .setBlockSize(newNode.getPreferredBlockSize())
@@ -679,7 +679,7 @@ public class FSEditLog implements LogsPurgeable {
   public void logCloseFile(String path, INodeFile newNode) {
     CloseOp op = CloseOp.getInstance(cache.get())
       .setPath(path)
-      .setReplication(newNode.getBlockReplication())
+      .setReplication(newNode.getFileReplication())
       .setModificationTime(newNode.getModificationTime())
       .setAccessTime(newNode.getAccessTime())
       .setBlockSize(newNode.getPreferredBlockSize())
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
index fc0d6556a08..173d96c9693 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
@@ -126,7 +126,7 @@ public class FSImageSerialization {
                                            String path)
                                            throws IOException {
     writeString(path, out);
-    out.writeShort(cons.getBlockReplication());
+    out.writeShort(cons.getFileReplication());
     out.writeLong(cons.getModificationTime());
     out.writeLong(cons.getPreferredBlockSize());
     int nrBlocks = cons.getBlocks().length;
@@ -175,7 +175,7 @@ public class FSImageSerialization {
                          filePerm);
     } else {
       INodeFile fileINode = (INodeFile)node;
-      out.writeShort(fileINode.getBlockReplication());
+      out.writeShort(fileINode.getFileReplication());
       out.writeLong(fileINode.getModificationTime());
       out.writeLong(fileINode.getAccessTime());
       out.writeLong(fileINode.getPreferredBlockSize());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 818c63bb6b0..2d110fd621e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1414,7 +1414,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       }
       si.add(trgInode);
 
-      short repl = trgInode.getBlockReplication();
+      final short repl = trgInode.getFileReplication();
 
       // now check the srcs
       boolean endSrc = false; // final src file doesn't have to have full end block
@@ -1434,10 +1434,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         }
 
         // check replication and blocks size
-        if(repl != srcInode.getBlockReplication()) {
+        if(repl != srcInode.getFileReplication()) {
           throw new IllegalArgumentException(src + " and " + target + " "
               + "should have same replication: "
-              + repl + " vs. " + srcInode.getBlockReplication());
+              + repl + " vs. " + srcInode.getFileReplication());
         }
 
         //boolean endBlock=false;
@@ -1878,9 +1878,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   LocatedBlock prepareFileForWrite(String src, INodeFile file,
       String leaseHolder, String clientMachine, DatanodeDescriptor clientNode,
       boolean writeToEditLog) throws IOException {
+    //TODO SNAPSHOT: INodeFileUnderConstruction with link
    INodeFileUnderConstruction cons = new INodeFileUnderConstruction(
                                     file.getLocalNameBytes(),
-                                    file.getBlockReplication(),
+                                    file.getFileReplication(),
                                     file.getModificationTime(),
                                     file.getPreferredBlockSize(),
                                     file.getBlocks(),
@@ -2194,7 +2195,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       fileLength = pendingFile.computeContentSummary().getLength();
       blockSize = pendingFile.getPreferredBlockSize();
       clientNode = pendingFile.getClientNode();
-      replication = pendingFile.getBlockReplication();
+      replication = pendingFile.getFileReplication();
     } finally {
       writeUnlock();
     }
@@ -3157,7 +3158,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     if (diff > 0) {
       try {
         String path = leaseManager.findPath(fileINode);
-        dir.updateSpaceConsumed(path, 0, -diff * fileINode.getBlockReplication());
+        dir.updateSpaceConsumed(path, 0, -diff*fileINode.getFileReplication());
       } catch (IOException e) {
         LOG.warn("Unexpected exception while updating disk space.", e);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 3ada2de0ef9..e2f1e76146d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -49,13 +49,13 @@ public class INodeFile extends INode implements BlockCollection {
                       short replication, long modificationTime,
                       long atime, long preferredBlockSize) {
     super(permissions, modificationTime, atime);
-    this.setReplication(replication);
+    this.setFileReplication(replication);
     this.setPreferredBlockSize(preferredBlockSize);
     blocks = blklist;
   }
 
   protected INodeFile(INodeFile f) {
-    this(f.getPermissionStatus(), f.getBlocks(), f.getBlockReplication(),
+    this(f.getPermissionStatus(), f.getBlocks(), f.getFileReplication(),
         f.getModificationTime(), f.getAccessTime(), f.getPreferredBlockSize());
   }
 
@@ -75,12 +75,16 @@ public class INodeFile extends INode implements BlockCollection {
   }
 
   /** @return the replication factor of the file. */
-  @Override
-  public short getBlockReplication() {
+  public final short getFileReplication() {
     return (short) ((header & HEADERMASK) >> BLOCKBITS);
   }
 
-  void setReplication(short replication) {
+  @Override
+  public short getBlockReplication() {
+    return getFileReplication();
+  }
+
+  void setFileReplication(short replication) {
     if(replication <= 0)
        throw new IllegalArgumentException("Unexpected value for the replication");
     header = ((long)replication << BLOCKBITS) | (header & ~HEADERMASK);
@@ -220,7 +224,7 @@ public class INodeFile extends INode implements BlockCollection {
         isUnderConstruction()) {
       size += getPreferredBlockSize() - blkArr[blkArr.length-1].getNumBytes();
     }
-    return size * getBlockReplication();
+    return size * getFileReplication();
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
index aff956e3cf8..d3aac745fc4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
@@ -102,9 +102,10 @@ class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollec
     assert allBlocksComplete() :
       "Can't finalize inode " + this + " since it contains "
       + "non-complete blocks! Blocks are: " + blocksAsString();
+    //TODO SNAPSHOT: may convert to INodeFileWithLink
     INodeFile obj = new INodeFile(getPermissionStatus(),
                                   getBlocks(),
-                                  getBlockReplication(),
+                                  getFileReplication(),
                                   getModificationTime(),
                                   getModificationTime(),
                                   getPreferredBlockSize());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
index 348f8dae6bd..07ec14e47d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
@@ -834,7 +834,7 @@ class NamenodeJspHelper {
       doc.endTag();
 
       doc.startTag("replication");
-      doc.pcdata(""+inode.getBlockReplication());
+      doc.pcdata(""+inode.getFileReplication());
       doc.endTag();
 
       doc.startTag("disk_space_consumed");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java
index 8726540beec..011984f426d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java
@@ -33,6 +33,7 @@ public class INodeFileWithLink extends INodeFile {
 
   public INodeFileWithLink(INodeFile f) {
     super(f);
+    next = this;
   }
 
   void setNext(INodeFileWithLink next) {
@@ -42,4 +43,26 @@ public class INodeFileWithLink extends INodeFile {
   INodeFileWithLink getNext() {
     return next;
   }
+
+  /** Insert inode to the circular linked list. */
+  public void insert(INodeFileWithLink inode) {
+    inode.setNext(this.getNext());
+    this.setNext(inode);
+  }
+
+  /**
+   * @return the max file replication of the elements
+   *         in the circular linked list.
+   */
+  @Override
+  public short getBlockReplication() {
+    short max = getFileReplication();
+    for(INodeFileWithLink i = next; i != this; i = i.getNext()) {
+      final short replication = i.getFileReplication();
+      if (replication > max) {
+        max = replication;
+      }
+    }
+    return max;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index bb802185ac3..725f387d316 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -48,7 +48,7 @@ public class TestINodeFile {
                                   FsPermission.getDefault()), null, replication,
                                   0L, 0L, preferredBlockSize);
     assertEquals("True has to be returned in this case", replication,
-                 inf.getBlockReplication());
+                 inf.getFileReplication());
   }
 
   /**