From 1f109df2c6468e1bb23b7995fd0f13701b1087b3 Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze <szetszwo@apache.org>
Date: Mon, 15 Oct 2012 13:55:18 +0000
Subject: [PATCH] svn merge -c 1398288 from trunk for HDFS-4037. Rename the
 getReplication() method in BlockCollection to getBlockReplication().

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1398289 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt          |  7 +++++--
 .../server/blockmanagement/BlockCollection.java      |  4 +++-
 .../hdfs/server/blockmanagement/BlockInfo.java       |  4 ++--
 .../hdfs/server/blockmanagement/BlockManager.java    | 14 +++++++-------
 .../hadoop/hdfs/server/namenode/FSDirectory.java     | 12 ++++++------
 .../hadoop/hdfs/server/namenode/FSEditLog.java       |  4 ++--
 .../hdfs/server/namenode/FSEditLogLoader.java        |  4 ++--
 .../hdfs/server/namenode/FSImageSerialization.java   |  4 ++--
 .../hadoop/hdfs/server/namenode/FSNamesystem.java    | 14 +++++++-------
 .../hadoop/hdfs/server/namenode/INodeFile.java       |  4 ++--
 .../namenode/INodeFileUnderConstruction.java         |  2 +-
 .../hdfs/server/namenode/NamenodeJspHelper.java      |  2 +-
 .../server/blockmanagement/TestBlockManager.java     |  2 +-
 .../hadoop/hdfs/server/namenode/TestINodeFile.java   |  2 +-
 14 files changed, 42 insertions(+), 37 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3e98ae3cb19..4d268e23b5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -19,8 +19,8 @@ Release 2.0.3-alpha - Unreleased
 
     HDFS-3939. NN RPC address cleanup. (eli)
 
-    HDFS-3373. Change DFSClient input stream socket cache to global static and add
-    a thread to cleanup expired cache entries. (John George via szetszwo)
+    HDFS-3373. Change DFSClient input stream socket cache to global static and
+    add a thread to cleanup expired cache entries. (John George via szetszwo)
 
     HDFS-3896. Add descriptions for dfs.namenode.rpc-address and
     dfs.namenode.servicerpc-address to hdfs-default.xml. (Jeff Lord via atm)
@@ -51,6 +51,9 @@ Release 2.0.3-alpha - Unreleased
     HADOOP-8911. CRLF characters in source and text files.
     (Raja Aluri via suresh)
 
+    HDFS-4037. Rename the getReplication() method in BlockCollection to
+    getBlockReplication(). (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index e3eecadce0c..f344833a0c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -19,12 +19,14 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ContentSummary;
 
 /**
  * This interface is used by the block manager to expose a
  * few characteristics of a collection of Block/BlockUnderConstruction.
  */
+@InterfaceAudience.Private
 public interface BlockCollection {
   /**
    * Get the last block of the collection.
@@ -56,7 +58,7 @@ public interface BlockCollection {
    * Get block replication for the collection
    * @return block replication value
    */
-  public short getReplication();
+  public short getBlockReplication();
 
   /**
    * Get the name of the collection.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 610b6cab557..b56b7b620a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -68,7 +68,7 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
    * @param from BlockInfo to copy from.
    */
   protected BlockInfo(BlockInfo from) {
-    this(from, from.bc.getReplication());
+    this(from, from.bc.getBlockReplication());
     this.bc = from.bc;
   }
 
@@ -344,7 +344,7 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
       BlockUCState s, DatanodeDescriptor[] targets) {
     if(isComplete()) {
       return new BlockInfoUnderConstruction(
-        this, getBlockCollection().getReplication(), s, targets);
+        this, getBlockCollection().getBlockReplication(), s, targets);
     }
     // the block is already under construction
     BlockInfoUnderConstruction ucBlock = (BlockInfoUnderConstruction)this;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index e851279ba07..8fc52cdacf3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -998,7 +998,7 @@ public class BlockManager {
 
     // Add this replica to corruptReplicas Map
     corruptReplicas.addToCorruptReplicasMap(b.corrupted, node, b.reason);
-    if (countNodes(b.stored).liveReplicas() >= bc.getReplication()) {
+    if (countNodes(b.stored).liveReplicas() >= bc.getBlockReplication()) {
       // the block is over-replicated so invalidate the replicas immediately
       invalidateBlock(b, node);
     } else if (namesystem.isPopulatingReplQueues()) {
@@ -1136,7 +1136,7 @@
           continue;
         }
 
-        requiredReplication = bc.getReplication();
+        requiredReplication = bc.getBlockReplication();
 
         // get a source data-node
         containingNodes = new ArrayList<DatanodeDescriptor>();
@@ -1222,7 +1222,7 @@
             neededReplications.decrementReplicationIndex(priority);
             continue;
           }
-          requiredReplication = bc.getReplication();
+          requiredReplication = bc.getBlockReplication();
 
           // do not schedule more if enough replicas is already pending
           NumberReplicas numReplicas = countNodes(block);
@@ -2090,7 +2090,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     }
 
     // handle underReplication/overReplication
-    short fileReplication = bc.getReplication();
+    short fileReplication = bc.getBlockReplication();
     if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) {
       neededReplications.remove(storedBlock, numCurrentReplica,
           num.decommissionedReplicas(), fileReplication);
@@ -2229,7 +2229,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
       return MisReplicationResult.UNDER_CONSTRUCTION;
     }
     // calculate current replication
-    short expectedReplication = bc.getReplication();
+    short expectedReplication = bc.getBlockReplication();
     NumberReplicas num = countNodes(block);
     int numCurrentReplica = num.liveReplicas();
     // add to under-replicated queue if need to be
@@ -2728,7 +2728,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     while(it.hasNext()) {
       final Block block = it.next();
       BlockCollection bc = blocksMap.getBlockCollection(block);
-      short expectedReplication = bc.getReplication();
+      short expectedReplication = bc.getBlockReplication();
       NumberReplicas num = countNodes(block);
       int numCurrentReplica = num.liveReplicas();
       if (numCurrentReplica > expectedReplication) {
@@ -2874,7 +2874,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     if (bc == null) { // block does not belong to any file
       return 0;
     }
-    return bc.getReplication();
+    return bc.getBlockReplication();
   }
 
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index e02aea5f1d1..ec86de628a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -345,13 +345,13 @@ public class FSDirectory implements Closeable {
 
     // check quota limits and updated space consumed
     updateCount(inodes, inodes.length-1, 0,
-        fileINode.getPreferredBlockSize()*fileINode.getReplication(), true);
+        fileINode.getPreferredBlockSize()*fileINode.getBlockReplication(), true);
 
     // associate new last block for the file
     BlockInfoUnderConstruction blockInfo =
       new BlockInfoUnderConstruction(
           block,
-          fileINode.getReplication(),
+          fileINode.getBlockReplication(),
           BlockUCState.UNDER_CONSTRUCTION,
           targets);
     getBlockManager().addBlockCollection(blockInfo, fileINode);
@@ -442,7 +442,7 @@ public class FSDirectory implements Closeable {
     // update space consumed
     INode[] pathINodes = getExistingPathINodes(path);
     updateCount(pathINodes, pathINodes.length-1, 0,
-        -fileNode.getPreferredBlockSize()*fileNode.getReplication(), true);
+        -fileNode.getPreferredBlockSize()*fileNode.getBlockReplication(), true);
   }
 
   /**
@@ -821,7 +821,7 @@ public class FSDirectory implements Closeable {
       return null;
     }
     INodeFile fileNode = (INodeFile)inode;
-    final short oldRepl = fileNode.getReplication();
+    final short oldRepl = fileNode.getBlockReplication();
 
     // check disk quota
     long dsDelta = (replication - oldRepl) * (fileNode.diskspaceConsumed()/oldRepl);
@@ -2061,7 +2061,7 @@ public class FSDirectory implements Closeable {
     if (node instanceof INodeFile) {
       INodeFile fileNode = (INodeFile)node;
       size = fileNode.computeFileSize(true);
-      replication = fileNode.getReplication();
+      replication = fileNode.getBlockReplication();
       blocksize = fileNode.getPreferredBlockSize();
     }
     return new HdfsFileStatus(
@@ -2091,7 +2091,7 @@ public class FSDirectory implements Closeable {
     if (node instanceof INodeFile) {
       INodeFile fileNode = (INodeFile)node;
       size = fileNode.computeFileSize(true);
-      replication = fileNode.getReplication();
+      replication = fileNode.getBlockReplication();
       blocksize = fileNode.getPreferredBlockSize();
       loc = getFSNamesystem().getBlockManager().createLocatedBlocks(
           fileNode.getBlocks(), fileNode.computeFileSize(false),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index ac7f63b6889..344326e2cc6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -602,7 +602,7 @@ public class FSEditLog {
   public void logOpenFile(String path, INodeFileUnderConstruction newNode) {
     AddOp op = AddOp.getInstance(cache.get())
       .setPath(path)
-      .setReplication(newNode.getReplication())
+      .setReplication(newNode.getBlockReplication())
       .setModificationTime(newNode.getModificationTime())
       .setAccessTime(newNode.getAccessTime())
       .setBlockSize(newNode.getPreferredBlockSize())
@@ -620,7 +620,7 @@ public class FSEditLog {
   public void logCloseFile(String path, INodeFile newNode) {
     CloseOp op = CloseOp.getInstance(cache.get())
       .setPath(path)
-      .setReplication(newNode.getReplication())
+      .setReplication(newNode.getBlockReplication())
       .setModificationTime(newNode.getModificationTime())
       .setAccessTime(newNode.getAccessTime())
       .setBlockSize(newNode.getPreferredBlockSize())
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index acc49627f51..d98b2472288 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -594,13 +594,13 @@ public class FSEditLogLoader {
           // what about an old-version fsync() where fsync isn't called
           // until several blocks in?
           newBI = new BlockInfoUnderConstruction(
-              newBlock, file.getReplication());
+              newBlock, file.getBlockReplication());
         } else {
           // OP_CLOSE should add finalized blocks. This code path
           // is only executed when loading edits written by prior
           // versions of Hadoop. Current versions always log
           // OP_ADD operations as each block is allocated.
-          newBI = new BlockInfo(newBlock, file.getReplication());
+          newBI = new BlockInfo(newBlock, file.getBlockReplication());
         }
         fsNamesys.getBlockManager().addBlockCollection(newBI, file);
         file.addBlock(newBI);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
index a8df0f706c8..fc0d6556a08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
@@ -126,7 +126,7 @@ public class FSImageSerialization {
                                            String path)
                                            throws IOException {
     writeString(path, out);
-    out.writeShort(cons.getReplication());
+    out.writeShort(cons.getBlockReplication());
     out.writeLong(cons.getModificationTime());
     out.writeLong(cons.getPreferredBlockSize());
     int nrBlocks = cons.getBlocks().length;
@@ -175,7 +175,7 @@ public class FSImageSerialization {
                          filePerm);
     } else {
       INodeFile fileINode = (INodeFile)node;
-      out.writeShort(fileINode.getReplication());
+      out.writeShort(fileINode.getBlockReplication());
       out.writeLong(fileINode.getModificationTime());
       out.writeLong(fileINode.getAccessTime());
       out.writeLong(fileINode.getPreferredBlockSize());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 47c4b89941f..194b1a2c54d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1396,7 +1396,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       }
       si.add(trgInode);
 
-      short repl = trgInode.getReplication();
+      short repl = trgInode.getBlockReplication();
 
       // now check the srcs
       boolean endSrc = false; // final src file doesn't have to have full end block
@@ -1416,10 +1416,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       }
 
       // check replication and blocks size
-      if(repl != srcInode.getReplication()) {
+      if(repl != srcInode.getBlockReplication()) {
         throw new IllegalArgumentException(src + " and " + target + " "
             + "should have same replication: "
-            + repl + " vs. " + srcInode.getReplication());
+            + repl + " vs. " + srcInode.getBlockReplication());
       }
 
       //boolean endBlock=false;
@@ -1862,7 +1862,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
                      boolean writeToEditLog) throws IOException {
     INodeFileUnderConstruction cons = new INodeFileUnderConstruction(
                                     file.getLocalNameBytes(),
-                                    file.getReplication(),
+                                    file.getBlockReplication(),
                                     file.getModificationTime(),
                                     file.getPreferredBlockSize(),
                                     file.getBlocks(),
@@ -2176,7 +2176,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       fileLength = pendingFile.computeContentSummary().getLength();
       blockSize = pendingFile.getPreferredBlockSize();
       clientNode = pendingFile.getClientNode();
-      replication = pendingFile.getReplication();
+      replication = pendingFile.getBlockReplication();
     } finally {
       writeUnlock();
     }
@@ -2420,7 +2420,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    * them into invalidateBlocks.
    */
   private void checkReplicationFactor(INodeFile file) {
-    short numExpectedReplicas = file.getReplication();
+    short numExpectedReplicas = file.getBlockReplication();
     Block[] pendingBlocks = file.getBlocks();
     int nrBlocks = pendingBlocks.length;
     for (int i = 0; i < nrBlocks; i++) {
@@ -3139,7 +3139,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       if (diff > 0) {
         try {
           String path = leaseManager.findPath(fileINode);
-          dir.updateSpaceConsumed(path, 0, -diff * fileINode.getReplication());
+          dir.updateSpaceConsumed(path, 0, -diff * fileINode.getBlockReplication());
         } catch (IOException e) {
           LOG.warn("Unexpected exception while updating disk space.", e);
         }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index ef8732d1d56..957f851c061 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -71,7 +71,7 @@ class INodeFile extends INode implements BlockCollection {
 
   /** @return the replication factor of the file. */
   @Override
-  public short getReplication() {
+  public short getBlockReplication() {
     return (short) ((header & HEADERMASK) >> BLOCKBITS);
   }
 
@@ -215,7 +215,7 @@ class INodeFile extends INode implements BlockCollection {
         isUnderConstruction()) {
       size += getPreferredBlockSize() - blkArr[blkArr.length-1].getNumBytes();
     }
-    return size * getReplication();
+    return size * getBlockReplication();
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
index 188050898b3..aff956e3cf8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
@@ -104,7 +104,7 @@ class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollec
         "non-complete blocks! Blocks are: " + blocksAsString();
     INodeFile obj = new INodeFile(getPermissionStatus(),
                                   getBlocks(),
-                                  getReplication(),
+                                  getBlockReplication(),
                                   getModificationTime(),
                                   getModificationTime(),
                                   getPreferredBlockSize());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
index 1ffb343fd73..64027d31ff3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
@@ -785,7 +785,7 @@ class NamenodeJspHelper {
       doc.endTag();
 
       doc.startTag("replication");
-      doc.pcdata(""+inode.getReplication());
+      doc.pcdata(""+inode.getBlockReplication());
       doc.endTag();
 
       doc.startTag("disk_space_consumed");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 07682fe9b25..22bf9b146be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -379,7 +379,7 @@ public class TestBlockManager {
 
   private BlockInfo addBlockOnNodes(long blockId, List<DatanodeDescriptor> nodes) {
     BlockCollection bc = Mockito.mock(BlockCollection.class);
-    Mockito.doReturn((short)3).when(bc).getReplication();
+    Mockito.doReturn((short)3).when(bc).getBlockReplication();
     BlockInfo blockInfo = blockOnNodes(blockId, nodes);
 
     bm.blocksMap.addBlockCollection(blockInfo, bc);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index 346844da73a..bb802185ac3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -48,7 +48,7 @@ public class TestINodeFile {
                                  FsPermission.getDefault()), null, replication,
                                  0L, 0L, preferredBlockSize);
     assertEquals("True has to be returned in this case", replication,
-                 inf.getReplication());
+                 inf.getBlockReplication());
   }
 
   /**
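
Notes for reviewers follow. The sketches below are illustrative commentary on
the patch, not part of the change itself; any identifier that does not appear
in the diff above is invented for the example at hand.

The patch is mechanical: the BlockCollection interface gains the
@InterfaceAudience.Private annotation, its getReplication() method is renamed,
and every call site is updated. Abbreviated, the interface change amounts to
this (other members elided):

    public interface BlockCollection {
      /**
       * Get block replication for the collection
       * @return block replication value
       */
      short getBlockReplication();   // was: public short getReplication()
    }

The longer name appears intended to make call sites self-describing: the
namenode juggles several notions of replication, and getBlockReplication()
states that the expected per-block replica count is meant. The JIRA summary
quoted in CHANGES.txt records only the rename itself, so treat that rationale
as an inference.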
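Several BlockManager hunks compare a block's live replica count against the
renamed getter to decide between re-replication and invalidation. A standalone
toy version of that decision (plain Java, not HDFS code):

    /** Toy classifier mirroring the liveReplicas-vs-expected checks above. */
    class ReplicationCheck {
      static String classify(int liveReplicas, short expectedReplication) {
        if (liveReplicas < expectedReplication) {
          return "under-replicated: queue for re-replication";
        } else if (liveReplicas > expectedReplication) {
          return "over-replicated: pick excess replicas to invalidate";
        }
        return "adequately replicated";
      }

      public static void main(String[] args) {
        // a block with 2 live replicas on a file created with replication 3
        System.out.println(classify(2, (short) 3));  // under-replicated
      }
    }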
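The FSDirectory hunks show where replication enters quota arithmetic: a newly
allocated block reserves getPreferredBlockSize() * getBlockReplication()
bytes, and the replication-change path computes
dsDelta = (newRepl - oldRepl) * (diskspaceConsumed / oldRepl). A worked
example with assumed figures:

    /** Worked example of the disk-space quota delta; the numbers are made up. */
    class QuotaDeltaExample {
      public static void main(String[] args) {
        short oldRepl = 3;
        short newRepl = 2;
        // one full 128 MB block currently stored at replication 3
        long diskspaceConsumed = 3L * 128 * 1024 * 1024;

        // per-replica bytes times the change in replica count
        long dsDelta = (newRepl - oldRepl) * (diskspaceConsumed / oldRepl);
        System.out.println(dsDelta);  // -134217728: one replica's worth freed
      }
    }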
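The INodeFile hunk shows why the getter is a one-liner: the replication factor
is bit-packed into the same long header as the preferred block size, so
reading it is a mask and a shift. The constants below (a 16-bit replication
field above a 48-bit block-size field) are restated from memory of the
branch-2 layout and should be treated as assumptions; the diff itself shows
only the expression:

    /** Sketch of the header packing behind INodeFile.getBlockReplication(). */
    class HeaderPacking {
      static final int BLOCKBITS = 48;                     // assumed value
      static final long HEADERMASK = 0xffffL << BLOCKBITS; // assumed value

      static long pack(short replication, long preferredBlockSize) {
        return ((long) replication << BLOCKBITS)
            | (preferredBlockSize & ~HEADERMASK);
      }

      static short getBlockReplication(long header) {
        return (short) ((header & HEADERMASK) >> BLOCKBITS); // as in the patch
      }

      public static void main(String[] args) {
        long header = pack((short) 3, 128L * 1024 * 1024);
        System.out.println(getBlockReplication(header));  // 3
      }
    }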
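Finally, the TestBlockManager hunk updates a Mockito stub. A minimal,
self-contained version of the same stubbing pattern, using a local stand-in so
the sketch compiles without HDFS on the classpath:

    import static org.junit.Assert.assertEquals;
    import org.junit.Test;
    import org.mockito.Mockito;

    public class TestBlockReplicationStub {
      /** Stand-in for the real HDFS interface; only the renamed method. */
      interface BlockCollection {
        short getBlockReplication();
      }

      @Test
      public void stubbedReplicationIsReturned() {
        BlockCollection bc = Mockito.mock(BlockCollection.class);
        Mockito.doReturn((short) 3).when(bc).getBlockReplication();
        assertEquals(3, bc.getBlockReplication());
      }
    }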