From ad06a087131d69d173d8e03dce5c97650a530f2e Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Mon, 15 Oct 2012 13:48:56 +0000
Subject: [PATCH] HDFS-4037. Rename the getReplication() method in
 BlockCollection to getBlockReplication().

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1398288 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt        |  7 +++++--
 .../server/blockmanagement/BlockCollection.java    |  4 +++-
 .../hdfs/server/blockmanagement/BlockInfo.java     |  4 ++--
 .../hdfs/server/blockmanagement/BlockManager.java  | 14 +++++++-------
 .../hadoop/hdfs/server/namenode/FSDirectory.java   | 12 ++++++------
 .../hadoop/hdfs/server/namenode/FSEditLog.java     |  4 ++--
 .../hdfs/server/namenode/FSEditLogLoader.java      |  4 ++--
 .../hdfs/server/namenode/FSImageSerialization.java |  4 ++--
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  | 14 +++++++-------
 .../hadoop/hdfs/server/namenode/INodeFile.java     |  4 ++--
 .../namenode/INodeFileUnderConstruction.java       |  2 +-
 .../hdfs/server/namenode/NamenodeJspHelper.java    |  2 +-
 .../server/blockmanagement/TestBlockManager.java   |  2 +-
 .../hadoop/hdfs/server/namenode/TestINodeFile.java |  2 +-
 14 files changed, 42 insertions(+), 37 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 870a2560797..9778ad25ed0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -350,8 +350,8 @@ Release 2.0.3-alpha - Unreleased
 
     HDFS-3939. NN RPC address cleanup. (eli)
 
-    HDFS-3373. Change DFSClient input stream socket cache to global static and add
-    a thread to cleanup expired cache entries. (John George via szetszwo)
+    HDFS-3373. Change DFSClient input stream socket cache to global static and
+    add a thread to cleanup expired cache entries. (John George via szetszwo)
 
     HDFS-3896. Add descriptions for dfs.namenode.rpc-address and
     dfs.namenode.servicerpc-address to hdfs-default.xml. (Jeff Lord via atm)
@@ -382,6 +382,9 @@ Release 2.0.3-alpha - Unreleased
     HADOOP-8911. CRLF characters in source and text files.
     (Raja Aluri via suresh)
 
+    HDFS-4037. Rename the getReplication() method in BlockCollection to
+    getBlockReplication(). (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index e3eecadce0c..f344833a0c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -19,12 +19,14 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ContentSummary;
 
 /**
  * This interface is used by the block manager to expose a
  * few characteristics of a collection of Block/BlockUnderConstruction.
  */
+@InterfaceAudience.Private
 public interface BlockCollection {
   /**
    * Get the last block of the collection.
@@ -56,7 +58,7 @@ public interface BlockCollection {
    * Get block replication for the collection
    * @return block replication value
    */
-  public short getReplication();
+  public short getBlockReplication();
 
   /**
    * Get the name of the collection.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 0739aab970c..e08af3dd408 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -73,7 +73,7 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
    * @param from BlockInfo to copy from.
    */
   protected BlockInfo(BlockInfo from) {
-    this(from, from.bc.getReplication());
+    this(from, from.bc.getBlockReplication());
     this.bc = from.bc;
   }
 
@@ -335,7 +335,7 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
       BlockUCState s, DatanodeDescriptor[] targets) {
     if(isComplete()) {
       return new BlockInfoUnderConstruction(
-          this, getBlockCollection().getReplication(), s, targets);
+          this, getBlockCollection().getBlockReplication(), s, targets);
     }
     // the block is already under construction
     BlockInfoUnderConstruction ucBlock = (BlockInfoUnderConstruction)this;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index bd6a494b9f7..9af8eb4cbef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -997,7 +997,7 @@ public class BlockManager {
 
     // Add this replica to corruptReplicas Map
     corruptReplicas.addToCorruptReplicasMap(b.corrupted, node, b.reason);
-    if (countNodes(b.stored).liveReplicas() >= bc.getReplication()) {
+    if (countNodes(b.stored).liveReplicas() >= bc.getBlockReplication()) {
       // the block is over-replicated so invalidate the replicas immediately
       invalidateBlock(b, node);
     } else if (namesystem.isPopulatingReplQueues()) {
@@ -1135,7 +1135,7 @@ public class BlockManager {
             continue;
           }
 
-          requiredReplication = bc.getReplication();
+          requiredReplication = bc.getBlockReplication();
 
           // get a source data-node
           containingNodes = new ArrayList<DatanodeDescriptor>();
@@ -1221,7 +1221,7 @@ public class BlockManager {
             neededReplications.decrementReplicationIndex(priority);
             continue;
           }
-          requiredReplication = bc.getReplication();
+          requiredReplication = bc.getBlockReplication();
 
           // do not schedule more if enough replicas is already pending
           NumberReplicas numReplicas = countNodes(block);
@@ -2089,7 +2089,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     }
 
     // handle underReplication/overReplication
-    short fileReplication = bc.getReplication();
+    short fileReplication = bc.getBlockReplication();
     if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) {
       neededReplications.remove(storedBlock, numCurrentReplica,
           num.decommissionedReplicas(), fileReplication);
@@ -2228,7 +2228,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
       return MisReplicationResult.UNDER_CONSTRUCTION;
     }
     // calculate current replication
-    short expectedReplication = bc.getReplication();
+    short expectedReplication = bc.getBlockReplication();
     NumberReplicas num = countNodes(block);
     int numCurrentReplica = num.liveReplicas();
     // add to under-replicated queue if need to be
@@ -2699,7 +2699,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     while(it.hasNext()) {
       final Block block = it.next();
       BlockCollection bc = blocksMap.getBlockCollection(block);
-      short expectedReplication = bc.getReplication();
+      short expectedReplication = bc.getBlockReplication();
       NumberReplicas num = countNodes(block);
       int numCurrentReplica = num.liveReplicas();
       if (numCurrentReplica > expectedReplication) {
@@ -2845,7 +2845,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
 
     if (bc == null) { // block does not belong to any file
       return 0;
     }
-    return bc.getReplication();
+    return bc.getBlockReplication();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index fb9f54d21b5..21e98dc524d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -345,13 +345,13 @@ public class FSDirectory implements Closeable {
 
       // check quota limits and updated space consumed
       updateCount(inodes, inodes.length-1, 0,
-          fileINode.getPreferredBlockSize()*fileINode.getReplication(), true);
+          fileINode.getPreferredBlockSize()*fileINode.getBlockReplication(), true);
 
       // associate new last block for the file
       BlockInfoUnderConstruction blockInfo =
         new BlockInfoUnderConstruction(
             block,
-            fileINode.getReplication(),
+            fileINode.getBlockReplication(),
             BlockUCState.UNDER_CONSTRUCTION,
             targets);
       getBlockManager().addBlockCollection(blockInfo, fileINode);
@@ -442,7 +442,7 @@ public class FSDirectory implements Closeable {
     // update space consumed
     INode[] pathINodes = getExistingPathINodes(path);
     updateCount(pathINodes, pathINodes.length-1, 0,
-        -fileNode.getPreferredBlockSize()*fileNode.getReplication(), true);
+        -fileNode.getPreferredBlockSize()*fileNode.getBlockReplication(), true);
   }
 
   /**
@@ -821,7 +821,7 @@ public class FSDirectory implements Closeable {
       return null;
     }
     INodeFile fileNode = (INodeFile)inode;
-    final short oldRepl = fileNode.getReplication();
+    final short oldRepl = fileNode.getBlockReplication();
 
     // check disk quota
     long dsDelta = (replication - oldRepl) * (fileNode.diskspaceConsumed()/oldRepl);
@@ -2061,7 +2061,7 @@ public class FSDirectory implements Closeable {
     if (node instanceof INodeFile) {
       INodeFile fileNode = (INodeFile)node;
       size = fileNode.computeFileSize(true);
-      replication = fileNode.getReplication();
+      replication = fileNode.getBlockReplication();
       blocksize = fileNode.getPreferredBlockSize();
     }
     return new HdfsFileStatus(
@@ -2091,7 +2091,7 @@ public class FSDirectory implements Closeable {
     if (node instanceof INodeFile) {
       INodeFile fileNode = (INodeFile)node;
       size = fileNode.computeFileSize(true);
-      replication = fileNode.getReplication();
+      replication = fileNode.getBlockReplication();
       blocksize = fileNode.getPreferredBlockSize();
       loc = getFSNamesystem().getBlockManager().createLocatedBlocks(
           fileNode.getBlocks(), fileNode.computeFileSize(false),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 2286e2ebb53..8f15d793842 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -657,7 +657,7 @@ public class FSEditLog implements LogsPurgeable {
   public void logOpenFile(String path, INodeFileUnderConstruction newNode) {
     AddOp op = AddOp.getInstance(cache.get())
       .setPath(path)
-      .setReplication(newNode.getReplication())
+      .setReplication(newNode.getBlockReplication())
       .setModificationTime(newNode.getModificationTime())
       .setAccessTime(newNode.getAccessTime())
       .setBlockSize(newNode.getPreferredBlockSize())
@@ -675,7 +675,7 @@ public class FSEditLog implements LogsPurgeable {
   public void logCloseFile(String path, INodeFile newNode) {
     CloseOp op = CloseOp.getInstance(cache.get())
       .setPath(path)
-      .setReplication(newNode.getReplication())
+      .setReplication(newNode.getBlockReplication())
       .setModificationTime(newNode.getModificationTime())
       .setAccessTime(newNode.getAccessTime())
       .setBlockSize(newNode.getPreferredBlockSize())
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 5a874fc1366..945164bf3de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -592,13 +592,13 @@ public class FSEditLogLoader {
         // what about an old-version fsync() where fsync isn't called
         // until several blocks in?
         newBI = new BlockInfoUnderConstruction(
-            newBlock, file.getReplication());
+            newBlock, file.getBlockReplication());
       } else {
         // OP_CLOSE should add finalized blocks. This code path
         // is only executed when loading edits written by prior
         // versions of Hadoop. Current versions always log
         // OP_ADD operations as each block is allocated.
-        newBI = new BlockInfo(newBlock, file.getReplication());
+        newBI = new BlockInfo(newBlock, file.getBlockReplication());
       }
       fsNamesys.getBlockManager().addBlockCollection(newBI, file);
       file.addBlock(newBI);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
index a8df0f706c8..fc0d6556a08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
@@ -126,7 +126,7 @@ public class FSImageSerialization {
                                            String path)
                                            throws IOException {
     writeString(path, out);
-    out.writeShort(cons.getReplication());
+    out.writeShort(cons.getBlockReplication());
     out.writeLong(cons.getModificationTime());
     out.writeLong(cons.getPreferredBlockSize());
     int nrBlocks = cons.getBlocks().length;
@@ -175,7 +175,7 @@ public class FSImageSerialization {
                                  filePerm);
     } else {
       INodeFile fileINode = (INodeFile)node;
-      out.writeShort(fileINode.getReplication());
+      out.writeShort(fileINode.getBlockReplication());
       out.writeLong(fileINode.getModificationTime());
       out.writeLong(fileINode.getAccessTime());
       out.writeLong(fileINode.getPreferredBlockSize());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index a907a57ff71..32d367879a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1411,7 +1411,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       }
       si.add(trgInode);
 
-      short repl = trgInode.getReplication();
+      short repl = trgInode.getBlockReplication();
 
       // now check the srcs
       boolean endSrc = false; // final src file doesn't have to have full end block
@@ -1431,10 +1431,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       }
 
       // check replication and blocks size
-      if(repl != srcInode.getReplication()) {
+      if(repl != srcInode.getBlockReplication()) {
         throw new IllegalArgumentException(src + " and " + target + " "
             + "should have same replication: "
-            + repl + " vs. " + srcInode.getReplication());
+            + repl + " vs. " + srcInode.getBlockReplication());
       }
 
       //boolean endBlock=false;
@@ -1877,7 +1877,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
                                      boolean writeToEditLog) throws IOException {
     INodeFileUnderConstruction cons = new INodeFileUnderConstruction(
                                     file.getLocalNameBytes(),
-                                    file.getReplication(),
+                                    file.getBlockReplication(),
                                     file.getModificationTime(),
                                     file.getPreferredBlockSize(),
                                     file.getBlocks(),
@@ -2191,7 +2191,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       fileLength = pendingFile.computeContentSummary().getLength();
       blockSize = pendingFile.getPreferredBlockSize();
       clientNode = pendingFile.getClientNode();
-      replication = pendingFile.getReplication();
+      replication = pendingFile.getBlockReplication();
     } finally {
       writeUnlock();
     }
@@ -2435,7 +2435,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    * them into invalidateBlocks.
    */
   private void checkReplicationFactor(INodeFile file) {
-    short numExpectedReplicas = file.getReplication();
+    short numExpectedReplicas = file.getBlockReplication();
     Block[] pendingBlocks = file.getBlocks();
     int nrBlocks = pendingBlocks.length;
     for (int i = 0; i < nrBlocks; i++) {
@@ -3154,7 +3154,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     if (diff > 0) {
       try {
         String path = leaseManager.findPath(fileINode);
-        dir.updateSpaceConsumed(path, 0, -diff * fileINode.getReplication());
+        dir.updateSpaceConsumed(path, 0, -diff * fileINode.getBlockReplication());
       } catch (IOException e) {
         LOG.warn("Unexpected exception while updating disk space.", e);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index f69c3511101..26a0b6ed22c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -71,7 +71,7 @@ public class INodeFile extends INode implements BlockCollection {
 
   /** @return the replication factor of the file. */
   @Override
-  public short getReplication() {
+  public short getBlockReplication() {
     return (short) ((header & HEADERMASK) >> BLOCKBITS);
   }
 
@@ -215,7 +215,7 @@ public class INodeFile extends INode implements BlockCollection {
         isUnderConstruction()) {
       size += getPreferredBlockSize() - blkArr[blkArr.length-1].getNumBytes();
     }
-    return size * getReplication();
+    return size * getBlockReplication();
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
index 188050898b3..aff956e3cf8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
@@ -104,7 +104,7 @@ class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollec
         "non-complete blocks! Blocks are: " + blocksAsString();
     INodeFile obj = new INodeFile(getPermissionStatus(),
                                   getBlocks(),
-                                  getReplication(),
+                                  getBlockReplication(),
                                   getModificationTime(),
                                   getModificationTime(),
                                   getPreferredBlockSize());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
index 07f3f5ff771..348f8dae6bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
@@ -834,7 +834,7 @@ class NamenodeJspHelper {
       doc.endTag();
 
       doc.startTag("replication");
-      doc.pcdata(""+inode.getReplication());
+      doc.pcdata(""+inode.getBlockReplication());
       doc.endTag();
 
       doc.startTag("disk_space_consumed");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 07682fe9b25..22bf9b146be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -379,7 +379,7 @@ public class TestBlockManager {
 
   private BlockInfo addBlockOnNodes(long blockId, List<DatanodeDescriptor> nodes) {
     BlockCollection bc = Mockito.mock(BlockCollection.class);
-    Mockito.doReturn((short)3).when(bc).getReplication();
+    Mockito.doReturn((short)3).when(bc).getBlockReplication();
     BlockInfo blockInfo = blockOnNodes(blockId, nodes);
 
     bm.blocksMap.addBlockCollection(blockInfo, bc);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index 346844da73a..bb802185ac3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -48,7 +48,7 @@ public class TestINodeFile {
                                   FsPermission.getDefault()), null, replication,
                                   0L, 0L, preferredBlockSize);
     assertEquals("True has to be returned in this case", replication,
-                 inf.getReplication());
+                 inf.getBlockReplication());
   }
 
   /**