diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3e98ae3cb19..4d268e23b5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -19,8 +19,8 @@ Release 2.0.3-alpha - Unreleased
 
     HDFS-3939. NN RPC address cleanup. (eli)
 
-    HDFS-3373. Change DFSClient input stream socket cache to global static and add
-    a thread to cleanup expired cache entries. (John George via szetszwo)
+    HDFS-3373. Change DFSClient input stream socket cache to global static and
+    add a thread to cleanup expired cache entries. (John George via szetszwo)
 
     HDFS-3896. Add descriptions for dfs.namenode.rpc-address and
     dfs.namenode.servicerpc-address to hdfs-default.xml. (Jeff Lord via atm)
@@ -51,6 +51,9 @@ Release 2.0.3-alpha - Unreleased
 
     HADOOP-8911. CRLF characters in source and text files. (Raja Aluri via suresh)
 
+    HDFS-4037. Rename the getReplication() method in BlockCollection to
+    getBlockReplication(). (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index e3eecadce0c..f344833a0c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -19,12 +19,14 @@
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ContentSummary;
 
 /**
  * This interface is used by the block manager to expose a
  * few characteristics of a collection of Block/BlockUnderConstruction.
  */
+@InterfaceAudience.Private
 public interface BlockCollection {
   /**
    * Get the last block of the collection.
@@ -56,7 +58,7 @@ public interface BlockCollection {
    * Get block replication for the collection
    * @return block replication value
    */
-  public short getReplication();
+  public short getBlockReplication();
 
   /**
    * Get the name of the collection.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 610b6cab557..b56b7b620a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -68,7 +68,7 @@ public BlockInfo(Block blk, int replication) {
    * @param from BlockInfo to copy from.
    */
   protected BlockInfo(BlockInfo from) {
-    this(from, from.bc.getReplication());
+    this(from, from.bc.getBlockReplication());
     this.bc = from.bc;
   }
 
@@ -344,7 +344,7 @@ public BlockInfoUnderConstruction convertToBlockUnderConstruction(
       BlockUCState s, DatanodeDescriptor[] targets) {
     if(isComplete()) {
       return new BlockInfoUnderConstruction(
-          this, getBlockCollection().getReplication(), s, targets);
+          this, getBlockCollection().getBlockReplication(), s, targets);
     }
     // the block is already under construction
     BlockInfoUnderConstruction ucBlock = (BlockInfoUnderConstruction)this;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index e851279ba07..8fc52cdacf3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -998,7 +998,7 @@ private void markBlockAsCorrupt(BlockToMarkCorrupt b,
 
     // Add this replica to corruptReplicas Map
     corruptReplicas.addToCorruptReplicasMap(b.corrupted, node, b.reason);
-    if (countNodes(b.stored).liveReplicas() >= bc.getReplication()) {
+    if (countNodes(b.stored).liveReplicas() >= bc.getBlockReplication()) {
       // the block is over-replicated so invalidate the replicas immediately
       invalidateBlock(b, node);
     } else if (namesystem.isPopulatingReplQueues()) {
@@ -1136,7 +1136,7 @@ int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
           continue;
         }
 
-        requiredReplication = bc.getReplication();
+        requiredReplication = bc.getBlockReplication();
 
         // get a source data-node
         containingNodes = new ArrayList<DatanodeDescriptor>();
@@ -1222,7 +1222,7 @@ int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
             neededReplications.decrementReplicationIndex(priority);
             continue;
           }
-          requiredReplication = bc.getReplication();
+          requiredReplication = bc.getBlockReplication();
 
           // do not schedule more if enough replicas is already pending
           NumberReplicas numReplicas = countNodes(block);
@@ -2090,7 +2090,7 @@ private Block addStoredBlock(final BlockInfo block,
     }
 
     // handle underReplication/overReplication
-    short fileReplication = bc.getReplication();
+    short fileReplication = bc.getBlockReplication();
     if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) {
       neededReplications.remove(storedBlock, numCurrentReplica,
           num.decommissionedReplicas(), fileReplication);
@@ -2229,7 +2229,7 @@ private MisReplicationResult processMisReplicatedBlock(BlockInfo block) {
       return MisReplicationResult.UNDER_CONSTRUCTION;
     }
     // calculate current replication
-    short expectedReplication = bc.getReplication();
+    short expectedReplication = bc.getBlockReplication();
     NumberReplicas num = countNodes(block);
     int numCurrentReplica = num.liveReplicas();
     // add to under-replicated queue if need to be
@@ -2728,7 +2728,7 @@ void processOverReplicatedBlocksOnReCommission(
     while(it.hasNext()) {
      final Block block = it.next();
       BlockCollection bc = blocksMap.getBlockCollection(block);
-      short expectedReplication = bc.getReplication();
+      short expectedReplication = bc.getBlockReplication();
       NumberReplicas num = countNodes(block);
       int numCurrentReplica = num.liveReplicas();
       if (numCurrentReplica > expectedReplication) {
@@ -2874,7 +2874,7 @@ private int getReplication(Block block) {
     if (bc == null) {
       // block does not belong to any file
       return 0;
     }
-    return bc.getReplication();
+    return bc.getBlockReplication();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index e02aea5f1d1..ec86de628a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -345,13 +345,13 @@ BlockInfo addBlock(String path,
 
       // check quota limits and updated space consumed
       updateCount(inodes, inodes.length-1, 0,
-          fileINode.getPreferredBlockSize()*fileINode.getReplication(), true);
+          fileINode.getPreferredBlockSize()*fileINode.getBlockReplication(), true);
 
       // associate new last block for the file
       BlockInfoUnderConstruction blockInfo =
         new BlockInfoUnderConstruction(
             block,
-            fileINode.getReplication(),
+            fileINode.getBlockReplication(),
             BlockUCState.UNDER_CONSTRUCTION,
             targets);
       getBlockManager().addBlockCollection(blockInfo, fileINode);
@@ -442,7 +442,7 @@ void unprotectedRemoveBlock(String path, INodeFileUnderConstruction fileNode,
     // update space consumed
     INode[] pathINodes = getExistingPathINodes(path);
     updateCount(pathINodes, pathINodes.length-1, 0,
-        -fileNode.getPreferredBlockSize()*fileNode.getReplication(), true);
+        -fileNode.getPreferredBlockSize()*fileNode.getBlockReplication(), true);
   }
 
   /**
@@ -821,7 +821,7 @@ Block[] unprotectedSetReplication(String src,
       return null;
     }
     INodeFile fileNode = (INodeFile)inode;
-    final short oldRepl = fileNode.getReplication();
+    final short oldRepl = fileNode.getBlockReplication();
 
     // check disk quota
     long dsDelta = (replication - oldRepl) * (fileNode.diskspaceConsumed()/oldRepl);
@@ -2061,7 +2061,7 @@ private HdfsFileStatus createFileStatus(byte[] path, INode node) {
     if (node instanceof INodeFile) {
       INodeFile fileNode = (INodeFile)node;
       size = fileNode.computeFileSize(true);
-      replication = fileNode.getReplication();
+      replication = fileNode.getBlockReplication();
       blocksize = fileNode.getPreferredBlockSize();
     }
     return new HdfsFileStatus(
@@ -2091,7 +2091,7 @@ private HdfsLocatedFileStatus createLocatedFileStatus(
     if (node instanceof INodeFile) {
       INodeFile fileNode = (INodeFile)node;
       size = fileNode.computeFileSize(true);
-      replication = fileNode.getReplication();
+      replication = fileNode.getBlockReplication();
       blocksize = fileNode.getPreferredBlockSize();
       loc = getFSNamesystem().getBlockManager().createLocatedBlocks(
           fileNode.getBlocks(), fileNode.computeFileSize(false),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index ac7f63b6889..344326e2cc6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -602,7 +602,7 @@ private void printStatistics(boolean force) {
   public void logOpenFile(String path, INodeFileUnderConstruction newNode) {
     AddOp op = AddOp.getInstance(cache.get())
       .setPath(path)
-      .setReplication(newNode.getReplication())
+      .setReplication(newNode.getBlockReplication())
      .setModificationTime(newNode.getModificationTime())
       .setAccessTime(newNode.getAccessTime())
       .setBlockSize(newNode.getPreferredBlockSize())
@@ -620,7 +620,7 @@ public void logOpenFile(String path, INodeFileUnderConstruction newNode) {
   public void logCloseFile(String path, INodeFile newNode) {
     CloseOp op = CloseOp.getInstance(cache.get())
       .setPath(path)
-      .setReplication(newNode.getReplication())
+      .setReplication(newNode.getBlockReplication())
       .setModificationTime(newNode.getModificationTime())
       .setAccessTime(newNode.getAccessTime())
       .setBlockSize(newNode.getPreferredBlockSize())
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index acc49627f51..d98b2472288 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -594,13 +594,13 @@ private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
           // what about an old-version fsync() where fsync isn't called
           // until several blocks in?
           newBI = new BlockInfoUnderConstruction(
-              newBlock, file.getReplication());
+              newBlock, file.getBlockReplication());
         } else {
           // OP_CLOSE should add finalized blocks. This code path
          // is only executed when loading edits written by prior
           // versions of Hadoop. Current versions always log
           // OP_ADD operations as each block is allocated.
-          newBI = new BlockInfo(newBlock, file.getReplication());
+          newBI = new BlockInfo(newBlock, file.getBlockReplication());
         }
         fsNamesys.getBlockManager().addBlockCollection(newBI, file);
         file.addBlock(newBI);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
index a8df0f706c8..fc0d6556a08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
@@ -126,7 +126,7 @@ static void writeINodeUnderConstruction(DataOutputStream out,
                                            String path)
                                            throws IOException {
     writeString(path, out);
-    out.writeShort(cons.getReplication());
+    out.writeShort(cons.getBlockReplication());
     out.writeLong(cons.getModificationTime());
     out.writeLong(cons.getPreferredBlockSize());
     int nrBlocks = cons.getBlocks().length;
@@ -175,7 +175,7 @@ static void saveINode2Image(INode node,
                       filePerm);
     } else {
       INodeFile fileINode = (INodeFile)node;
-      out.writeShort(fileINode.getReplication());
+      out.writeShort(fileINode.getBlockReplication());
       out.writeLong(fileINode.getModificationTime());
       out.writeLong(fileINode.getAccessTime());
       out.writeLong(fileINode.getPreferredBlockSize());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 47c4b89941f..194b1a2c54d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1396,7 +1396,7 @@ private void concatInternal(String target, String [] srcs)
 
     }
     si.add(trgInode);
-    short repl = trgInode.getReplication();
+    short repl = trgInode.getBlockReplication();
 
     // now check the srcs
     boolean endSrc = false; // final src file doesn't have to have full end block
@@ -1416,10 +1416,10 @@ private void concatInternal(String target, String [] srcs)
       }
 
       // check replication and blocks size
-      if(repl != srcInode.getReplication()) {
+      if(repl != srcInode.getBlockReplication()) {
         throw new IllegalArgumentException(src + " and " + target + " "
             + "should have same replication: "
-            + repl + " vs. " + srcInode.getReplication());
+            + repl + " vs. " + srcInode.getBlockReplication());
       }
 
       //boolean endBlock=false;
@@ -1862,7 +1862,7 @@ LocatedBlock prepareFileForWrite(String src, INodeFile file,
       boolean writeToEditLog) throws IOException {
     INodeFileUnderConstruction cons = new INodeFileUnderConstruction(
                                     file.getLocalNameBytes(),
-                                    file.getReplication(),
+                                    file.getBlockReplication(),
                                     file.getModificationTime(),
                                     file.getPreferredBlockSize(),
                                     file.getBlocks(),
@@ -2176,7 +2176,7 @@ LocatedBlock getAdditionalBlock(String src,
       fileLength = pendingFile.computeContentSummary().getLength();
       blockSize = pendingFile.getPreferredBlockSize();
       clientNode = pendingFile.getClientNode();
-      replication = pendingFile.getReplication();
+      replication = pendingFile.getBlockReplication();
     } finally {
       writeUnlock();
     }
@@ -2420,7 +2420,7 @@ private boolean completeFileInternal(String src,
    * them into invalidateBlocks.
    */
   private void checkReplicationFactor(INodeFile file) {
-    short numExpectedReplicas = file.getReplication();
+    short numExpectedReplicas = file.getBlockReplication();
     Block[] pendingBlocks = file.getBlocks();
     int nrBlocks = pendingBlocks.length;
     for (int i = 0; i < nrBlocks; i++) {
@@ -3139,7 +3139,7 @@ private void commitOrCompleteLastBlock(final INodeFileUnderConstruction fileINod
     if (diff > 0) {
       try {
         String path = leaseManager.findPath(fileINode);
-        dir.updateSpaceConsumed(path, 0, -diff * fileINode.getReplication());
+        dir.updateSpaceConsumed(path, 0, -diff * fileINode.getBlockReplication());
       } catch (IOException e) {
         LOG.warn("Unexpected exception while updating disk space.", e);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index ef8732d1d56..957f851c061 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -71,7 +71,7 @@ boolean isDirectory() {
 
   /** @return the replication factor of the file. */
   @Override
-  public short getReplication() {
+  public short getBlockReplication() {
     return (short) ((header & HEADERMASK) >> BLOCKBITS);
   }
 
@@ -215,7 +215,7 @@ private long diskspaceConsumed(Block[] blkArr) {
         isUnderConstruction()) {
       size += getPreferredBlockSize() - blkArr[blkArr.length-1].getNumBytes();
     }
-    return size * getReplication();
+    return size * getBlockReplication();
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
index 188050898b3..aff956e3cf8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
@@ -104,7 +104,7 @@ assert allBlocksComplete() : "non-complete blocks! Blocks are: "
       + blocksAsString();
     INodeFile obj = new INodeFile(getPermissionStatus(),
                                   getBlocks(),
-                                  getReplication(),
+                                  getBlockReplication(),
                                   getModificationTime(),
                                   getModificationTime(),
                                   getPreferredBlockSize());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
index 1ffb343fd73..64027d31ff3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
@@ -785,7 +785,7 @@ public void toXML(XMLOutputter doc) throws IOException {
       doc.endTag();
 
       doc.startTag("replication");
-      doc.pcdata(""+inode.getReplication());
+      doc.pcdata(""+inode.getBlockReplication());
       doc.endTag();
 
       doc.startTag("disk_space_consumed");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 07682fe9b25..22bf9b146be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -379,7 +379,7 @@ private List<DatanodeDescriptor> startDecommission(int ... indexes) {
 
   private BlockInfo addBlockOnNodes(long blockId, List<DatanodeDescriptor> nodes) {
     BlockCollection bc = Mockito.mock(BlockCollection.class);
-    Mockito.doReturn((short)3).when(bc).getReplication();
+    Mockito.doReturn((short)3).when(bc).getBlockReplication();
     BlockInfo blockInfo = blockOnNodes(blockId, nodes);
 
     bm.blocksMap.addBlockCollection(blockInfo, bc);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index 346844da73a..bb802185ac3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -48,7 +48,7 @@ public void testReplication () {
                                   FsPermission.getDefault()), null, replication,
                                   0L, 0L, preferredBlockSize);
     assertEquals("True has to be returned in this case", replication,
-                 inf.getReplication());
+                 inf.getBlockReplication());
   }
 
   /**
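
Note (not part of the patch): the change is a pure rename — every call site of BlockCollection.getReplication() becomes getBlockReplication(), with no behavioral change — plus the new @InterfaceAudience.Private annotation marking the interface as internal. The sketch below is a minimal standalone Java illustration of the renamed contract and of the replica-count comparison made in BlockManager.markBlockAsCorrupt() above; it is not the real HDFS source, and the FixedReplicationCollection class, the helper method, and the main() driver are invented here for illustration.

  // BlockReplicationExample.java — standalone sketch, not the HDFS sources.
  public class BlockReplicationExample {

    // Trimmed-down analogue of the patched interface; the real one lives in
    // org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection.
    interface BlockCollection {
      // Expected replication factor for blocks in this collection
      // (renamed from getReplication() by HDFS-4037).
      short getBlockReplication();
    }

    // Toy collection with a fixed replication factor, standing in for an
    // INodeFile-like implementation (hypothetical, for illustration only).
    static class FixedReplicationCollection implements BlockCollection {
      private final short replication;

      FixedReplicationCollection(short replication) {
        this.replication = replication;
      }

      @Override
      public short getBlockReplication() {
        return replication;
      }
    }

    // Mirrors the shape of the check in markBlockAsCorrupt(): a corrupt
    // replica may be invalidated immediately only if enough live replicas
    // of the block remain.
    static boolean canInvalidateCorruptReplica(BlockCollection bc,
        int liveReplicas) {
      return liveReplicas >= bc.getBlockReplication();
    }

    public static void main(String[] args) {
      BlockCollection bc = new FixedReplicationCollection((short) 3);
      System.out.println(canInvalidateCorruptReplica(bc, 3)); // true
      System.out.println(canInvalidateCorruptReplica(bc, 2)); // false: re-replicate first
    }
  }

The longer name reads as a deliberate disambiguation: "replication" alone is overloaded in the namenode code, and getBlockReplication() makes clear at each of the call sites updated above that the value is the expected per-block replication factor of the collection.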