From 6e36dbf03a6e2769a24a79f8fe2cc7c00d9525c8 Mon Sep 17 00:00:00 2001
From: Haohui Mai
Date: Thu, 2 Apr 2015 11:38:02 -0700
Subject: [PATCH] HDFS-8035. Move checkBlocksProperlyReplicated() in FSNamesystem to BlockManager. Contributed by Haohui Mai.

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 ++
 .../server/blockmanagement/BlockManager.java  | 21 ++++++++
 .../hdfs/server/namenode/FSNamesystem.java    | 49 ++++---------------
 3 files changed, 33 insertions(+), 40 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c97f82caaad..9036ae23b7c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -568,6 +568,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-8008. Support client-side back off when the datanodes are congested.
     (wheat9)
 
+    HDFS-8035. Move checkBlocksProperlyReplicated() in FSNamesystem to
+    BlockManager. (wheat9)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index acb5c4459b0..b2babf9a757 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3411,6 +3411,27 @@ public void checkReplication(BlockCollection bc) {
     }
   }
 
+  /**
+   * Check that the indicated blocks are present and
+   * replicated.
+   */
+  public boolean checkBlocksProperlyReplicated(
+      String src, BlockInfoContiguous[] blocks) {
+    for (BlockInfoContiguous b: blocks) {
+      if (!b.isComplete()) {
+        final BlockInfoContiguousUnderConstruction uc =
+            (BlockInfoContiguousUnderConstruction)b;
+        final int numNodes = b.numNodes();
+        LOG.info("BLOCK* " + b + " is not COMPLETE (ucState = "
+            + uc.getBlockUCState() + ", replication# = " + numNodes
+            + (numNodes < minReplication ? " < ": " >= ")
+            + " minimum = " + minReplication + ") in file " + src);
+        return false;
+      }
+    }
+    return true;
+  }
+
   /**
    * @return 0 if the block is not found;
    *         otherwise, return the replication factor of the block.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 17308d26b22..12d3e0584ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3525,20 +3525,19 @@ private boolean completeFileInternal(String src, String holder, Block last,
    * @param targets target datanodes where replicas of the new block is placed
    * @throws QuotaExceededException If addition of block exceeds space quota
    */
-  BlockInfoContiguous saveAllocatedBlock(String src, INodesInPath inodesInPath,
+  private void saveAllocatedBlock(String src, INodesInPath inodesInPath,
       Block newBlock, DatanodeStorageInfo[] targets)
-          throws IOException {
+      throws IOException {
     assert hasWriteLock();
     BlockInfoContiguous b = dir.addBlock(src, inodesInPath, newBlock, targets);
     NameNode.stateChangeLog.info("BLOCK* allocate " + b + " for " + src);
     DatanodeStorageInfo.incrementBlocksScheduled(targets);
-    return b;
   }
 
   /**
    * Create new block with a unique block id and a new generation stamp.
    */
-  Block createNewBlock() throws IOException {
+  private Block createNewBlock() throws IOException {
     assert hasWriteLock();
     Block b = new Block(nextBlockId(), 0, 0);
     // Increment the generation stamp for every new block.
@@ -3555,51 +3554,21 @@ private boolean checkFileProgress(String src, INodeFile v, boolean checkall) {
     readLock();
     try {
       if (checkall) {
-        // check all blocks of the file.
-        for (BlockInfoContiguous block: v.getBlocks()) {
-          if (!isCompleteBlock(src, block, blockManager.minReplication)) {
-            return false;
-          }
-        }
+        return blockManager.checkBlocksProperlyReplicated(src, v
+            .getBlocks());
       } else {
         // check the penultimate block of this file
         BlockInfoContiguous b = v.getPenultimateBlock();
-        if (b != null
-            && !isCompleteBlock(src, b, blockManager.minReplication)) {
-          return false;
-        }
+        return b == null ||
+            blockManager.checkBlocksProperlyReplicated(
+                src, new BlockInfoContiguous[] { b });
       }
-      return true;
     } finally {
       readUnlock();
     }
   }
 
-  private static boolean isCompleteBlock(String src, BlockInfoContiguous b, int minRepl) {
-    if (!b.isComplete()) {
-      final BlockInfoContiguousUnderConstruction uc = (BlockInfoContiguousUnderConstruction)b;
-      final int numNodes = b.numNodes();
-      LOG.info("BLOCK* " + b + " is not COMPLETE (ucState = "
-          + uc.getBlockUCState() + ", replication# = " + numNodes
-          + (numNodes < minRepl? " < ": " >= ")
-          + " minimum = " + minRepl + ") in file " + src);
-      return false;
-    }
-    return true;
-  }
-
-  ////////////////////////////////////////////////////////////////
-  // Here's how to handle block-copy failure during client write:
-  // -- As usual, the client's write should result in a streaming
-  // backup write to a k-machine sequence.
-  // -- If one of the backup machines fails, no worries. Fail silently.
-  // -- Before client is allowed to close and finalize file, make sure
-  // that the blocks are backed up. Namenode may have to issue specific backup
-  // commands to make up for earlier datanode failures. Once all copies
-  // are made, edit namespace and return to client.
-  ////////////////////////////////////////////////////////////////
-
-  /**
+  /**
    * Change the indicated filename.
    * @deprecated Use {@link #renameTo(String, String, boolean,
    * Options.Rename...)} instead.