diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index aec0eb24297..7ab54aac6c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -289,9 +289,6 @@ Release 2.8.0 - UNRELEASED HDFS-7923. The DataNodes should rate-limit their full block reports by asking the NN on heartbeat messages (cmccabe) - HDFS-8499. Refactor BlockInfo class hierarchy with static helper class. - (Zhe Zhang via wang) - HDFS-8540. Mover should exit with NO_MOVE_BLOCK if no block can be moved. (surendra singh lilhore via szetszwo) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java index f11a825c62d..02a1d0522de 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java @@ -79,7 +79,7 @@ public interface BlockCollection { * Convert the last block of the collection to an under-construction block * and set the locations. */ - public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock, + public BlockInfoContiguousUnderConstruction setLastBlock(BlockInfo lastBlock, DatanodeStorageInfo[] targets) throws IOException; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java index 4459974bffc..25f6efdb614 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java @@ -54,7 +54,7 @@ public abstract class BlockInfo extends Block * per replica is 42 bytes (LinkedList#Entry object per replica) versus 16 * bytes using the triplets. */ - Object[] triplets; + protected Object[] triplets; /** * Construct an entry for blocksmap @@ -298,7 +298,7 @@ public abstract class BlockInfo extends Block /** * BlockInfo represents a block that is not being constructed. * In order to start modifying the block, the BlockInfo should be converted - * to {@link BlockInfoUnderConstruction}. + * to {@link BlockInfoContiguousUnderConstruction}. * @return {@link BlockUCState#COMPLETE} */ public BlockUCState getBlockUCState() { @@ -315,29 +315,27 @@ public abstract class BlockInfo extends Block } /** - * Convert a block to an under construction block. + * Convert a complete block to an under construction block. * @return BlockInfoUnderConstruction - an under construction block. 
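// Aside: a minimal standalone sketch of the "triplets" layout that this
// patch makes visible to subclasses (the field is changed from package-private
// to protected above). Class and method names here are illustrative, not part
// of the patch; the real class stores DatanodeStorageInfo and BlockInfo
// references rather than bare Objects. For replica i:
//   triplets[3*i]     -> storage that holds the replica
//   triplets[3*i + 1] -> previous block on that storage's block list
//   triplets[3*i + 2] -> next block on that storage's block list
class TripletsSketch {
  private final Object[] triplets;

  TripletsSketch(int replication) {
    this.triplets = new Object[3 * replication]; // three slots per replica
  }

  Object getStorage(int i)          { return triplets[3 * i]; }
  Object getPrevious(int i)         { return triplets[3 * i + 1]; }
  Object getNext(int i)             { return triplets[3 * i + 2]; }
  void setStorage(int i, Object s)  { triplets[3 * i] = s; }
  void setPrevious(int i, Object p) { triplets[3 * i + 1] = p; }
  void setNext(int i, Object n)     { triplets[3 * i + 2] = n; }

  int getCapacity() { return triplets.length / 3; } // capacity in replicas
}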
*/ - public BlockInfoUnderConstruction convertToBlockUnderConstruction( + public BlockInfoContiguousUnderConstruction convertToBlockUnderConstruction( BlockUCState s, DatanodeStorageInfo[] targets) { if(isComplete()) { - return convertCompleteBlockToUC(s, targets); + BlockInfoContiguousUnderConstruction ucBlock = + new BlockInfoContiguousUnderConstruction(this, + getBlockCollection().getPreferredBlockReplication(), s, targets); + ucBlock.setBlockCollection(getBlockCollection()); + return ucBlock; } // the block is already under construction - BlockInfoUnderConstruction ucBlock = - (BlockInfoUnderConstruction)this; + BlockInfoContiguousUnderConstruction ucBlock = + (BlockInfoContiguousUnderConstruction)this; ucBlock.setBlockUCState(s); ucBlock.setExpectedLocations(targets); ucBlock.setBlockCollection(getBlockCollection()); return ucBlock; } - /** - * Convert a complete block to an under construction block. - */ - abstract BlockInfoUnderConstruction convertCompleteBlockToUC( - BlockUCState s, DatanodeStorageInfo[] targets); - @Override public int hashCode() { // Super implementation is sufficient diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java index b9abcd03f29..eff89a80832 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java @@ -19,13 +19,13 @@ package org.apache.hadoop.hdfs.server.blockmanagement; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; /** * Subclass of {@link BlockInfo}, used for a block with replication scheme. */ @InterfaceAudience.Private public class BlockInfoContiguous extends BlockInfo { + public static final BlockInfoContiguous[] EMPTY_ARRAY = {}; public BlockInfoContiguous(short size) { super(size); @@ -40,37 +40,84 @@ public class BlockInfoContiguous extends BlockInfo { * This is used to convert BlockReplicationInfoUnderConstruction * @param from BlockReplicationInfo to copy from. */ - protected BlockInfoContiguous(BlockInfo from) { + protected BlockInfoContiguous(BlockInfoContiguous from) { super(from); } + /** + * Ensure that there is enough space to include num more triplets. + * @return first free triplet index. + */ + private int ensureCapacity(int num) { + assert this.triplets != null : "BlockInfo is not initialized"; + int last = numNodes(); + if (triplets.length >= (last+num)*3) { + return last; + } + /* Not enough space left. Create a new array. Should normally + * happen only when replication is manually increased by the user. 
*/ + Object[] old = triplets; + triplets = new Object[(last+num)*3]; + System.arraycopy(old, 0, triplets, 0, last * 3); + return last; + } + @Override boolean addStorage(DatanodeStorageInfo storage) { - return ContiguousBlockStorageOp.addStorage(this, storage); + // find the last null node + int lastNode = ensureCapacity(1); + setStorageInfo(lastNode, storage); + setNext(lastNode, null); + setPrevious(lastNode, null); + return true; } @Override boolean removeStorage(DatanodeStorageInfo storage) { - return ContiguousBlockStorageOp.removeStorage(this, storage); + int dnIndex = findStorageInfo(storage); + if (dnIndex < 0) { // the node is not found + return false; + } + assert getPrevious(dnIndex) == null && getNext(dnIndex) == null : + "Block is still in the list and must be removed first."; + // find the last not null node + int lastNode = numNodes()-1; + // replace current node triplet by the lastNode one + setStorageInfo(dnIndex, getStorageInfo(lastNode)); + setNext(dnIndex, getNext(lastNode)); + setPrevious(dnIndex, getPrevious(lastNode)); + // set the last triplet to null + setStorageInfo(lastNode, null); + setNext(lastNode, null); + setPrevious(lastNode, null); + return true; } @Override public int numNodes() { - return ContiguousBlockStorageOp.numNodes(this); + assert this.triplets != null : "BlockInfo is not initialized"; + assert triplets.length % 3 == 0 : "Malformed BlockInfo"; + + for (int idx = getCapacity()-1; idx >= 0; idx--) { + if (getDatanode(idx) != null) { + return idx + 1; + } + } + return 0; } @Override void replaceBlock(BlockInfo newBlock) { - ContiguousBlockStorageOp.replaceBlock(this, newBlock); - } + assert newBlock instanceof BlockInfoContiguous; + for (int i = this.numNodes() - 1; i >= 0; i--) { + final DatanodeStorageInfo storage = this.getStorageInfo(i); + final boolean removed = storage.removeBlock(this); + assert removed : "currentBlock not found."; - @Override - BlockInfoUnderConstruction convertCompleteBlockToUC( - HdfsServerConstants.BlockUCState s, DatanodeStorageInfo[] targets) { - BlockInfoUnderConstructionContiguous ucBlock = - new BlockInfoUnderConstructionContiguous(this, - getBlockCollection().getPreferredBlockReplication(), s, targets); - ucBlock.setBlockCollection(getBlockCollection()); - return ucBlock; + final DatanodeStorageInfo.AddBlockResult result = storage.addBlock( + newBlock); + assert result == DatanodeStorageInfo.AddBlockResult.ADDED : + "newBlock already exists."; + } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java similarity index 83% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java index bbc4232e3fd..7ca64198999 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java @@ -18,12 +18,11 @@ package org.apache.hadoop.hdfs.server.blockmanagement; import java.io.IOException; +import java.util.ArrayList; import java.util.Iterator; import java.util.List; -import 
com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.namenode.NameNode; @@ -32,15 +31,15 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; * Represents a block that is currently being constructed.
* This is usually the last block of a file opened for write or append. */ -public abstract class BlockInfoUnderConstruction extends BlockInfo { +public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous { /** Block state. See {@link BlockUCState} */ - protected BlockUCState blockUCState; + private BlockUCState blockUCState; /** * Block replicas as assigned when the block was allocated. * This defines the pipeline order. */ - protected List replicas; + private List replicas; /** * Index of the primary data node doing the recovery. Useful for log @@ -58,12 +57,12 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo { /** * The block source to use in the event of copy-on-write truncate. */ - protected Block truncateBlock; + private Block truncateBlock; /** * ReplicaUnderConstruction contains information about replicas while * they are under construction. - * The GS, the length and the state of the replica is as reported by + * The GS, the length and the state of the replica is as reported by * the data-node. * It is not guaranteed, but expected, that data-nodes actually have * corresponding replicas. @@ -144,7 +143,7 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo { appendStringTo(b); return b.toString(); } - + @Override public void appendStringTo(StringBuilder sb) { sb.append("ReplicaUC[") @@ -159,24 +158,45 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo { * Create block and set its state to * {@link BlockUCState#UNDER_CONSTRUCTION}. */ - public BlockInfoUnderConstruction(Block blk, short replication) { + public BlockInfoContiguousUnderConstruction(Block blk, short replication) { this(blk, replication, BlockUCState.UNDER_CONSTRUCTION, null); } /** * Create a block that is currently being constructed. */ - public BlockInfoUnderConstruction(Block blk, short replication, + public BlockInfoContiguousUnderConstruction(Block blk, short replication, BlockUCState state, DatanodeStorageInfo[] targets) { super(blk, replication); - Preconditions.checkState(getBlockUCState() != BlockUCState.COMPLETE, - "BlockInfoUnderConstruction cannot be in COMPLETE state"); + assert getBlockUCState() != BlockUCState.COMPLETE : + "BlockInfoUnderConstruction cannot be in COMPLETE state"; this.blockUCState = state; setExpectedLocations(targets); } - /** Set expected locations. */ - public abstract void setExpectedLocations(DatanodeStorageInfo[] targets); + /** + * Convert an under construction block to a complete block. + * + * @return BlockInfo - a complete block. + * @throws IOException if the state of the block + * (the generation stamp and the length) has not been committed by + * the client or it does not have at least a minimal number of replicas + * reported from data-nodes. + */ + BlockInfo convertToCompleteBlock() throws IOException { + assert getBlockUCState() != BlockUCState.COMPLETE : + "Trying to convert a COMPLETE block"; + return new BlockInfoContiguous(this); + } + + /** Set expected locations */ + public void setExpectedLocations(DatanodeStorageInfo[] targets) { + int numLocations = targets == null ? 
0 : targets.length; + this.replicas = new ArrayList(numLocations); + for(int i = 0; i < numLocations; i++) + replicas.add( + new ReplicaUnderConstruction(this, targets[i], ReplicaState.RBW)); + } /** * Create array of expected replica locations @@ -185,13 +205,12 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo { public DatanodeStorageInfo[] getExpectedStorageLocations() { int numLocations = replicas == null ? 0 : replicas.size(); DatanodeStorageInfo[] storages = new DatanodeStorageInfo[numLocations]; - for(int i = 0; i < numLocations; i++) { + for(int i = 0; i < numLocations; i++) storages[i] = replicas.get(i).getExpectedStorageLocation(); - } return storages; } - /** Get the number of expected locations. */ + /** Get the number of expected locations */ public int getNumExpectedLocations() { return replicas == null ? 0 : replicas.size(); } @@ -209,15 +228,19 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo { blockUCState = s; } - /** Get block recovery ID. */ + /** Get block recovery ID */ public long getBlockRecoveryId() { return blockRecoveryId; } - /** Get recover block. */ - public abstract Block getTruncateBlock(); + /** Get recover block */ + public Block getTruncateBlock() { + return truncateBlock; + } - public abstract void setTruncateBlock(Block recoveryBlock); + public void setTruncateBlock(Block recoveryBlock) { + this.truncateBlock = recoveryBlock; + } /** * Process the recorded replicas. When about to commit or finish the @@ -227,9 +250,8 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo { public void setGenerationStampAndVerifyReplicas(long genStamp) { // Set the generation stamp for the block. setGenerationStamp(genStamp); - if (replicas == null) { + if (replicas == null) return; - } // Remove the replicas with wrong gen stamp. // The replica list is unchanged. @@ -245,14 +267,13 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo { /** * Commit block's length and generation stamp as reported by the client. * Set block state to {@link BlockUCState#COMMITTED}. - * @param block - contains client reported block length and generation + * @param block - contains client reported block length and generation * @throws IOException if block ids are inconsistent. */ void commitBlock(Block block) throws IOException { - if(getBlockId() != block.getBlockId()) { + if(getBlockId() != block.getBlockId()) throw new IOException("Trying to commit inconsistent block: id = " + block.getBlockId() + ", expected id = " + getBlockId()); - } blockUCState = BlockUCState.COMMITTED; this.set(getBlockId(), block.getNumBytes(), block.getGenerationStamp()); // Sort out invalid replicas. @@ -268,17 +289,16 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo { setBlockUCState(BlockUCState.UNDER_RECOVERY); blockRecoveryId = recoveryId; if (replicas.size() == 0) { - NameNode.blockStateChangeLog.warn("BLOCK* " + - "BlockInfoUnderConstruction.initLeaseRecovery: " + - "No blocks found, lease removed."); + NameNode.blockStateChangeLog.warn("BLOCK*" + + " BlockInfoUnderConstruction.initLeaseRecovery:" + + " No blocks found, lease removed."); } boolean allLiveReplicasTriedAsPrimary = true; for (int i = 0; i < replicas.size(); i++) { // Check if all replicas have been tried or not. 
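// Aside: the lifecycle that commitBlock, convertToCompleteBlock and
// initializeBlockRecovery (this file) move a block through, sketched as a
// tiny state machine. BlockUCStateSketch is a hypothetical stand-in for
// HdfsServerConstants.BlockUCState; the transitions are simplified for
// illustration and elide the locking and replica checks done by the NameNode.
enum BlockUCStateSketch {
  UNDER_CONSTRUCTION, UNDER_RECOVERY, COMMITTED, COMPLETE;

  boolean canTransitionTo(BlockUCStateSketch next) {
    switch (this) {
      case UNDER_CONSTRUCTION:
        // client commits length + generation stamp, or lease recovery starts
        return next == COMMITTED || next == UNDER_RECOVERY;
      case UNDER_RECOVERY:
        // block recovery finalizes the block via commitBlockSynchronization
        return next == COMMITTED || next == COMPLETE;
      case COMMITTED:
        // completeBlock succeeds once enough replicas have been reported
        return next == COMPLETE;
      default:
        return false; // COMPLETE is terminal
    }
  }
}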
if (replicas.get(i).isAlive()) { allLiveReplicasTriedAsPrimary = - (allLiveReplicasTriedAsPrimary && - replicas.get(i).getChosenAsPrimary()); + (allLiveReplicasTriedAsPrimary && replicas.get(i).getChosenAsPrimary()); } } if (allLiveReplicasTriedAsPrimary) { @@ -292,8 +312,7 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo { primaryNodeIndex = -1; for(int i = 0; i < replicas.size(); i++) { // Skip alive replicas which have been chosen for recovery. - if (!(replicas.get(i).isAlive() && - !replicas.get(i).getChosenAsPrimary())) { + if (!(replicas.get(i).isAlive() && !replicas.get(i).getChosenAsPrimary())) { continue; } final ReplicaUnderConstruction ruc = replicas.get(i); @@ -306,8 +325,7 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo { } } if (primary != null) { - primary.getExpectedStorageLocation(). - getDatanodeDescriptor().addBlockToBeRecovered(this); + primary.getExpectedStorageLocation().getDatanodeDescriptor().addBlockToBeRecovered(this); primary.setChosenAsPrimary(true); NameNode.blockStateChangeLog.debug( "BLOCK* {} recovery started, primary={}", this, primary); @@ -340,25 +358,6 @@ public abstract class BlockInfoUnderConstruction extends BlockInfo { replicas.add(new ReplicaUnderConstruction(block, storage, rState)); } - /** - * Convert an under construction block to a complete block. - * - * @return a complete block. - * @throws IOException - * if the state of the block (the generation stamp and the length) - * has not been committed by the client or it does not have at - * least a minimal number of replicas reported from data-nodes. - */ - public abstract BlockInfo convertToCompleteBlock(); - - @Override - BlockInfoUnderConstruction convertCompleteBlockToUC - (HdfsServerConstants.BlockUCState s, DatanodeStorageInfo[] targets) { - BlockManager.LOG.error("convertCompleteBlockToUC should only be applied " + - "on complete blocks."); - return null; - } - @Override // BlockInfo // BlockInfoUnderConstruction participates in maps the same way as BlockInfo public int hashCode() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java deleted file mode 100644 index c66675a29a4..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstructionContiguous.java +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdfs.server.blockmanagement; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; - -import java.util.ArrayList; - -/** - * Subclass of {@link BlockInfoUnderConstruction}, representing a block under - * the contiguous (instead of striped) layout. - */ -public class BlockInfoUnderConstructionContiguous extends - BlockInfoUnderConstruction { - /** - * Create block and set its state to - * {@link HdfsServerConstants.BlockUCState#UNDER_CONSTRUCTION}. - */ - public BlockInfoUnderConstructionContiguous(Block blk, short replication) { - this(blk, replication, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, - null); - } - - /** - * Create a block that is currently being constructed. - */ - public BlockInfoUnderConstructionContiguous(Block blk, short replication, - HdfsServerConstants.BlockUCState state, DatanodeStorageInfo[] targets) { - super(blk, replication); - Preconditions.checkState(getBlockUCState() != - HdfsServerConstants.BlockUCState.COMPLETE, - "BlockInfoUnderConstructionContiguous cannot be in COMPLETE state"); - this.blockUCState = state; - setExpectedLocations(targets); - } - - /** - * Convert an under construction block to a complete block. - * - * @return BlockInfo - a complete block. - * @throws IOException if the state of the block - * (the generation stamp and the length) has not been committed by - * the client or it does not have at least a minimal number of replicas - * reported from data-nodes. - */ - @Override - public BlockInfoContiguous convertToCompleteBlock() { - Preconditions.checkState(getBlockUCState() != - HdfsServerConstants.BlockUCState.COMPLETE, - "Trying to convert a COMPLETE block"); - return new BlockInfoContiguous(this); - } - - @Override - boolean addStorage(DatanodeStorageInfo storage) { - return ContiguousBlockStorageOp.addStorage(this, storage); - } - - @Override - boolean removeStorage(DatanodeStorageInfo storage) { - return ContiguousBlockStorageOp.removeStorage(this, storage); - } - - @Override - public int numNodes() { - return ContiguousBlockStorageOp.numNodes(this); - } - - @Override - void replaceBlock(BlockInfo newBlock) { - ContiguousBlockStorageOp.replaceBlock(this, newBlock); - } - - @Override - public void setExpectedLocations(DatanodeStorageInfo[] targets) { - int numLocations = targets == null ? 0 : targets.length; - this.replicas = new ArrayList<>(numLocations); - for(int i = 0; i < numLocations; i++) { - replicas.add( - new ReplicaUnderConstruction(this, targets[i], HdfsServerConstants.ReplicaState.RBW)); - } - } - - @Override - public Block getTruncateBlock() { - return truncateBlock; - } - - @Override - public void setTruncateBlock(Block recoveryBlock) { - this.truncateBlock = recoveryBlock; - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 6ae2b32dd78..e0eae26ae6b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -610,7 +610,7 @@ public class BlockManager implements BlockStatsMXBean { * of replicas reported from data-nodes. 
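// Aside: the commit/complete split implemented by the BlockManager methods
// below, as a hedged sketch. UcBlock and CommitCompleteSketch are hypothetical
// illustrations, not the BlockManager API: commit freezes the client-reported
// length and generation stamp, while complete is only allowed once at least
// minReplication replicas have been reported by datanodes.
final class CommitCompleteSketch {
  static final int MIN_REPLICATION = 1; // dfs.namenode.replication.min default

  interface UcBlock {
    boolean commit();  // -> COMMITTED: length + GS frozen
    void complete();   // -> COMPLETE: UC block replaced in the blocks map
  }

  static boolean commitThenMaybeComplete(UcBlock block, int liveReplicas) {
    boolean committed = block.commit();
    if (liveReplicas >= MIN_REPLICATION) {
      block.complete();
    }
    return committed;
  }
}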
*/ private static boolean commitBlock( - final BlockInfoUnderConstruction block, final Block commitBlock) + final BlockInfoContiguousUnderConstruction block, final Block commitBlock) throws IOException { if (block.getBlockUCState() == BlockUCState.COMMITTED) return false; @@ -642,7 +642,7 @@ public class BlockManager implements BlockStatsMXBean { return false; // already completed (e.g. by syncBlock) final boolean b = commitBlock( - (BlockInfoUnderConstruction) lastBlock, commitBlock); + (BlockInfoContiguousUnderConstruction) lastBlock, commitBlock); if(countNodes(lastBlock).liveReplicas() >= minReplication) completeBlock(bc, bc.numBlocks()-1, false); return b; @@ -662,8 +662,8 @@ public class BlockManager implements BlockStatsMXBean { BlockInfo curBlock = bc.getBlocks()[blkIndex]; if(curBlock.isComplete()) return curBlock; - BlockInfoUnderConstruction ucBlock = - (BlockInfoUnderConstruction) curBlock; + BlockInfoContiguousUnderConstruction ucBlock = + (BlockInfoContiguousUnderConstruction) curBlock; int numNodes = ucBlock.numNodes(); if (!force && numNodes < minReplication) throw new IOException("Cannot complete block: " + @@ -705,7 +705,7 @@ public class BlockManager implements BlockStatsMXBean { * when tailing edit logs as a Standby. */ public BlockInfo forceCompleteBlock(final BlockCollection bc, - final BlockInfoUnderConstruction block) throws IOException { + final BlockInfoContiguousUnderConstruction block) throws IOException { block.commitBlock(block); return completeBlock(bc, block, true); } @@ -736,7 +736,7 @@ public class BlockManager implements BlockStatsMXBean { DatanodeStorageInfo[] targets = getStorages(oldBlock); - BlockInfoUnderConstruction ucBlock = + BlockInfoContiguousUnderConstruction ucBlock = bc.setLastBlock(oldBlock, targets); blocksMap.replaceBlock(ucBlock); @@ -838,14 +838,14 @@ public class BlockManager implements BlockStatsMXBean { /** @return a LocatedBlock for the given block */ private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos ) throws IOException { - if (blk instanceof BlockInfoUnderConstruction) { + if (blk instanceof BlockInfoContiguousUnderConstruction) { if (blk.isComplete()) { throw new IOException( "blk instanceof BlockInfoUnderConstruction && blk.isComplete()" + ", blk=" + blk); } - final BlockInfoUnderConstruction uc = - (BlockInfoUnderConstruction) blk; + final BlockInfoContiguousUnderConstruction uc = + (BlockInfoContiguousUnderConstruction) blk; final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations(); final ExtendedBlock eb = new ExtendedBlock(namesystem.getBlockPoolId(), blk); return newLocatedBlock(eb, storages, pos, false); @@ -1750,11 +1750,11 @@ public class BlockManager implements BlockStatsMXBean { * reported by the datanode in the block report. 
*/ static class StatefulBlockInfo { - final BlockInfoUnderConstruction storedBlock; + final BlockInfoContiguousUnderConstruction storedBlock; final Block reportedBlock; final ReplicaState reportedState; - StatefulBlockInfo(BlockInfoUnderConstruction storedBlock, + StatefulBlockInfo(BlockInfoContiguousUnderConstruction storedBlock, Block reportedBlock, ReplicaState reportedState) { this.storedBlock = storedBlock; this.reportedBlock = reportedBlock; @@ -1795,7 +1795,7 @@ public class BlockManager implements BlockStatsMXBean { BlockToMarkCorrupt(BlockInfo stored, long gs, String reason, Reason reasonCode) { - this(new BlockInfoContiguous(stored), stored, + this(new BlockInfoContiguous((BlockInfoContiguous)stored), stored, reason, reasonCode); //the corrupted block in datanode has a different generation stamp corrupted.setGenerationStamp(gs); @@ -2154,13 +2154,13 @@ public class BlockManager implements BlockStatsMXBean { // If block is under construction, add this replica to its list if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) { - ((BlockInfoUnderConstruction)storedBlock) + ((BlockInfoContiguousUnderConstruction)storedBlock) .addReplicaIfNotPresent(storageInfo, iblk, reportedState); // OpenFileBlocks only inside snapshots also will be added to safemode // threshold. So we need to update such blocks to safemode // refer HDFS-5283 - BlockInfoUnderConstruction blockUC = - (BlockInfoUnderConstruction) storedBlock; + BlockInfoContiguousUnderConstruction blockUC = + (BlockInfoContiguousUnderConstruction) storedBlock; if (namesystem.isInSnapshot(blockUC)) { int numOfReplicas = blockUC.getNumExpectedLocations(); namesystem.incrementSafeBlockCount(numOfReplicas); @@ -2315,7 +2315,7 @@ public class BlockManager implements BlockStatsMXBean { if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) { toUC.add(new StatefulBlockInfo( - (BlockInfoUnderConstruction) storedBlock, + (BlockInfoContiguousUnderConstruction) storedBlock, new Block(block), reportedState)); return storedBlock; } @@ -2506,7 +2506,7 @@ public class BlockManager implements BlockStatsMXBean { void addStoredBlockUnderConstruction(StatefulBlockInfo ucBlock, DatanodeStorageInfo storageInfo) throws IOException { - BlockInfoUnderConstruction block = ucBlock.storedBlock; + BlockInfoContiguousUnderConstruction block = ucBlock.storedBlock; block.addReplicaIfNotPresent( storageInfo, ucBlock.reportedBlock, ucBlock.reportedState); @@ -2567,7 +2567,7 @@ public class BlockManager implements BlockStatsMXBean { assert block != null && namesystem.hasWriteLock(); BlockInfo storedBlock; DatanodeDescriptor node = storageInfo.getDatanodeDescriptor(); - if (block instanceof BlockInfoUnderConstruction) { + if (block instanceof BlockInfoContiguousUnderConstruction) { //refresh our copy in case the block got completed in another thread storedBlock = blocksMap.getStoredBlock(block); } else { @@ -3522,8 +3522,8 @@ public class BlockManager implements BlockStatsMXBean { String src, BlockInfo[] blocks) { for (BlockInfo b: blocks) { if (!b.isComplete()) { - final BlockInfoUnderConstruction uc = - (BlockInfoUnderConstruction)b; + final BlockInfoContiguousUnderConstruction uc = + (BlockInfoContiguousUnderConstruction)b; final int numNodes = b.numNodes(); LOG.info("BLOCK* " + b + " is not COMPLETE (ucState = " + uc.getBlockUCState() + ", replication# = " + numNodes diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ContiguousBlockStorageOp.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ContiguousBlockStorageOp.java deleted file mode 100644 index 092f65ec3cb..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ContiguousBlockStorageOp.java +++ /dev/null @@ -1,106 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs.server.blockmanagement; - -import com.google.common.base.Preconditions; - -/** - * Utility class with logic on managing storage locations shared between - * complete and under-construction blocks under the contiguous format -- - * {@link BlockInfoContiguous} and - * {@link BlockInfoUnderConstructionContiguous}. - */ -class ContiguousBlockStorageOp { - /** - * Ensure that there is enough space to include num more triplets. - * @return first free triplet index. - */ - private static int ensureCapacity(BlockInfo b, int num) { - Preconditions.checkArgument(b.triplets != null, - "BlockInfo is not initialized"); - int last = b.numNodes(); - if (b.triplets.length >= (last+num)*3) { - return last; - } - /* Not enough space left. Create a new array. Should normally - * happen only when replication is manually increased by the user. 
*/ - Object[] old = b.triplets; - b.triplets = new Object[(last+num)*3]; - System.arraycopy(old, 0, b.triplets, 0, last * 3); - return last; - } - - static boolean addStorage(BlockInfo b, DatanodeStorageInfo storage) { - // find the last null node - int lastNode = ensureCapacity(b, 1); - b.setStorageInfo(lastNode, storage); - b.setNext(lastNode, null); - b.setPrevious(lastNode, null); - return true; - } - - static boolean removeStorage(BlockInfo b, - DatanodeStorageInfo storage) { - int dnIndex = b.findStorageInfo(storage); - if (dnIndex < 0) { // the node is not found - return false; - } - Preconditions.checkArgument(b.getPrevious(dnIndex) == null && - b.getNext(dnIndex) == null, - "Block is still in the list and must be removed first."); - // find the last not null node - int lastNode = b.numNodes()-1; - // replace current node triplet by the lastNode one - b.setStorageInfo(dnIndex, b.getStorageInfo(lastNode)); - b.setNext(dnIndex, b.getNext(lastNode)); - b.setPrevious(dnIndex, b.getPrevious(lastNode)); - // set the last triplet to null - b.setStorageInfo(lastNode, null); - b.setNext(lastNode, null); - b.setPrevious(lastNode, null); - return true; - } - - static int numNodes(BlockInfo b) { - Preconditions.checkArgument(b.triplets != null, - "BlockInfo is not initialized"); - Preconditions.checkArgument(b.triplets.length % 3 == 0, - "Malformed BlockInfo"); - - for (int idx = b.getCapacity()-1; idx >= 0; idx--) { - if (b.getDatanode(idx) != null) { - return idx + 1; - } - } - return 0; - } - - static void replaceBlock(BlockInfo b, BlockInfo newBlock) { - for (int i = b.numNodes() - 1; i >= 0; i--) { - final DatanodeStorageInfo storage = b.getStorageInfo(i); - final boolean removed = storage.removeBlock(b); - Preconditions.checkState(removed, "currentBlock not found."); - - final DatanodeStorageInfo.AddBlockResult result = storage.addBlock( - newBlock); - Preconditions.checkState( - result == DatanodeStorageInfo.AddBlockResult.ADDED, - "newBlock already exists."); - } - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java index 45a56b5730c..ca5b541fe8e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java @@ -223,8 +223,8 @@ public class DatanodeDescriptor extends DatanodeInfo { private final BlockQueue replicateBlocks = new BlockQueue<>(); /** A queue of blocks to be recovered by this datanode */ - private final BlockQueue recoverBlocks = - new BlockQueue<>(); + private final BlockQueue recoverBlocks = + new BlockQueue(); /** A set of blocks to be invalidated by this datanode */ private final LightWeightHashSet invalidateBlocks = new LightWeightHashSet<>(); @@ -602,7 +602,7 @@ public class DatanodeDescriptor extends DatanodeInfo { /** * Store block recovery work. 
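// Aside: the semantics of the per-datanode recovery queue used by
// addBlockToBeRecovered below, sketched standalone. RecoveryQueueSketch is
// illustrative (the real BlockQueue is a private helper in
// DatanodeDescriptor): a block already pending recovery is not enqueued
// twice, and the heartbeat path drains up to maxTransfers entries at once.
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;

class RecoveryQueueSketch<B> {
  private final ArrayDeque<B> queue = new ArrayDeque<>();

  synchronized boolean offer(B block) {
    if (queue.contains(block)) {
      return false; // already in the recovery queue; do not add twice
    }
    return queue.add(block);
  }

  synchronized List<B> poll(int maxTransfers) {
    if (queue.isEmpty()) {
      return null;
    }
    List<B> out = new ArrayList<>(maxTransfers);
    for (int i = 0; i < maxTransfers && !queue.isEmpty(); i++) {
      out.add(queue.poll());
    }
    return out;
  }
}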
*/ - void addBlockToBeRecovered(BlockInfoUnderConstruction block) { + void addBlockToBeRecovered(BlockInfoContiguousUnderConstruction block) { if(recoverBlocks.contains(block)) { // this prevents adding the same block twice to the recovery queue BlockManager.LOG.info(block + " is already in the recovery queue"); @@ -644,12 +644,11 @@ public class DatanodeDescriptor extends DatanodeInfo { return replicateBlocks.poll(maxTransfers); } - public BlockInfoUnderConstruction[] getLeaseRecoveryCommand( - int maxTransfers) { - List blocks = recoverBlocks.poll(maxTransfers); + public BlockInfoContiguousUnderConstruction[] getLeaseRecoveryCommand(int maxTransfers) { + List blocks = recoverBlocks.poll(maxTransfers); if(blocks == null) return null; - return blocks.toArray(new BlockInfoUnderConstruction[blocks.size()]); + return blocks.toArray(new BlockInfoContiguousUnderConstruction[blocks.size()]); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 90b173a7136..e1e87f5a274 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -1379,12 +1379,12 @@ public class DatanodeManager { } //check lease recovery - BlockInfoUnderConstruction[] blocks = nodeinfo + BlockInfoContiguousUnderConstruction[] blocks = nodeinfo .getLeaseRecoveryCommand(Integer.MAX_VALUE); if (blocks != null) { BlockRecoveryCommand brCommand = new BlockRecoveryCommand( blocks.length); - for (BlockInfoUnderConstruction b : blocks) { + for (BlockInfoContiguousUnderConstruction b : blocks) { final DatanodeStorageInfo[] storages = b.getExpectedStorageLocations(); // Skip stale nodes during recovery - not heart beated for some time (30s by default). 
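// Aside: the stale-node filter the comment above refers to, sketched with
// hypothetical names. A datanode that has not heartbeated within the
// staleness interval (dfs.namenode.stale.datanode.interval, 30s by default)
// is excluded from the recovery locations; the real code falls back to the
// full replica set when too few non-stale replicas remain.
final class StaleFilterSketch {
  static <D> java.util.List<D> excludeStale(java.util.List<D> nodes,
      java.util.function.Predicate<D> isStale) {
    java.util.List<D> fresh = new java.util.ArrayList<>();
    for (D n : nodes) {
      if (!isStale.test(n)) {
        fresh.add(n); // keep only recently heartbeated nodes
      }
    }
    return fresh;
  }
}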
final List recoveryLocations = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java index e24bb2f0b81..474c2574d88 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java @@ -28,8 +28,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.RecoverLeaseOp; @@ -96,7 +95,7 @@ final class FSDirTruncateOp { final BlockInfo last = file.getLastBlock(); if (last != null && last.getBlockUCState() == BlockUCState.UNDER_RECOVERY) { - final Block truncatedBlock = ((BlockInfoUnderConstruction) last) + final Block truncatedBlock = ((BlockInfoContiguousUnderConstruction) last) .getTruncateBlock(); if (truncatedBlock != null) { final long truncateLength = file.computeFileSize(false, false) @@ -223,12 +222,12 @@ final class FSDirTruncateOp { oldBlock))); } - BlockInfoUnderConstruction truncatedBlockUC; + BlockInfoContiguousUnderConstruction truncatedBlockUC; BlockManager blockManager = fsn.getFSDirectory().getBlockManager(); if (shouldCopyOnTruncate) { // Add new truncateBlock into blocksMap and // use oldBlock as a source for copy-on-truncate recovery - truncatedBlockUC = new BlockInfoUnderConstructionContiguous(newBlock, + truncatedBlockUC = new BlockInfoContiguousUnderConstruction(newBlock, file.getPreferredBlockReplication()); truncatedBlockUC.setNumBytes(oldBlock.getNumBytes() - lastBlockDelta); truncatedBlockUC.setTruncateBlock(oldBlock); @@ -245,7 +244,7 @@ final class FSDirTruncateOp { blockManager.convertLastBlockToUnderConstruction(file, lastBlockDelta); oldBlock = file.getLastBlock(); assert !oldBlock.isComplete() : "oldBlock should be under construction"; - truncatedBlockUC = (BlockInfoUnderConstruction) oldBlock; + truncatedBlockUC = (BlockInfoContiguousUnderConstruction) oldBlock; truncatedBlockUC.setTruncateBlock(new Block(oldBlock)); truncatedBlockUC.getTruncateBlock().setNumBytes( oldBlock.getNumBytes() - lastBlockDelta); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java index 008a945b836..732e9cf4cae 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java @@ -43,8 +43,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import 
org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; @@ -74,7 +73,7 @@ class FSDirWriteFileOp { Block block) throws IOException { // modify file-> block and blocksMap // fileNode should be under construction - BlockInfoUnderConstruction uc = fileNode.removeLastBlock(block); + BlockInfoContiguousUnderConstruction uc = fileNode.removeLastBlock(block); if (uc == null) { return false; } @@ -237,7 +236,7 @@ class FSDirWriteFileOp { } else { // add new chosen targets to already allocated block and return BlockInfo lastBlockInFile = pendingFile.getLastBlock(); - ((BlockInfoUnderConstruction) lastBlockInFile) + ((BlockInfoContiguousUnderConstruction) lastBlockInFile) .setExpectedLocations(targets); offset = pendingFile.computeFileSize(); return makeLocatedBlock(fsn, lastBlockInFile, targets, offset); @@ -521,8 +520,8 @@ class FSDirWriteFileOp { fileINode.getPreferredBlockReplication(), true); // associate new last block for the file - BlockInfoUnderConstruction blockInfo = - new BlockInfoUnderConstructionContiguous( + BlockInfoContiguousUnderConstruction blockInfo = + new BlockInfoContiguousUnderConstruction( block, fileINode.getFileReplication(), HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, @@ -663,8 +662,8 @@ class FSDirWriteFileOp { "allocation of a new block in " + src + ". 
Returning previously" + " allocated block " + lastBlockInFile); long offset = file.computeFileSize(); - BlockInfoUnderConstruction lastBlockUC = - (BlockInfoUnderConstruction) lastBlockInFile; + BlockInfoContiguousUnderConstruction lastBlockUC = + (BlockInfoContiguousUnderConstruction) lastBlockInFile; onRetryBlock[0] = makeLocatedBlock(fsn, lastBlockInFile, lastBlockUC.getExpectedStorageLocations(), offset); return new FileState(file, src, iip); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index 645d682a55e..757ecad6ec5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -45,8 +45,7 @@ import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; @@ -964,16 +963,16 @@ public class FSEditLogLoader { } oldLastBlock.setNumBytes(pBlock.getNumBytes()); - if (oldLastBlock instanceof BlockInfoUnderConstruction) { + if (oldLastBlock instanceof BlockInfoContiguousUnderConstruction) { fsNamesys.getBlockManager().forceCompleteBlock(file, - (BlockInfoUnderConstruction) oldLastBlock); + (BlockInfoContiguousUnderConstruction) oldLastBlock); fsNamesys.getBlockManager().processQueuedMessagesForBlock(pBlock); } } else { // the penultimate block is null Preconditions.checkState(oldBlocks == null || oldBlocks.length == 0); } // add the new block - BlockInfo newBI = new BlockInfoUnderConstructionContiguous( + BlockInfo newBI = new BlockInfoContiguousUnderConstruction( newBlock, file.getPreferredBlockReplication()); fsNamesys.getBlockManager().addBlockCollection(newBI, file); file.addBlock(newBI); @@ -1014,11 +1013,11 @@ public class FSEditLogLoader { oldBlock.getGenerationStamp() != newBlock.getGenerationStamp(); oldBlock.setGenerationStamp(newBlock.getGenerationStamp()); - if (oldBlock instanceof BlockInfoUnderConstruction && + if (oldBlock instanceof BlockInfoContiguousUnderConstruction && (!isLastBlock || op.shouldCompleteLastBlock())) { changeMade = true; fsNamesys.getBlockManager().forceCompleteBlock(file, - (BlockInfoUnderConstruction) oldBlock); + (BlockInfoContiguousUnderConstruction) oldBlock); } if (changeMade) { // The state or gen-stamp of the block has changed. So, we may be @@ -1053,7 +1052,7 @@ public class FSEditLogLoader { // TODO: shouldn't this only be true for the last block? // what about an old-version fsync() where fsync isn't called // until several blocks in? 
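// Aside: the rule the replay code here applies, restated as a sketch.
// ReplaySketch and its helper are hypothetical, but the constructors mirror
// the restored code: while replaying edits, the still-open last block of a
// file is rebuilt as an under-construction block, and finalized blocks
// (e.g. on OP_CLOSE) are rebuilt as plain BlockInfoContiguous.
final class ReplaySketch {
  static BlockInfo rebuildFromEditLog(Block blk, short replication,
      boolean isLastBlockOfOpenFile) {
    if (isLastBlockOfOpenFile) {
      return new BlockInfoContiguousUnderConstruction(blk, replication);
    }
    return new BlockInfoContiguous(blk, replication);
  }
}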
- newBI = new BlockInfoUnderConstructionContiguous( + newBI = new BlockInfoContiguousUnderConstruction( newBlock, file.getPreferredBlockReplication()); } else { // OP_CLOSE should add finalized blocks. This code path diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index cdf9ecaf347..bbda7c79398 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -54,7 +54,7 @@ import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; @@ -777,9 +777,8 @@ public class FSImageFormat { // convert the last block to BlockUC if (blocks.length > 0) { BlockInfo lastBlk = blocks[blocks.length - 1]; - blocks[blocks.length - 1] = - new BlockInfoUnderConstructionContiguous( - lastBlk, replication); + blocks[blocks.length - 1] = new BlockInfoContiguousUnderConstruction( + lastBlk, replication); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java index 95aa627fd1d..a29009b3fbe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java @@ -44,7 +44,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto; import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext; import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext; @@ -363,8 +363,8 @@ public final class FSImageFormatPBINode { if (blocks.length > 0) { BlockInfo lastBlk = file.getLastBlock(); // replace the last block of file - file.setBlock(file.numBlocks() - 1, - new BlockInfoUnderConstructionContiguous(lastBlk, replication)); + file.setBlock(file.numBlocks() - 1, new BlockInfoContiguousUnderConstruction( + lastBlk, replication)); } } return file; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java 
index 86b3a4a1cff..8419482072c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap; @@ -137,7 +137,7 @@ public class FSImageSerialization { // last block is UNDER_CONSTRUCTION if(numBlocks > 0) { blk.readFields(in); - blocks[i] = new BlockInfoUnderConstructionContiguous( + blocks[i] = new BlockInfoContiguousUnderConstruction( blk, blockReplication, BlockUCState.UNDER_CONSTRUCTION, null); } PermissionStatus perm = PermissionStatus.read(in); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 32ab16a5f8f..3b1de18b764 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -203,7 +203,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretMan import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection; import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; @@ -3085,8 +3085,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, throw new AlreadyBeingCreatedException(message); case UNDER_CONSTRUCTION: case UNDER_RECOVERY: - final BlockInfoUnderConstruction uc = - (BlockInfoUnderConstruction)lastBlock; + final BlockInfoContiguousUnderConstruction uc = (BlockInfoContiguousUnderConstruction)lastBlock; // determine if last block was intended to be truncated Block recoveryBlock = uc.getTruncateBlock(); boolean truncateRecovery = recoveryBlock != null; @@ -3202,7 +3201,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, } @Override - public boolean isInSnapshot(BlockInfoUnderConstruction blockUC) { + public boolean isInSnapshot(BlockInfoContiguousUnderConstruction blockUC) { assert hasReadLock(); final BlockCollection bc = blockUC.getBlockCollection(); if (bc == null || !(bc instanceof INodeFile) @@ -3249,7 +3248,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, waitForLoadingFSImage(); writeLock(); boolean copyTruncate = false; - 
BlockInfoUnderConstruction truncatedBlock = null; + BlockInfoContiguousUnderConstruction truncatedBlock = null; try { checkOperation(OperationCategory.WRITE); // If a DN tries to commit to the standby, the recovery will @@ -3306,7 +3305,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, return; } - truncatedBlock = (BlockInfoUnderConstruction) iFile + truncatedBlock = (BlockInfoContiguousUnderConstruction) iFile .getLastBlock(); long recoveryId = truncatedBlock.getBlockRecoveryId(); copyTruncate = truncatedBlock.getBlockId() != storedBlock.getBlockId(); @@ -5330,8 +5329,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, assert hasWriteLock(); // check the vadility of the block and lease holder name final INodeFile pendingFile = checkUCBlock(oldBlock, clientName); - final BlockInfoUnderConstruction blockinfo - = (BlockInfoUnderConstruction)pendingFile.getLastBlock(); + final BlockInfoContiguousUnderConstruction blockinfo + = (BlockInfoContiguousUnderConstruction)pendingFile.getLastBlock(); // check new GS & length: this is not expected if (newBlock.getGenerationStamp() <= blockinfo.getGenerationStamp() || diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java index 74c5d094991..d07ae1f513b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java @@ -21,7 +21,7 @@ import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; /** @@ -61,7 +61,7 @@ public class FileUnderConstructionFeature implements INode.Feature { BlockInfo lastBlock = f.getLastBlock(); assert (lastBlock != null) : "The last block for path " + f.getFullPathName() + " is null when updating its length"; - assert (lastBlock instanceof BlockInfoUnderConstruction) + assert (lastBlock instanceof BlockInfoContiguousUnderConstruction) : "The last block for path " + f.getFullPathName() + " is not a BlockInfoUnderConstruction when updating its length"; lastBlock.setNumBytes(lastBlockLength); @@ -76,9 +76,9 @@ public class FileUnderConstructionFeature implements INode.Feature { final BlocksMapUpdateInfo collectedBlocks) { final BlockInfo[] blocks = f.getBlocks(); if (blocks != null && blocks.length > 0 - && blocks[blocks.length - 1] instanceof BlockInfoUnderConstruction) { - BlockInfoUnderConstruction lastUC = - (BlockInfoUnderConstruction) blocks[blocks.length - 1]; + && blocks[blocks.length - 1] instanceof BlockInfoContiguousUnderConstruction) { + BlockInfoContiguousUnderConstruction lastUC = + (BlockInfoContiguousUnderConstruction) blocks[blocks.length - 1]; if (lastUC.getNumBytes() == 0) { // this is a 0-sized block. 
do not need check its UC state here collectedBlocks.addDeleteBlock(lastUC); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index a7d8964052d..14714007f90 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; @@ -231,7 +231,7 @@ public class INodeFile extends INodeWithAdditionalFields } @Override // BlockCollection, the file should be under construction - public BlockInfoUnderConstruction setLastBlock( + public BlockInfoContiguousUnderConstruction setLastBlock( BlockInfo lastBlock, DatanodeStorageInfo[] locations) throws IOException { Preconditions.checkState(isUnderConstruction(), @@ -240,7 +240,7 @@ public class INodeFile extends INodeWithAdditionalFields if (numBlocks() == 0) { throw new IOException("Failed to set last block: File is empty."); } - BlockInfoUnderConstruction ucBlock = + BlockInfoContiguousUnderConstruction ucBlock = lastBlock.convertToBlockUnderConstruction( BlockUCState.UNDER_CONSTRUCTION, locations); setBlock(numBlocks() - 1, ucBlock); @@ -251,7 +251,7 @@ public class INodeFile extends INodeWithAdditionalFields * Remove a block from the block list. This block should be * the last one on the list. 
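// Aside: removeLastBlock below shrinks the file's block array by allocating
// a copy rather than nulling the slot in place, so readers never see a
// partially updated array. A generic sketch of that idiom (ArrayOps is
// illustrative; the real code also casts the removed element to the
// under-construction type):
final class ArrayOps {
  static <T> T[] dropLast(T[] arr, java.util.function.IntFunction<T[]> alloc) {
    T[] shrunk = alloc.apply(arr.length - 1);
    System.arraycopy(arr, 0, shrunk, 0, arr.length - 1);
    return shrunk;
  }
  // usage (assumed types): BlockInfo[] newlist = dropLast(blocks, BlockInfo[]::new);
}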
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
index 1732865a712..a5053bc6e13 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;

 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.util.RwLock;
 import org.apache.hadoop.ipc.StandbyException;
@@ -45,7 +45,7 @@ public interface Namesystem extends RwLock, SafeMode {

   void checkOperation(OperationCategory read) throws StandbyException;

-  boolean isInSnapshot(BlockInfoUnderConstruction blockUC);
+  boolean isInSnapshot(BlockInfoContiguousUnderConstruction blockUC);

   CacheManager getCacheManager();
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
index d081a6b5dda..6b8388e0fe1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
@@ -22,7 +22,7 @@ import java.util.List;

 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
@@ -133,7 +133,7 @@ public class FileDiffList extends
     Block dontRemoveBlock = null;
     if (lastBlock != null && lastBlock.getBlockUCState().equals(
         HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) {
-      dontRemoveBlock = ((BlockInfoUnderConstruction) lastBlock)
+      dontRemoveBlock = ((BlockInfoContiguousUnderConstruction) lastBlock)
          .getTruncateBlock();
     }
     // Collect the remaining blocks of the file, ignoring truncate block
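The FileDiffList hunk above is the one call site in this patch where the downcast guards a deletion decision: while a file's last block is still UNDER_RECOVERY, the truncate block it points to has to survive snapshot cleanup. A reduced sketch of that guard follows; UcState, PlainBlock and RecoveringBlock are illustrative stand-ins, not HDFS types.

    enum UcState { COMPLETE, UNDER_CONSTRUCTION, UNDER_RECOVERY }

    class PlainBlock {
      UcState state = UcState.COMPLETE;
      UcState getBlockUCState() { return state; }
    }

    class RecoveringBlock extends PlainBlock {
      private final PlainBlock truncateBlock;
      RecoveringBlock(PlainBlock truncateBlock) {
        this.state = UcState.UNDER_RECOVERY;
        this.truncateBlock = truncateBlock;
      }
      PlainBlock getTruncateBlock() { return truncateBlock; }
    }

    public class TruncateGuardSketch {
      // Mirrors the FileDiffList logic: pick out the block that must NOT be
      // removed while truncate recovery is still in flight.
      static PlainBlock dontRemoveBlock(PlainBlock lastBlock) {
        if (lastBlock != null
            && lastBlock.getBlockUCState() == UcState.UNDER_RECOVERY) {
          return ((RecoveringBlock) lastBlock).getTruncateBlock();
        }
        return null; // nothing is being recovered; all blocks may be collected
      }

      public static void main(String[] args) {
        PlainBlock truncated = new PlainBlock();
        System.out.println(
            dontRemoveBlock(new RecoveringBlock(truncated)) == truncated); // true
      }
    }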
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 0218b32874f..53a016aa758 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -110,7 +110,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -1645,9 +1645,9 @@ public class DFSTestUtil {
     BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
     assertTrue("Block " + blk + " should be under construction, " +
         "got: " + storedBlock,
-        storedBlock instanceof BlockInfoUnderConstruction);
-    BlockInfoUnderConstruction ucBlock =
-      (BlockInfoUnderConstruction)storedBlock;
+        storedBlock instanceof BlockInfoContiguousUnderConstruction);
+    BlockInfoContiguousUnderConstruction ucBlock =
+      (BlockInfoContiguousUnderConstruction)storedBlock;
     // We expect that the replica with the most recent heart beat will be
     // the one to be in charge of the synchronization / recovery protocol.
     final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
index 630cd1c756e..a7ba29399dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.util.Time;
 import org.junit.Test;

 /**
@@ -39,8 +40,7 @@ public class TestBlockInfoUnderConstruction {
     DatanodeDescriptor dd3 = s3.getDatanodeDescriptor();
     dd1.isAlive = dd2.isAlive = dd3.isAlive = true;
-    BlockInfoUnderConstruction blockInfo =
-        new BlockInfoUnderConstructionContiguous(
+    BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction(
         new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP),
         (short) 3,
         BlockUCState.UNDER_CONSTRUCTION,
@@ -51,7 +51,7 @@ public class TestBlockInfoUnderConstruction {
     DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
     DFSTestUtil.resetLastUpdatesWithOffset(dd3, -2 * 1000);
     blockInfo.initializeBlockRecovery(1);
-    BlockInfoUnderConstruction[] blockInfoRecovery = dd2.getLeaseRecoveryCommand(1);
+    BlockInfoContiguousUnderConstruction[] blockInfoRecovery = dd2.getLeaseRecoveryCommand(1);
     assertEquals(blockInfoRecovery[0], blockInfo);

     // Recovery attempt #2.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 396dff302a9..2d3d90a1cda 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -726,7 +726,7 @@ public class TestBlockManager {
     // verify the storage info is correct
     assertTrue(bm.getStoredBlock(new Block(receivedBlockId)).findStorageInfo
         (ds) >= 0);
-    assertTrue(((BlockInfoUnderConstruction) bm.
+    assertTrue(((BlockInfoContiguousUnderConstruction) bm.
         getStoredBlock(new Block(receivingBlockId))).getNumExpectedLocations() > 0);
     assertTrue(bm.getStoredBlock(new Block(receivingReceivedBlockId))
         .findStorageInfo(ds) >= 0);
@@ -747,8 +747,8 @@ public class TestBlockManager {

   private BlockInfo addUcBlockToBM(long blkId) {
     Block block = new Block(blkId);
-    BlockInfoUnderConstruction blockInfo =
-        new BlockInfoUnderConstructionContiguous(block, (short) 3);
+    BlockInfoContiguousUnderConstruction blockInfo =
+        new BlockInfoContiguousUnderConstruction(block, (short) 3);
     BlockCollection bc = Mockito.mock(BlockCollection.class);
     Mockito.doReturn((short) 3).when(bc).getPreferredBlockReplication();
     bm.blocksMap.addBlockCollection(blockInfo, bc);
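Both test hunks above exercise the recovery-primary rule that the DFSTestUtil comment spells out: among a block's expected replicas, the datanode with the most recent heartbeat is put in charge of recovery. A tiny illustration of that selection, with a hypothetical Replica class standing in for DatanodeDescriptor:

    public class PrimaryReplicaSketch {
      // Hypothetical stand-in for DatanodeDescriptor: a name plus the
      // timestamp of the datanode's last heartbeat.
      static class Replica {
        final String name;
        final long lastUpdateMillis;
        Replica(String name, long lastUpdateMillis) {
          this.name = name;
          this.lastUpdateMillis = lastUpdateMillis;
        }
      }

      // The replica with the most recent heartbeat becomes the recovery primary.
      static Replica choosePrimary(Replica[] expected) {
        Replica primary = expected[0];
        for (Replica r : expected) {
          if (r.lastUpdateMillis > primary.lastUpdateMillis) {
            primary = r;
          }
        }
        return primary;
      }

      public static void main(String[] args) {
        long now = System.currentTimeMillis();
        Replica[] replicas = {
            new Replica("dd1", now - 2000), // heartbeat 2s ago
            new Replica("dd2", now),        // freshest heartbeat
            new Replica("dd3", now - 1000),
        };
        System.out.println(choosePrimary(replicas).name); // prints dd2
      }
    }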
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
index e48e9e84c63..6fc30ba6d63 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.util.Time;
 import org.junit.Test;

 /**
@@ -172,8 +173,7 @@ public class TestHeartbeatHandling {
               dd1.getStorageInfos()[0],
               dd2.getStorageInfos()[0],
               dd3.getStorageInfos()[0]};
-          BlockInfoUnderConstruction blockInfo =
-              new BlockInfoUnderConstructionContiguous(
+          BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction(
               new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3,
               BlockUCState.UNDER_RECOVERY, storages);
           dd1.addBlockToBeRecovered(blockInfo);
@@ -195,7 +195,7 @@ public class TestHeartbeatHandling {
           // More than the default stale interval of 30 seconds.
           DFSTestUtil.resetLastUpdatesWithOffset(dd2, -40 * 1000);
           DFSTestUtil.resetLastUpdatesWithOffset(dd3, 0);
-          blockInfo = new BlockInfoUnderConstructionContiguous(
+          blockInfo = new BlockInfoContiguousUnderConstruction(
              new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3,
              BlockUCState.UNDER_RECOVERY, storages);
          dd1.addBlockToBeRecovered(blockInfo);
@@ -216,7 +216,7 @@ public class TestHeartbeatHandling {
          // More than the default stale interval of 30 seconds.
          DFSTestUtil.resetLastUpdatesWithOffset(dd2, -40 * 1000);
          DFSTestUtil.resetLastUpdatesWithOffset(dd3, -80 * 1000);
-          blockInfo = new BlockInfoUnderConstructionContiguous(
+          blockInfo = new BlockInfoContiguousUnderConstruction(
              new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3,
              BlockUCState.UNDER_RECOVERY, storages);
          dd1.addBlockToBeRecovered(blockInfo);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 28129572370..6553185e0ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -1182,8 +1182,7 @@ public class TestReplicationPolicy {
     // block under construction, the BlockManager will realize the expected
     // replication has been achieved and remove it from the under-replicated
     // queue.
-    BlockInfoUnderConstruction info =
-        new BlockInfoUnderConstructionContiguous(block1, (short) 1);
+    BlockInfoContiguousUnderConstruction info = new BlockInfoContiguousUnderConstruction(block1, (short) 1);
     BlockCollection bc = mock(BlockCollection.class);
     when(bc.getPreferredBlockReplication()).thenReturn((short)1);
     bm.addBlockCollection(info, bc);
@@ -1239,7 +1238,7 @@ public class TestReplicationPolicy {

     DatanodeStorageInfo[] storageAry = {new DatanodeStorageInfo(
         dataNodes[0], new DatanodeStorage("s1"))};
-    final BlockInfoUnderConstruction ucBlock =
+    final BlockInfoContiguousUnderConstruction ucBlock =
         info.convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION,
             storageAry);
     DatanodeStorageInfo storage = mock(DatanodeStorageInfo.class);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
index 872ff9c490f..f372bec3ba6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.junit.AfterClass;
@@ -170,7 +170,7 @@ public class TestBlockUnderConstruction {
     final List<LocatedBlock> blocks = lb.getLocatedBlocks();
     assertEquals(i, blocks.size());
     final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
-    assertTrue(b instanceof BlockInfoUnderConstruction);
+    assertTrue(b instanceof BlockInfoContiguousUnderConstruction);

     if (++i < NUM_BLOCKS) {
       // write one more block
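The TestReplicationPolicy comment above captures the behavioral contract these tests keep pinning down: for a block under construction, the BlockManager measures replication against the expected storage locations, so a UC block whose expected locations already meet the target drops out of the under-replicated queue. A toy version of that comparison, under the assumption that only the two counts matter:

    public class UcReplicationCheckSketch {
      // For an under-construction block, compare the replication target
      // against the number of expected locations, not finished replicas.
      static boolean isUnderReplicated(int numExpectedLocations, short targetReplication) {
        return numExpectedLocations < targetReplication;
      }

      public static void main(String[] args) {
        // Mirrors the test setup: target replication (short) 1 and one
        // expected location, so the block is no longer under-replicated.
        System.out.println(isUnderReplicated(1, (short) 1)); // prints false
      }
    }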
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
index 9de426e4505..c218b7c2cc6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
@@ -24,8 +24,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstructionContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.junit.Test;
@@ -69,10 +68,8 @@ public class TestCommitBlockSynchronization {
     namesystem.dir.getINodeMap().put(file);

     FSNamesystem namesystemSpy = spy(namesystem);
-    BlockInfoUnderConstruction blockInfo =
-        new BlockInfoUnderConstructionContiguous(
-        block, (short) 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION,
-        targets);
+    BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction(
+        block, (short) 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
     blockInfo.setBlockCollection(file);
     blockInfo.setGenerationStamp(genStamp);
     blockInfo.initializeBlockRecovery(genStamp);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 8e54edc8139..767f4de543b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -54,7 +54,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -1016,7 +1016,7 @@ public class TestFileTruncate {
         is(fsn.getBlockIdManager().getGenerationStampV2()));
     assertThat(file.getLastBlock().getBlockUCState(),
         is(HdfsServerConstants.BlockUCState.UNDER_RECOVERY));
-    long blockRecoveryId = ((BlockInfoUnderConstruction) file.getLastBlock())
+    long blockRecoveryId = ((BlockInfoContiguousUnderConstruction) file.getLastBlock())
         .getBlockRecoveryId();
     assertThat(blockRecoveryId, is(initialGenStamp + 1));
     fsn.getEditLog().logTruncate(
@@ -1049,7 +1049,7 @@ public class TestFileTruncate {
         is(fsn.getBlockIdManager().getGenerationStampV2()));
     assertThat(file.getLastBlock().getBlockUCState(),
         is(HdfsServerConstants.BlockUCState.UNDER_RECOVERY));
-    long blockRecoveryId = ((BlockInfoUnderConstruction) file.getLastBlock())
+    long blockRecoveryId = ((BlockInfoContiguousUnderConstruction) file.getLastBlock())
        .getBlockRecoveryId();
     assertThat(blockRecoveryId, is(initialGenStamp + 1));
     fsn.getEditLog().logTruncate(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
index 14d9a1ee942..d202fb788f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
@@ -72,7 +72,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
@@ -752,8 +752,8 @@ public class TestRetryCacheWithHA {
     boolean checkNamenodeBeforeReturn() throws Exception {
       INodeFile fileNode = cluster.getNamesystem(0).getFSDirectory()
           .getINode4Write(file).asFile();
-      BlockInfoUnderConstruction blkUC =
-          (BlockInfoUnderConstruction) (fileNode.getBlocks())[1];
+      BlockInfoContiguousUnderConstruction blkUC =
+          (BlockInfoContiguousUnderConstruction) (fileNode.getBlocks())[1];
       int datanodeNum = blkUC.getExpectedStorageLocations().length;
       for (int i = 0; i < CHECKTIMES && datanodeNum != 2; i++) {
         Thread.sleep(1000);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
index 824f45baa41..a1abd0892e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
@@ -44,7 +44,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage;
 import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
@@ -177,7 +177,7 @@ public class SnapshotTestHelper {
    * Specific information for different types of INode:
    * {@link INodeDirectory}:childrenSize
    * {@link INodeFile}: fileSize, block list. Check {@link BlockInfo#toString()}
-   * and {@link BlockInfoUnderConstruction#toString()} for detailed information.
+   * and {@link BlockInfoContiguousUnderConstruction#toString()} for detailed information.
    * {@link FileWithSnapshot}: next link
    *
    * @see INode#dumpTreeRecursively()
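Taken as a whole the patch is essentially mechanical: the single class BlockInfoContiguousUnderConstruction replaces the BlockInfoUnderConstruction/BlockInfoUnderConstructionContiguous pair at every import, cast, constructor call, and javadoc reference. The one invariant the TestFileTruncate hunks pin down deserves a closing sketch: initializing block recovery stamps the under-construction block with the next generation stamp, one past the stamp it held when truncate began. UcBlockSketch below is a hypothetical stand-in, not the real class.

    public class RecoveryIdSketch {
      // Hypothetical stand-in: the recovery id is the fresh generation stamp
      // handed to the block when recovery is initialized.
      static class UcBlockSketch {
        private long blockRecoveryId;
        void initializeBlockRecovery(long newGenStamp) {
          blockRecoveryId = newGenStamp;
        }
        long getBlockRecoveryId() {
          return blockRecoveryId;
        }
      }

      public static void main(String[] args) {
        long initialGenStamp = 1000;
        UcBlockSketch block = new UcBlockSketch();
        // Truncate recovery allocates the next generation stamp.
        block.initializeBlockRecovery(initialGenStamp + 1);
        System.out.println(block.getBlockRecoveryId() == initialGenStamp + 1); // true
      }
    }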