diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b879f62d134..11d15f8575c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -129,6 +129,10 @@ Trunk (Unreleased)
     HDFS-6609. Use DirectorySnapshottableFeature to represent a snapshottable
     directory. (Jing Zhao via wheat9)
 
+    HDFS-5782. Change BlockListAsLongs constructor to take Replica as parameter
+    type instead of concrete classes Block and ReplicaInfo. (David Powell
+    and Joe Pallas via szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
index 8a0b7316c83..4389714986f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
@@ -25,7 +25,7 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
+import org.apache.hadoop.hdfs.server.datanode.Replica;
 
 /**
  * This class provides an interface for accessing list of blocks that
@@ -85,8 +85,8 @@ public class BlockListAsLongs implements Iterable<Block> {
    * @param finalized - list of finalized blocks
    * @param uc - list of under construction blocks
    */
-  public BlockListAsLongs(final List<? extends Block> finalized,
-                          final List<ReplicaInfo> uc) {
+  public BlockListAsLongs(final List<? extends Replica> finalized,
+                          final List<? extends Replica> uc) {
     int finalizedSize = finalized == null ? 0 : finalized.size();
     int ucSize = uc == null ? 0 : uc.size();
     int len = HEADER_SIZE
@@ -113,8 +113,34 @@ public class BlockListAsLongs implements Iterable<Block> {
     }
   }
 
+  /**
+   * Create block report from a list of finalized blocks. Used by
+   * NNThroughputBenchmark.
+   *
+   * @param blocks - list of finalized blocks
+   */
+  public BlockListAsLongs(final List<? extends Block> blocks) {
+    int finalizedSize = blocks == null ? 0 : blocks.size();
+    int len = HEADER_SIZE
+        + (finalizedSize + 1) * LONGS_PER_FINALIZED_BLOCK;
+
+    blockList = new long[len];
+
+    // set the header
+    blockList[0] = finalizedSize;
+    blockList[1] = 0;
+
+    // set finalized blocks
+    for (int i = 0; i < finalizedSize; i++) {
+      setBlock(i, blocks.get(i));
+    }
+
+    // set invalid delimiting block
+    setDelimitingBlock(finalizedSize);
+  }
+
   public BlockListAsLongs() {
-    this(null);
+    this((long[])null);
   }
 
   /**
@@ -279,18 +305,30 @@ public class BlockListAsLongs implements Iterable<Block> {
   /**
    * Set the indexTh block
    * @param index - the index of the block to set
-   * @param b - the block is set to the value of the this block
+   * @param r - the block is set to the value of the this Replica
    */
-  private <T extends Block> void setBlock(final int index, final T b) {
+  private void setBlock(final int index, final Replica r) {
+    int pos = index2BlockId(index);
+    blockList[pos] = r.getBlockId();
+    blockList[pos + 1] = r.getNumBytes();
+    blockList[pos + 2] = r.getGenerationStamp();
+    if(index < getNumberOfFinalizedReplicas())
+      return;
+    assert r.getState() != ReplicaState.FINALIZED :
+      "Must be under-construction replica.";
+    blockList[pos + 3] = r.getState().getValue();
+  }
+
+  /**
+   * Set the indexTh block
+   * @param index - the index of the block to set
+   * @param b - the block is set to the value of the this Block
+   */
+  private void setBlock(final int index, final Block b) {
     int pos = index2BlockId(index);
     blockList[pos] = b.getBlockId();
     blockList[pos + 1] = b.getNumBytes();
     blockList[pos + 2] = b.getGenerationStamp();
-    if(index < getNumberOfFinalizedReplicas())
-      return;
-    assert ((ReplicaInfo)b).getState() != ReplicaState.FINALIZED :
-      "Must be under-construction replica.";
-    blockList[pos + 3] = ((ReplicaInfo)b).getState().getValue();
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index 78eedf96c28..16e7a208f62 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -557,12 +557,12 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   }
 
   synchronized BlockListAsLongs getBlockReport(String bpid) {
-    final List<Block> blocks = new ArrayList<Block>();
+    final List<Replica> blocks = new ArrayList<Replica>();
     final Map<Block, BInfo> map = blockMap.get(bpid);
     if (map != null) {
       for (BInfo b : map.values()) {
         if (b.isFinalized()) {
-          blocks.add(b.theBlock);
+          blocks.add(b);
         }
       }
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java
index e71c0ea982a..1152c74477c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java
@@ -114,7 +114,7 @@ public class TestBlockHasMultipleReplicasOnSameDN {
     }
 
     for (int i = 0; i < cluster.getStoragesPerDatanode(); ++i) {
-      BlockListAsLongs bll = new BlockListAsLongs(blocks, null);
+      BlockListAsLongs bll = new BlockListAsLongs(blocks);
       FsVolumeSpi v = dn.getFSDataset().getVolumes().get(i);
       DatanodeStorage dns = new DatanodeStorage(v.getStorageID());
       reports[i] = new StorageBlockReport(dns, bll.getBlockListAsLongs());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 1fb1c1f9942..6abe600c305 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -977,7 +977,7 @@ public class NNThroughputBenchmark implements Tool {
       // fill remaining slots with blocks that do not exist
       for(int idx = blocks.size()-1; idx >= nrBlocks; idx--)
         blocks.set(idx, new Block(blocks.size() - idx, 0, 0));
-      blockReportList = new BlockListAsLongs(blocks,null).getBlockListAsLongs();
+      blockReportList = new BlockListAsLongs(blocks).getBlockListAsLongs();
     }
 
     long[] getBlockReportList() {