From 52d18aa217a308e8343ca8b23b5a2dedda77270f Mon Sep 17 00:00:00 2001
From: Arpit Agarwal
Date: Wed, 18 Jun 2014 19:37:18 +0000
Subject: [PATCH] HDFS-6552. add DN storage to a BlockInfo will not replace the
 different storage from same DN. (Contributed by Amir Langer)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1603602 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt    |  3 ++
 .../server/blockmanagement/BlockInfo.java      |  2 +-
 .../server/blockmanagement/TestBlockInfo.java  | 30 +++++++++++++++++++
 3 files changed, 34 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 268276c12f4..e3e6e095e7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -653,6 +653,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6527. Edit log corruption due to defered INode removal. (kihwal and
     jing9 via jing9)
 
+    HDFS-6552. add DN storage to a BlockInfo will not replace the different
+    storage from same DN. (Amir Langer via Arpit Agarwal)
+
   BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
 
     HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 983f60bb8f8..c6650bf66db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -203,7 +203,7 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
       } else {
         // The block is on the DN but belongs to a different storage.
         // Update our state.
-        removeStorage(storage);
+        removeStorage(getStorageInfo(idx));
         added = false;      // Just updating storage. Return false.
      }
    }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
index 78a77c44601..7cfe423c6e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
@@ -29,6 +29,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.junit.Assert;
 import org.junit.Test;
 
 /**
@@ -42,6 +44,34 @@ public class TestBlockInfo {
   private static final Log LOG = LogFactory
       .getLog("org.apache.hadoop.hdfs.TestBlockInfo");
 
+
+  @Test
+  public void testAddStorage() throws Exception {
+    BlockInfo blockInfo = new BlockInfo(3);
+
+    final DatanodeStorageInfo storage = DFSTestUtil.createDatanodeStorageInfo("storageID", "127.0.0.1");
+
+    boolean added = blockInfo.addStorage(storage);
+
+    Assert.assertTrue(added);
+    Assert.assertEquals(storage, blockInfo.getStorageInfo(0));
+  }
+
+
+  @Test
+  public void testReplaceStorageIfDifferentOneAlreadyExistedFromSameDataNode() throws Exception {
+    BlockInfo blockInfo = new BlockInfo(3);
+
+    final DatanodeStorageInfo storage1 = DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");
+    final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));
+
+    blockInfo.addStorage(storage1);
+    boolean added = blockInfo.addStorage(storage2);
+
+    Assert.assertFalse(added);
+    Assert.assertEquals(storage2, blockInfo.getStorageInfo(0));
+  }
+
   @Test
   public void testBlockListMoveToHead() throws Exception {
     LOG.info("BlockInfo moveToHead tests...");
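
Reviewer note (commentary appended after the patch, not part of it): the single-line change in
BlockInfo.addStorage() is the whole fix. When a datanode reports a replica under a new storage ID,
findDatanode() locates the stale entry for that datanode, but the old code called
removeStorage(storage) on the incoming object, which is not in the block's storage list yet, so the
stale entry was never evicted. The sketch below is a minimal, self-contained illustration of that
replacement logic; SimpleBlockInfo, StorageInfo and the String-based datanode id are made-up
stand-ins, not the real org.apache.hadoop.hdfs.server.blockmanagement API.

  import java.util.ArrayList;
  import java.util.List;

  public class StorageReplacementSketch {

    /** Hypothetical stand-in for DatanodeStorageInfo: one storage directory on one datanode. */
    static class StorageInfo {
      final String datanode;   // stand-in for DatanodeDescriptor
      final String storageId;
      StorageInfo(String datanode, String storageId) {
        this.datanode = datanode;
        this.storageId = storageId;
      }
      @Override
      public String toString() { return datanode + "/" + storageId; }
    }

    /** Hypothetical stand-in for BlockInfo's per-block list of storages. */
    static class SimpleBlockInfo {
      private final List<StorageInfo> storages = new ArrayList<StorageInfo>();

      /** Returns true only when a genuinely new replica location was added. */
      boolean addStorage(StorageInfo storage) {
        int idx = findDatanode(storage.datanode);
        if (idx >= 0) {
          if (storages.get(idx) == storage) {
            return false;                 // exact same storage already recorded
          }
          // Same datanode, different storage: remove the stale entry that is
          // already in the list (the bug removed 'storage' itself, which is not
          // in the list yet, leaving the stale entry behind).
          storages.remove(idx);
          storages.add(storage);
          return false;                   // just updating, not a new replica
        }
        storages.add(storage);
        return true;
      }

      int findDatanode(String datanode) {
        for (int i = 0; i < storages.size(); i++) {
          if (storages.get(i).datanode.equals(datanode)) {
            return i;
          }
        }
        return -1;
      }

      StorageInfo getStorageInfo(int idx) {
        return storages.get(idx);
      }
    }

    public static void main(String[] args) {
      SimpleBlockInfo block = new SimpleBlockInfo();
      block.addStorage(new StorageInfo("127.0.0.1", "storageID1"));
      boolean added = block.addStorage(new StorageInfo("127.0.0.1", "storageID2"));

      // Mirrors the new unit test: the second report is an update, not an add,
      // and the single remaining entry points at storageID2.
      System.out.println(added);                   // false
      System.out.println(block.getStorageInfo(0)); // 127.0.0.1/storageID2
    }
  }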