HDFS-5154. Fix TestBlockManager and TestDatanodeDescriptor after HDFS-4987. Contributed by Junping Du

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1519548 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Tsz-wo Sze 2013-09-03 04:04:22 +00:00
parent b305f1ff08
commit f7e3bc553d
5 changed files with 80 additions and 27 deletions

View File

@@ -12,3 +12,6 @@ IMPROVEMENTS:
HDFS-4987. Namenode changes to track multiple storages per datanode. HDFS-4987. Namenode changes to track multiple storages per datanode.
(szetszwo) (szetszwo)
HDFS-5154. Fix TestBlockManager and TestDatanodeDescriptor after HDFS-4987.
(Junping Du via szetszwo)

View File

@@ -247,9 +247,12 @@ public class DatanodeDescriptor extends DatanodeInfo {
*/ */
boolean removeBlock(BlockInfo b) { boolean removeBlock(BlockInfo b) {
int index = b.findStorageInfo(this); int index = b.findStorageInfo(this);
DatanodeStorageInfo s = b.getStorageInfo(index); // if block exists on this datanode
if (s != null) { if (index >= 0) {
return s.removeBlock(b); DatanodeStorageInfo s = b.getStorageInfo(index);
if (s != null) {
return s.removeBlock(b);
}
} }
return false; return false;
} }

View File

@@ -24,9 +24,11 @@ import java.util.Iterator;
import java.util.Set; import java.util.Set;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Daemon;
import org.junit.Assert; import org.junit.Assert;
@@ -215,4 +217,23 @@ public class BlockManagerTestUtil {
public static void checkHeartbeat(BlockManager bm) { public static void checkHeartbeat(BlockManager bm) {
bm.getDatanodeManager().getHeartbeatManager().heartbeatCheck(); bm.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
} }
public static DatanodeDescriptor getLocalDatanodeDescriptor(
boolean initializeStorage) {
DatanodeDescriptor dn = new DatanodeDescriptor(DFSTestUtil.getLocalDatanodeID());
if (initializeStorage) {
dn.updateStorage(new DatanodeStorage(DatanodeStorage.newStorageID()));
}
return dn;
}
public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
String rackLocation, boolean initializeStorage) {
DatanodeDescriptor dn = DFSTestUtil.getDatanodeDescriptor(ipAddr,
DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT, rackLocation);
if (initializeStorage) {
dn.updateStorage(new DatanodeStorage(DatanodeStorage.newStorageID()));
}
return dn;
}
} }

View File

@@ -25,6 +25,7 @@ import static org.junit.Assert.assertTrue;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedList; import java.util.LinkedList;
import java.util.List; import java.util.List;
import java.util.Map.Entry; import java.util.Map.Entry;
@@ -39,6 +40,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.NetworkTopology;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@@ -80,17 +82,17 @@ public class TestBlockManager {
Mockito.doReturn(true).when(fsn).hasWriteLock(); Mockito.doReturn(true).when(fsn).hasWriteLock();
bm = new BlockManager(fsn, fsn, conf); bm = new BlockManager(fsn, fsn, conf);
nodes = ImmutableList.of( nodes = ImmutableList.of(
DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA"), BlockManagerTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA", true),
DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA"), BlockManagerTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA", true),
DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA"), BlockManagerTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA", true),
DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackB"), BlockManagerTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackB", true),
DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackB"), BlockManagerTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackB", true),
DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackB") BlockManagerTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackB", true)
); );
rackA = nodes.subList(0, 3); rackA = nodes.subList(0, 3);
rackB = nodes.subList(3, 6); rackB = nodes.subList(3, 6);
} }
private void addNodes(Iterable<DatanodeDescriptor> nodesToAdd) { private void addNodes(Iterable<DatanodeDescriptor> nodesToAdd) {
NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology(); NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
// construct network topology // construct network topology
@@ -282,6 +284,7 @@ public class TestBlockManager {
// the third off-rack replica. // the third off-rack replica.
DatanodeDescriptor rackCNode = DatanodeDescriptor rackCNode =
DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/rackC"); DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/rackC");
rackCNode.updateStorage(new DatanodeStorage(DatanodeStorage.newStorageID()));
addNodes(ImmutableList.of(rackCNode)); addNodes(ImmutableList.of(rackCNode));
try { try {
DatanodeDescriptor[] pipeline2 = scheduleSingleReplication(blockInfo); DatanodeDescriptor[] pipeline2 = scheduleSingleReplication(blockInfo);
@@ -322,15 +325,15 @@ public class TestBlockManager {
@Test @Test
public void testBlocksAreNotUnderreplicatedInSingleRack() throws Exception { public void testBlocksAreNotUnderreplicatedInSingleRack() throws Exception {
List<DatanodeDescriptor> nodes = ImmutableList.of( List<DatanodeDescriptor> nodes = ImmutableList.of(
DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA"), BlockManagerTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA", true),
DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA"), BlockManagerTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA", true),
DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA"), BlockManagerTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA", true),
DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackA"), BlockManagerTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackA", true),
DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackA"), BlockManagerTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackA", true),
DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackA") BlockManagerTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackA", true)
); );
addNodes(nodes); addNodes(nodes);
List<DatanodeDescriptor> origNodes = nodes.subList(0, 3);; List<DatanodeDescriptor> origNodes = nodes.subList(0, 3);
for (int i = 0; i < NUM_TEST_ITERS; i++) { for (int i = 0; i < NUM_TEST_ITERS; i++) {
doTestSingleRackClusterIsSufficientlyReplicated(i, origNodes); doTestSingleRackClusterIsSufficientlyReplicated(i, origNodes);
} }
@@ -353,7 +356,17 @@ public class TestBlockManager {
private void fulfillPipeline(BlockInfo blockInfo, private void fulfillPipeline(BlockInfo blockInfo,
DatanodeDescriptor[] pipeline) throws IOException { DatanodeDescriptor[] pipeline) throws IOException {
for (int i = 1; i < pipeline.length; i++) { for (int i = 1; i < pipeline.length; i++) {
bm.addBlock(pipeline[i], "STORAGE_ID", blockInfo, null); DatanodeDescriptor dn = pipeline[i];
Iterator<DatanodeStorageInfo> iterator = dn.getStorageInfos().iterator();
if (iterator.hasNext()) {
DatanodeStorageInfo storage = iterator.next();
bm.addBlock(dn, storage.getStorageID(), blockInfo, null);
blockInfo.addStorage(storage);
} else {
throw new RuntimeException("Storage info on node: " + dn.getHostName()
+ " is invalid.");
}
} }
} }
@@ -494,7 +507,10 @@ public class TestBlockManager {
@Test @Test
public void testSafeModeIBR() throws Exception { public void testSafeModeIBR() throws Exception {
DatanodeDescriptor node = spy(nodes.get(0)); DatanodeDescriptor node = spy(nodes.get(0));
node.setStorageID("dummy-storage"); Iterator<DatanodeStorageInfo> i = node.getStorageInfos().iterator();
DatanodeStorageInfo ds = i.next();
node.setStorageID(ds.getStorageID());
node.isAlive = true; node.isAlive = true;
DatanodeRegistration nodeReg = DatanodeRegistration nodeReg =
@@ -510,12 +526,15 @@ public class TestBlockManager {
assertTrue(node.isFirstBlockReport()); assertTrue(node.isFirstBlockReport());
// send block report, should be processed // send block report, should be processed
reset(node); reset(node);
bm.processReport(node, null, "pool", new BlockListAsLongs(null, null));
bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool",
new BlockListAsLongs(null, null));
verify(node).receivedBlockReport(); verify(node).receivedBlockReport();
assertFalse(node.isFirstBlockReport()); assertFalse(node.isFirstBlockReport());
// send block report again, should NOT be processed // send block report again, should NOT be processed
reset(node); reset(node);
bm.processReport(node, null, "pool", new BlockListAsLongs(null, null)); bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool",
new BlockListAsLongs(null, null));
verify(node, never()).receivedBlockReport(); verify(node, never()).receivedBlockReport();
assertFalse(node.isFirstBlockReport()); assertFalse(node.isFirstBlockReport());
@@ -527,7 +546,8 @@ public class TestBlockManager {
assertTrue(node.isFirstBlockReport()); // ready for report again assertTrue(node.isFirstBlockReport()); // ready for report again
// send block report, should be processed after restart // send block report, should be processed after restart
reset(node); reset(node);
bm.processReport(node, null, "pool", new BlockListAsLongs(null, null)); bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool",
new BlockListAsLongs(null, null));
verify(node).receivedBlockReport(); verify(node).receivedBlockReport();
assertFalse(node.isFirstBlockReport()); assertFalse(node.isFirstBlockReport());
} }
@@ -535,7 +555,9 @@ public class TestBlockManager {
@Test @Test
public void testSafeModeIBRAfterIncremental() throws Exception { public void testSafeModeIBRAfterIncremental() throws Exception {
DatanodeDescriptor node = spy(nodes.get(0)); DatanodeDescriptor node = spy(nodes.get(0));
node.setStorageID("dummy-storage"); Iterator<DatanodeStorageInfo> i = node.getStorageInfos().iterator();
DatanodeStorageInfo ds = i.next();
node.setStorageID(ds.getStorageID());
node.isAlive = true; node.isAlive = true;
DatanodeRegistration nodeReg = DatanodeRegistration nodeReg =
@@ -552,7 +574,8 @@ public class TestBlockManager {
// send block report while pretending to already have blocks // send block report while pretending to already have blocks
reset(node); reset(node);
doReturn(1).when(node).numBlocks(); doReturn(1).when(node).numBlocks();
bm.processReport(node, null, "pool", new BlockListAsLongs(null, null)); bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool",
new BlockListAsLongs(null, null));
verify(node).receivedBlockReport(); verify(node).receivedBlockReport();
assertFalse(node.isFirstBlockReport()); assertFalse(node.isFirstBlockReport());
} }

View File

@@ -22,6 +22,7 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Iterator;
import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
@@ -55,11 +56,13 @@ public class TestDatanodeDescriptor {
@Test @Test
public void testBlocksCounter() throws Exception { public void testBlocksCounter() throws Exception {
DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor(); DatanodeDescriptor dd = BlockManagerTestUtil.getLocalDatanodeDescriptor(true);
assertEquals(0, dd.numBlocks()); assertEquals(0, dd.numBlocks());
BlockInfo blk = new BlockInfo(new Block(1L), 1); BlockInfo blk = new BlockInfo(new Block(1L), 1);
BlockInfo blk1 = new BlockInfo(new Block(2L), 2); BlockInfo blk1 = new BlockInfo(new Block(2L), 2);
final String storageID = "STORAGE_ID"; Iterator<DatanodeStorageInfo> iterator = dd.getStorageInfos().iterator();
assertTrue(iterator.hasNext());
final String storageID = iterator.next().getStorageID();
// add first block // add first block
assertTrue(dd.addBlock(storageID, blk)); assertTrue(dd.addBlock(storageID, blk));
assertEquals(1, dd.numBlocks()); assertEquals(1, dd.numBlocks());