diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 558f4aa494c..1b6632e3d9d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -515,6 +515,8 @@ Release 2.0.0 - UNRELEASED
     HDFS-3395. NN doesn't start with HA+security enabled and HTTP address
     set to 0.0.0.0. (atm)
 
+    HDFS-3230. Cleanup DatanodeID creation in the tests. (eli)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 9218078a482..912f362728c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -40,7 +40,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 7c8a02e513e..eded4667dca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -706,13 +706,23 @@ public static void setFederatedConfiguration(MiniDFSCluster cluster,
         .join(nameservices));
   }
 
+  public static DatanodeID getLocalDatanodeID() {
+    return new DatanodeID("127.0.0.1", "localhost", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
+  }
+
+  public static DatanodeID getLocalDatanodeID(int port) {
+    return new DatanodeID("127.0.0.1", "localhost", "", port, port, port);
+  }
+
   public static DatanodeDescriptor getLocalDatanodeDescriptor() {
-    return new DatanodeDescriptor(
-        new DatanodeID("127.0.0.1", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT));
+    return new DatanodeDescriptor(getLocalDatanodeID());
   }
 
   public static DatanodeInfo getLocalDatanodeInfo() {
-    return new DatanodeInfo(
-        new DatanodeID("127.0.0.1", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT));
+    return new DatanodeInfo(getLocalDatanodeID());
+  }
+
+  public static DatanodeInfo getLocalDatanodeInfo(int port) {
+    return new DatanodeInfo(getLocalDatanodeID(port));
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 8fe96a8005f..5624fed6e75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -626,8 +626,7 @@ public void testClientDNProtocolTimeout() throws IOException {
     server.start();
 
     final InetSocketAddress addr = NetUtils.getConnectAddress(server);
-    DatanodeID fakeDnId = new DatanodeID(
-        "localhost", "localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
+    DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
 
     ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
     LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
index 9841dc8700e..9a7504a0508 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
@@ -62,7 +62,7 @@ public void testDefaultPolicy() throws Exception {
     final DatanodeInfo[][] datanodes = new DatanodeInfo[infos.length + 1][];
     datanodes[0] = new DatanodeInfo[0];
     for(int i = 0; i < infos.length; ) {
-      infos[i] = new DatanodeInfo(new DatanodeID("dn" + i, 100));
+      infos[i] = DFSTestUtil.getLocalDatanodeInfo(50020 + i);
       i++;
       datanodes[i] = new DatanodeInfo[i];
       System.arraycopy(infos, 0, datanodes[i], 0, datanodes[i].length);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
index a6280d319aa..6a8d687a3aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
@@ -131,7 +131,7 @@ public void testConvertNamenodeRegistration() {
   @Test
   public void testConvertDatanodeID() {
-    DatanodeID dn = new DatanodeID("node", "node", "sid", 1, 2, 3);
+    DatanodeID dn = DFSTestUtil.getLocalDatanodeID();
     DatanodeIDProto dnProto = PBHelper.convert(dn);
     DatanodeID dn2 = PBHelper.convert(dnProto);
     compare(dn, dn2);
@@ -280,10 +280,6 @@ public ExtendedBlock getExtendedBlock(long blkid) {
     return new ExtendedBlock("bpid", blkid, 100, 2);
   }
 
-  private DatanodeInfo getDNInfo() {
-    return new DatanodeInfo(new DatanodeID("node", "node", "sid", 0, 1, 2));
-  }
-
   private void compare(DatanodeInfo dn1, DatanodeInfo dn2) {
     assertEquals(dn1.getAdminState(), dn2.getAdminState());
     assertEquals(dn1.getBlockPoolUsed(), dn2.getBlockPoolUsed());
@@ -316,7 +312,9 @@ public void testConvertExtendedBlock() {
   @Test
   public void testConvertRecoveringBlock() {
-    DatanodeInfo[] dnInfo = new DatanodeInfo[] { getDNInfo(), getDNInfo() };
+    DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
+    DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
+    DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };
     RecoveringBlock b = new RecoveringBlock(getExtendedBlock(), dnInfo, 3);
     RecoveringBlockProto bProto = PBHelper.convert(b);
     RecoveringBlock b1 = PBHelper.convert(bProto);
@@ -330,7 +328,9 @@ public void testConvertRecoveringBlock() {
   @Test
   public void testConvertBlockRecoveryCommand() {
-    DatanodeInfo[] dnInfo = new DatanodeInfo[] { getDNInfo(), getDNInfo() };
+    DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
+    DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
+    DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };
 
     List<RecoveringBlock> blks = ImmutableList.of(
       new RecoveringBlock(getExtendedBlock(1), dnInfo, 3),
@@ -401,11 +401,14 @@ private void compare(Token expected,
   @Test
   public void testConvertLocatedBlock() {
     DatanodeInfo [] dnInfos = new DatanodeInfo[3];
-    dnInfos[0] = new DatanodeInfo("host0", "host0", "0", 5000, 5001, 5002, 20000, 10001, 9999,
+    dnInfos[0] = new DatanodeInfo("127.0.0.1", "host1", "0",
+        5000, 5001, 5002, 20000, 10001, 9999,
         59, 69, 32, "local", AdminStates.DECOMMISSION_INPROGRESS);
-    dnInfos[1] = new DatanodeInfo("host1", "host1", "1", 5000, 5001, 5002, 20000, 10001, 9999,
+    dnInfos[1] = new DatanodeInfo("127.0.0.1", "host2", "1",
+        5000, 5001, 5002, 20000, 10001, 9999,
         59, 69, 32, "local", AdminStates.DECOMMISSIONED);
-    dnInfos[2] = new DatanodeInfo("host2", "host2", "2", 5000, 5001, 5002, 20000, 10001, 9999,
+    dnInfos[2] = new DatanodeInfo("127.0.0.1", "host3", "2",
+        5000, 5001, 5002, 20000, 10001, 9999,
         59, 69, 32, "local", AdminStates.NORMAL);
     LocatedBlock lb = new LocatedBlock(
         new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
@@ -424,7 +427,7 @@ public void testConvertLocatedBlock() {
   @Test
   public void testConvertDatanodeRegistration() {
-    DatanodeID dnId = new DatanodeID("host", "host", "xyz", 0, 1, 0);
+    DatanodeID dnId = DFSTestUtil.getLocalDatanodeID();
     BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
     ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10,
         getBlockKey(1), keys);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
index ea335d26120..bf2c33815bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -279,8 +280,7 @@ public void testBlockTokenRpcLeak() throws Exception {
     server.start();
 
     final InetSocketAddress addr = NetUtils.getConnectAddress(server);
-    DatanodeID fakeDnId = new DatanodeID("localhost",
-        "localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
+    DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
 
     ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
     LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index a6e8c4f05b8..f13b2779fad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -47,17 +47,10 @@ import com.google.common.collect.Lists;
 
 public class TestBlockManager {
-  private final List<DatanodeDescriptor> nodes = ImmutableList.of(
-      new DatanodeDescriptor(new DatanodeID("h1", 5020), "/rackA"),
-      new DatanodeDescriptor(new DatanodeID("h2", 5020), "/rackA"),
-      new DatanodeDescriptor(new DatanodeID("h3", 5020), "/rackA"),
-      new DatanodeDescriptor(new DatanodeID("h4", 5020), "/rackB"),
-      new DatanodeDescriptor(new DatanodeID("h5", 5020), "/rackB"),
-      new DatanodeDescriptor(new DatanodeID("h6", 5020), "/rackB")
-    );
-  private final List<DatanodeDescriptor> rackA = nodes.subList(0, 3);
-  private final List<DatanodeDescriptor> rackB = nodes.subList(3, 6);
-
+  private List<DatanodeDescriptor> nodes;
+  private List<DatanodeDescriptor> rackA;
+  private List<DatanodeDescriptor> rackB;
+
   /**
    * Some of these tests exercise code which has some randomness involved -
    * ie even if there's a bug, they may pass because the random node selection
@@ -82,6 +75,16 @@ public void setupMockCluster() throws IOException {
     fsn = Mockito.mock(FSNamesystem.class);
     Mockito.doReturn(true).when(fsn).hasWriteLock();
     bm = new BlockManager(fsn, fsn, conf);
+    nodes = ImmutableList.of(
+        new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/rackA"),
+        new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/rackA"),
+        new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/rackA"),
+        new DatanodeDescriptor(new DatanodeID("4.4.4.4", 5020), "/rackB"),
+        new DatanodeDescriptor(new DatanodeID("5.5.5.5", 5020), "/rackB"),
+        new DatanodeDescriptor(new DatanodeID("6.6.6.6", 5020), "/rackB")
+      );
+    rackA = nodes.subList(0, 3);
+    rackB = nodes.subList(3, 6);
   }
 
   private void addNodes(Iterable<DatanodeDescriptor> nodesToAdd) {
@@ -116,7 +119,7 @@ public void testBasicReplication() throws Exception {
   }
 
   private void doBasicTest(int testIndex) {
-    List<DatanodeDescriptor> origNodes = nodes(0, 1);
+    List<DatanodeDescriptor> origNodes = getNodes(0, 1);
     BlockInfo blockInfo = addBlockOnNodes((long)testIndex, origNodes);
 
     DatanodeDescriptor[] pipeline = scheduleSingleReplication(blockInfo);
@@ -147,7 +150,7 @@ public void testTwoOfThreeNodesDecommissioned() throws Exception {
 
   private void doTestTwoOfThreeNodesDecommissioned(int testIndex) throws Exception {
     // Block originally on A1, A2, B1
-    List<DatanodeDescriptor> origNodes = nodes(0, 1, 3);
+    List<DatanodeDescriptor> origNodes = getNodes(0, 1, 3);
     BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
 
     // Decommission two of the nodes (A1, A2)
@@ -157,7 +160,7 @@ private void doTestTwoOfThreeNodesDecommissioned(int testIndex) throws Exception
     assertTrue("Source of replication should be one of the nodes the block " +
         "was on. Was: " + pipeline[0], origNodes.contains(pipeline[0]));
-    assertEquals("Should have two targets", 3, pipeline.length);
+    assertEquals("Should have three targets", 3, pipeline.length);
 
     boolean foundOneOnRackA = false;
     for (int i = 1; i < pipeline.length; i++) {
@@ -190,7 +193,7 @@ public void testAllNodesHoldingReplicasDecommissioned() throws Exception {
 
   private void doTestAllNodesHoldingReplicasDecommissioned(int testIndex) throws Exception {
     // Block originally on A1, A2, B1
-    List<DatanodeDescriptor> origNodes = nodes(0, 1, 3);
+    List<DatanodeDescriptor> origNodes = getNodes(0, 1, 3);
     BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
 
     // Decommission all of the nodes
@@ -242,7 +245,7 @@ public void testOneOfTwoRacksDecommissioned() throws Exception {
 
   private void doTestOneOfTwoRacksDecommissioned(int testIndex) throws Exception {
     // Block originally on A1, A2, B1
-    List<DatanodeDescriptor> origNodes = nodes(0, 1, 3);
+    List<DatanodeDescriptor> origNodes = getNodes(0, 1, 3);
     BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
 
     // Decommission all of the nodes in rack A
@@ -252,7 +255,7 @@ private void doTestOneOfTwoRacksDecommissioned(int testIndex) throws Exception {
     assertTrue("Source of replication should be one of the nodes the block " +
         "was on. Was: " + pipeline[0], origNodes.contains(pipeline[0]));
-    assertEquals("Should have 2 targets", 3, pipeline.length);
+    assertEquals("Should have three targets", 3, pipeline.length);
 
     boolean foundOneOnRackB = false;
     for (int i = 1; i < pipeline.length; i++) {
@@ -273,7 +276,8 @@ private void doTestOneOfTwoRacksDecommissioned(int testIndex) throws Exception {
     // the block is still under-replicated. Add a new node. This should allow
     // the third off-rack replica.
-    DatanodeDescriptor rackCNode = new DatanodeDescriptor(new DatanodeID("h7", 100), "/rackC");
+    DatanodeDescriptor rackCNode =
+      new DatanodeDescriptor(new DatanodeID("7.7.7.7", 100), "/rackC");
     addNodes(ImmutableList.of(rackCNode));
     try {
       DatanodeDescriptor[] pipeline2 = scheduleSingleReplication(blockInfo);
@@ -359,7 +363,7 @@ private BlockInfo blockOnNodes(long blkId, List<DatanodeDescriptor> nodes) {
     return blockInfo;
   }
 
-  private List<DatanodeDescriptor> nodes(int ... indexes) {
+  private List<DatanodeDescriptor> getNodes(int ... indexes) {
     List<DatanodeDescriptor> ret = Lists.newArrayList();
     for (int idx : indexes) {
       ret.add(nodes.get(idx));
@@ -368,7 +372,7 @@ private List<DatanodeDescriptor> nodes(int ... indexes) {
   }
 
   private List<DatanodeDescriptor> startDecommission(int ... indexes) {
-    List<DatanodeDescriptor> nodes = nodes(indexes);
+    List<DatanodeDescriptor> nodes = getNodes(indexes);
     for (DatanodeDescriptor node : nodes) {
       node.startDecommission();
     }
@@ -404,8 +408,9 @@ private DatanodeDescriptor[] scheduleSingleReplication(Block block) {
     LinkedListMultimap<DatanodeDescriptor, BlockTargetPair> repls = getAllPendingReplications();
     assertEquals(1, repls.size());
-    Entry<DatanodeDescriptor, BlockTargetPair> repl = repls.entries()
-        .iterator().next();
+    Entry<DatanodeDescriptor, BlockTargetPair> repl =
+      repls.entries().iterator().next();
+
     DatanodeDescriptor[] targets = repl.getValue().targets;
 
     DatanodeDescriptor[] pipeline = new DatanodeDescriptor[1 + targets.length];
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
index 08607093dbe..44fa7c24a5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
@@ -18,73 +18,75 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
+
 import org.junit.Before;
 import org.junit.Test;
 
+import static org.junit.Assert.*;
+
 public class TestHost2NodesMap {
   private Host2NodesMap map = new Host2NodesMap();
-  private final DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
-    new DatanodeDescriptor(new DatanodeID("ip1", "h1", "", 5020, -1, -1), "/d1/r1"),
-    new DatanodeDescriptor(new DatanodeID("ip2", "h1", "", 5020, -1, -1), "/d1/r1"),
-    new DatanodeDescriptor(new DatanodeID("ip3", "h1", "", 5020, -1, -1), "/d1/r2"),
-    new DatanodeDescriptor(new DatanodeID("ip3", "h1", "", 5030, -1, -1), "/d1/r2"),
-  };
-  private final DatanodeDescriptor NULL_NODE = null;
-  private final DatanodeDescriptor NODE = new DatanodeDescriptor(new DatanodeID("h3", 5040), "/d1/r4");
-
+  private DatanodeDescriptor dataNodes[];
+
   @Before
   public void setup() {
-    for(DatanodeDescriptor node:dataNodes) {
+    dataNodes = new DatanodeDescriptor[] {
+      new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/d1/r1"),
+      new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/d1/r1"),
+      new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/d1/r2"),
+      new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5030), "/d1/r2"),
+    };
+    for (DatanodeDescriptor node : dataNodes) {
       map.add(node);
     }
-    map.add(NULL_NODE);
+    map.add(null);
   }
 
   @Test
   public void testContains() throws Exception {
-    for(int i=0; i
 q = msgs.takeBlockQueue(block1Gs2DifferentInstance);
     assertEquals(
-        "ReportedBlockInfo [block=blk_1_1, dn=fake:100, reportedState=FINALIZED]," +
-        "ReportedBlockInfo [block=blk_1_2, dn=fake:100, reportedState=FINALIZED]",
+ "ReportedBlockInfo [block=blk_1_1, dn=127.0.0.1:50010, reportedState=FINALIZED]," + + "ReportedBlockInfo [block=blk_1_2, dn=127.0.0.1:50010, reportedState=FINALIZED]", Joiner.on(",").join(q)); assertEquals(0, msgs.count()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index 49925ab885a..9ce24a7fae6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -19,7 +19,7 @@ import static org.junit.Assert.*; -import java.io.IOException; +import java.io.File; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -39,54 +39,55 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.Node; +import org.junit.BeforeClass; import org.junit.Test; public class TestReplicationPolicy { - private Random random= DFSUtil.getRandom(); + private Random random = DFSUtil.getRandom(); private static final int BLOCK_SIZE = 1024; private static final int NUM_OF_DATANODES = 6; - private static final Configuration CONF = new HdfsConfiguration(); - private static final NetworkTopology cluster; - private static final NameNode namenode; - private static final BlockPlacementPolicy replicator; + private static NetworkTopology cluster; + private static NameNode namenode; + private static BlockPlacementPolicy replicator; private static final String filename = "/dummyfile.txt"; - private static final DatanodeDescriptor dataNodes[] = - new DatanodeDescriptor[] { - new DatanodeDescriptor(new DatanodeID("h1", 5020), "/d1/r1"), - new DatanodeDescriptor(new DatanodeID("h2", 5020), "/d1/r1"), - new DatanodeDescriptor(new DatanodeID("h3", 5020), "/d1/r2"), - new DatanodeDescriptor(new DatanodeID("h4", 5020), "/d1/r2"), - new DatanodeDescriptor(new DatanodeID("h5", 5020), "/d2/r3"), - new DatanodeDescriptor(new DatanodeID("h6", 5020), "/d2/r3") - }; - - private final static DatanodeDescriptor NODE = - new DatanodeDescriptor(new DatanodeID("h7", 5020), "/d2/r4"); - - static { - try { - FileSystem.setDefaultUri(CONF, "hdfs://localhost:0"); - CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0"); - DFSTestUtil.formatNameNode(CONF); - namenode = new NameNode(CONF); - } catch (IOException e) { - e.printStackTrace(); - throw (RuntimeException)new RuntimeException().initCause(e); - } + private static DatanodeDescriptor dataNodes[]; + + @BeforeClass + public static void setupCluster() throws Exception { + Configuration conf = new HdfsConfiguration(); + dataNodes = new DatanodeDescriptor[] { + new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/d1/r1"), + new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/d1/r1"), + new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/d1/r2"), + new DatanodeDescriptor(new DatanodeID("4.4.4.4", 5020), "/d1/r2"), + new DatanodeDescriptor(new DatanodeID("5.5.5.5", 5020), "/d2/r3"), + new DatanodeDescriptor(new DatanodeID("6.6.6.6", 5020), "/d2/r3") + }; + + FileSystem.setDefaultUri(conf, "hdfs://localhost:0"); + conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0"); + File baseDir = new File(System.getProperty( + "test.build.data", "build/test/data"), "dfs/"); 
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        new File(baseDir, "name").getPath());
+
+    DFSTestUtil.formatNameNode(conf);
+    namenode = new NameNode(conf);
+
     final BlockManager bm = namenode.getNamesystem().getBlockManager();
     replicator = bm.getBlockPlacementPolicy();
     cluster = bm.getDatanodeManager().getNetworkTopology();
     // construct network topology
-    for(int i=0; i
 pickNodesAtRandom(int numNodes,
 
   /**
    * This test checks that chooseRandom works for an excluded node.
    */
+  @Test
   public void testChooseRandomExcludedNode() {
     String scope = "~" + NodeBase.getPath(dataNodes[0]);
     Map<Node, Integer> frequency = pickNodesAtRandom(100, scope);
 
@@ -186,6 +198,7 @@ public void testChooseRandomExcludedNode() {
   /**
    * This test checks that chooseRandom works for an excluded rack.
    */
+  @Test
   public void testChooseRandomExcludedRack() {
     Map<Node, Integer> frequency = pickNodesAtRandom(100, "~" + "/d2");
     // all the nodes on the second rack should be zero