HDFS-3230. svn merge -c 1336815 from trunk

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1336867 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins 2012-05-10 20:08:41 +00:00
parent 8ae40b7085
commit 2dd260be1b
12 changed files with 179 additions and 143 deletions
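
In short, the merge replaces hand-rolled DatanodeID construction scattered across the HDFS tests with shared helpers on DFSTestUtil, and converts several static test fixtures to JUnit @Before/@BeforeClass setup. A minimal before/after sketch of the call-site pattern, condensed from the hunks below (variable names are illustrative; port stands in for the test server's port):

// Before: each test wired up its own DatanodeID by hand,
// as in the TestDFSClientRetries hunk below.
DatanodeID fakeDnId = new DatanodeID(
    "localhost", "localhost", "fake-storage", port, 0, port);

// After: tests share the helper added to DFSTestUtil in this change.
DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(port);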


@@ -515,6 +515,8 @@ Release 2.0.0 - UNRELEASED
HDFS-3395. NN doesn't start with HA+security enabled and HTTP address set to 0.0.0.0. (atm)
HDFS-3230. Cleanup DatanodeID creation in the tests. (eli)
BREAKDOWN OF HDFS-1623 SUBTASKS
HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)


@@ -40,7 +40,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;


@@ -706,13 +706,23 @@ public class DFSTestUtil {
.join(nameservices));
}
public static DatanodeID getLocalDatanodeID() {
return new DatanodeID("127.0.0.1", "localhost", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
}
public static DatanodeID getLocalDatanodeID(int port) {
return new DatanodeID("127.0.0.1", "localhost", "", port, port, port);
}
public static DatanodeDescriptor getLocalDatanodeDescriptor() {
return new DatanodeDescriptor(
new DatanodeID("127.0.0.1", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT));
return new DatanodeDescriptor(getLocalDatanodeID());
}
public static DatanodeInfo getLocalDatanodeInfo() {
return new DatanodeInfo(
new DatanodeID("127.0.0.1", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT));
return new DatanodeInfo(getLocalDatanodeID());
}
public static DatanodeInfo getLocalDatanodeInfo(int port) {
return new DatanodeInfo(getLocalDatanodeID(port));
}
}
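
The no-argument helper pins the ID to 127.0.0.1 on DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT (50010), which is why the expected strings in the TestPendingDataNodeMessages hunk below become "127.0.0.1:50010". A short usage sketch of the new helpers (hypothetical test code, not part of the patch):

// Each helper yields a localhost datanode identity for test use.
DatanodeID id = DFSTestUtil.getLocalDatanodeID();                 // 127.0.0.1:50010
DatanodeID idOnPort = DFSTestUtil.getLocalDatanodeID(12345);      // explicit port
DatanodeInfo info = DFSTestUtil.getLocalDatanodeInfo(12345);      // info wrapping an ID on that port
DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();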


@@ -626,8 +626,7 @@ public class TestDFSClientRetries extends TestCase {
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
DatanodeID fakeDnId = new DatanodeID(
"localhost", "localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);


@@ -62,7 +62,7 @@ public class TestReplaceDatanodeOnFailure {
final DatanodeInfo[][] datanodes = new DatanodeInfo[infos.length + 1][];
datanodes[0] = new DatanodeInfo[0];
for(int i = 0; i < infos.length; ) {
infos[i] = new DatanodeInfo(new DatanodeID("dn" + i, 100));
infos[i] = DFSTestUtil.getLocalDatanodeInfo(50020 + i);
i++;
datanodes[i] = new DatanodeInfo[i];
System.arraycopy(infos, 0, datanodes[i], 0, datanodes[i].length);


@@ -131,7 +131,7 @@ public class TestPBHelper {
@Test
public void testConvertDatanodeID() {
DatanodeID dn = new DatanodeID("node", "node", "sid", 1, 2, 3);
DatanodeID dn = DFSTestUtil.getLocalDatanodeID();
DatanodeIDProto dnProto = PBHelper.convert(dn);
DatanodeID dn2 = PBHelper.convert(dnProto);
compare(dn, dn2);
@@ -280,10 +280,6 @@ public class TestPBHelper {
return new ExtendedBlock("bpid", blkid, 100, 2);
}
private DatanodeInfo getDNInfo() {
return new DatanodeInfo(new DatanodeID("node", "node", "sid", 0, 1, 2));
}
private void compare(DatanodeInfo dn1, DatanodeInfo dn2) {
assertEquals(dn1.getAdminState(), dn2.getAdminState());
assertEquals(dn1.getBlockPoolUsed(), dn2.getBlockPoolUsed());
@@ -316,7 +312,9 @@ public class TestPBHelper {
@Test
public void testConvertRecoveringBlock() {
DatanodeInfo[] dnInfo = new DatanodeInfo[] { getDNInfo(), getDNInfo() };
DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };
RecoveringBlock b = new RecoveringBlock(getExtendedBlock(), dnInfo, 3);
RecoveringBlockProto bProto = PBHelper.convert(b);
RecoveringBlock b1 = PBHelper.convert(bProto);
@@ -330,7 +328,9 @@ public class TestPBHelper {
@Test
public void testConvertBlockRecoveryCommand() {
DatanodeInfo[] dnInfo = new DatanodeInfo[] { getDNInfo(), getDNInfo() };
DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };
List<RecoveringBlock> blks = ImmutableList.of(
new RecoveringBlock(getExtendedBlock(1), dnInfo, 3),
@@ -401,11 +401,14 @@ public class TestPBHelper {
@Test
public void testConvertLocatedBlock() {
DatanodeInfo [] dnInfos = new DatanodeInfo[3];
dnInfos[0] = new DatanodeInfo("host0", "host0", "0", 5000, 5001, 5002, 20000, 10001, 9999,
dnInfos[0] = new DatanodeInfo("127.0.0.1", "host1", "0",
5000, 5001, 5002, 20000, 10001, 9999,
59, 69, 32, "local", AdminStates.DECOMMISSION_INPROGRESS);
dnInfos[1] = new DatanodeInfo("host1", "host1", "1", 5000, 5001, 5002, 20000, 10001, 9999,
dnInfos[1] = new DatanodeInfo("127.0.0.1", "host2", "1",
5000, 5001, 5002, 20000, 10001, 9999,
59, 69, 32, "local", AdminStates.DECOMMISSIONED);
dnInfos[2] = new DatanodeInfo("host2", "host2", "2", 5000, 5001, 5002, 20000, 10001, 9999,
dnInfos[2] = new DatanodeInfo("127.0.0.1", "host3", "2",
5000, 5001, 5002, 20000, 10001, 9999,
59, 69, 32, "local", AdminStates.NORMAL);
LocatedBlock lb = new LocatedBlock(
new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
@@ -424,7 +427,7 @@ public class TestPBHelper {
@Test
public void testConvertDatanodeRegistration() {
DatanodeID dnId = new DatanodeID("host", "host", "xyz", 0, 1, 0);
DatanodeID dnId = DFSTestUtil.getLocalDatanodeID();
BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10,
getBlockKey(1), keys);


@@ -42,6 +42,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -279,8 +280,7 @@ public class TestBlockToken {
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
DatanodeID fakeDnId = new DatanodeID("localhost",
"localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);


@@ -47,16 +47,9 @@ import com.google.common.collect.LinkedListMultimap;
import com.google.common.collect.Lists;
public class TestBlockManager {
private final List<DatanodeDescriptor> nodes = ImmutableList.of(
new DatanodeDescriptor(new DatanodeID("h1", 5020), "/rackA"),
new DatanodeDescriptor(new DatanodeID("h2", 5020), "/rackA"),
new DatanodeDescriptor(new DatanodeID("h3", 5020), "/rackA"),
new DatanodeDescriptor(new DatanodeID("h4", 5020), "/rackB"),
new DatanodeDescriptor(new DatanodeID("h5", 5020), "/rackB"),
new DatanodeDescriptor(new DatanodeID("h6", 5020), "/rackB")
);
private final List<DatanodeDescriptor> rackA = nodes.subList(0, 3);
private final List<DatanodeDescriptor> rackB = nodes.subList(3, 6);
private List<DatanodeDescriptor> nodes;
private List<DatanodeDescriptor> rackA;
private List<DatanodeDescriptor> rackB;
/**
* Some of these tests exercise code which has some randomness involved -
@@ -82,6 +75,16 @@ public class TestBlockManager {
fsn = Mockito.mock(FSNamesystem.class);
Mockito.doReturn(true).when(fsn).hasWriteLock();
bm = new BlockManager(fsn, fsn, conf);
nodes = ImmutableList.of(
new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/rackA"),
new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/rackA"),
new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/rackA"),
new DatanodeDescriptor(new DatanodeID("4.4.4.4", 5020), "/rackB"),
new DatanodeDescriptor(new DatanodeID("5.5.5.5", 5020), "/rackB"),
new DatanodeDescriptor(new DatanodeID("6.6.6.6", 5020), "/rackB")
);
rackA = nodes.subList(0, 3);
rackB = nodes.subList(3, 6);
}
private void addNodes(Iterable<DatanodeDescriptor> nodesToAdd) {
@@ -116,7 +119,7 @@
}
private void doBasicTest(int testIndex) {
List<DatanodeDescriptor> origNodes = nodes(0, 1);
List<DatanodeDescriptor> origNodes = getNodes(0, 1);
BlockInfo blockInfo = addBlockOnNodes((long)testIndex, origNodes);
DatanodeDescriptor[] pipeline = scheduleSingleReplication(blockInfo);
@@ -147,7 +150,7 @@
private void doTestTwoOfThreeNodesDecommissioned(int testIndex) throws Exception {
// Block originally on A1, A2, B1
List<DatanodeDescriptor> origNodes = nodes(0, 1, 3);
List<DatanodeDescriptor> origNodes = getNodes(0, 1, 3);
BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
// Decommission two of the nodes (A1, A2)
@@ -157,7 +160,7 @@
assertTrue("Source of replication should be one of the nodes the block " +
"was on. Was: " + pipeline[0],
origNodes.contains(pipeline[0]));
assertEquals("Should have two targets", 3, pipeline.length);
assertEquals("Should have three targets", 3, pipeline.length);
boolean foundOneOnRackA = false;
for (int i = 1; i < pipeline.length; i++) {
@@ -190,7 +193,7 @@
private void doTestAllNodesHoldingReplicasDecommissioned(int testIndex) throws Exception {
// Block originally on A1, A2, B1
List<DatanodeDescriptor> origNodes = nodes(0, 1, 3);
List<DatanodeDescriptor> origNodes = getNodes(0, 1, 3);
BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
// Decommission all of the nodes
@@ -242,7 +245,7 @@
private void doTestOneOfTwoRacksDecommissioned(int testIndex) throws Exception {
// Block originally on A1, A2, B1
List<DatanodeDescriptor> origNodes = nodes(0, 1, 3);
List<DatanodeDescriptor> origNodes = getNodes(0, 1, 3);
BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
// Decommission all of the nodes in rack A
@@ -252,7 +255,7 @@
assertTrue("Source of replication should be one of the nodes the block " +
"was on. Was: " + pipeline[0],
origNodes.contains(pipeline[0]));
assertEquals("Should have 2 targets", 3, pipeline.length);
assertEquals("Should have three targets", 3, pipeline.length);
boolean foundOneOnRackB = false;
for (int i = 1; i < pipeline.length; i++) {
@@ -273,7 +276,8 @@
// the block is still under-replicated. Add a new node. This should allow
// the third off-rack replica.
DatanodeDescriptor rackCNode = new DatanodeDescriptor(new DatanodeID("h7", 100), "/rackC");
DatanodeDescriptor rackCNode =
new DatanodeDescriptor(new DatanodeID("7.7.7.7", 100), "/rackC");
addNodes(ImmutableList.of(rackCNode));
try {
DatanodeDescriptor[] pipeline2 = scheduleSingleReplication(blockInfo);
@@ -359,7 +363,7 @@
return blockInfo;
}
private List<DatanodeDescriptor> nodes(int ... indexes) {
private List<DatanodeDescriptor> getNodes(int ... indexes) {
List<DatanodeDescriptor> ret = Lists.newArrayList();
for (int idx : indexes) {
ret.add(nodes.get(idx));
@@ -368,7 +372,7 @@
}
private List<DatanodeDescriptor> startDecommission(int ... indexes) {
List<DatanodeDescriptor> nodes = nodes(indexes);
List<DatanodeDescriptor> nodes = getNodes(indexes);
for (DatanodeDescriptor node : nodes) {
node.startDecommission();
}
@@ -404,8 +408,9 @@
LinkedListMultimap<DatanodeDescriptor, BlockTargetPair> repls = getAllPendingReplications();
assertEquals(1, repls.size());
Entry<DatanodeDescriptor, BlockTargetPair> repl = repls.entries()
.iterator().next();
Entry<DatanodeDescriptor, BlockTargetPair> repl =
repls.entries().iterator().next();
DatanodeDescriptor[] targets = repl.getValue().targets;
DatanodeDescriptor[] pipeline = new DatanodeDescriptor[1 + targets.length];


@@ -18,73 +18,75 @@
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestHost2NodesMap {
private Host2NodesMap map = new Host2NodesMap();
private final DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
new DatanodeDescriptor(new DatanodeID("ip1", "h1", "", 5020, -1, -1), "/d1/r1"),
new DatanodeDescriptor(new DatanodeID("ip2", "h1", "", 5020, -1, -1), "/d1/r1"),
new DatanodeDescriptor(new DatanodeID("ip3", "h1", "", 5020, -1, -1), "/d1/r2"),
new DatanodeDescriptor(new DatanodeID("ip3", "h1", "", 5030, -1, -1), "/d1/r2"),
};
private final DatanodeDescriptor NULL_NODE = null;
private final DatanodeDescriptor NODE = new DatanodeDescriptor(new DatanodeID("h3", 5040),
"/d1/r4");
private DatanodeDescriptor dataNodes[];
@Before
public void setup() {
for(DatanodeDescriptor node:dataNodes) {
dataNodes = new DatanodeDescriptor[] {
new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/d1/r1"),
new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/d1/r1"),
new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/d1/r2"),
new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5030), "/d1/r2"),
};
for (DatanodeDescriptor node : dataNodes) {
map.add(node);
}
map.add(NULL_NODE);
map.add(null);
}
@Test
public void testContains() throws Exception {
for(int i=0; i<dataNodes.length; i++) {
DatanodeDescriptor nodeNotInMap =
new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5040), "/d1/r4");
for (int i = 0; i < dataNodes.length; i++) {
assertTrue(map.contains(dataNodes[i]));
}
assertFalse(map.contains(NULL_NODE));
assertFalse(map.contains(NODE));
assertFalse(map.contains(null));
assertFalse(map.contains(nodeNotInMap));
}
@Test
public void testGetDatanodeByHost() throws Exception {
assertTrue(map.getDatanodeByHost("ip1")==dataNodes[0]);
assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
DatanodeDescriptor node = map.getDatanodeByHost("ip3");
assertTrue(node==dataNodes[2] || node==dataNodes[3]);
assertTrue(null==map.getDatanodeByHost("ip4"));
assertEquals(map.getDatanodeByHost("1.1.1.1"), dataNodes[0]);
assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
DatanodeDescriptor node = map.getDatanodeByHost("3.3.3.3");
assertTrue(node == dataNodes[2] || node == dataNodes[3]);
assertNull(map.getDatanodeByHost("4.4.4.4"));
}
@Test
public void testRemove() throws Exception {
assertFalse(map.remove(NODE));
DatanodeDescriptor nodeNotInMap =
new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5040), "/d1/r4");
assertFalse(map.remove(nodeNotInMap));
assertTrue(map.remove(dataNodes[0]));
assertTrue(map.getDatanodeByHost("ip1")==null);
assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
DatanodeDescriptor node = map.getDatanodeByHost("ip3");
assertTrue(map.getDatanodeByHost("1.1.1.1.")==null);
assertTrue(map.getDatanodeByHost("2.2.2.2")==dataNodes[1]);
DatanodeDescriptor node = map.getDatanodeByHost("3.3.3.3");
assertTrue(node==dataNodes[2] || node==dataNodes[3]);
assertTrue(null==map.getDatanodeByHost("ip4"));
assertNull(map.getDatanodeByHost("4.4.4.4"));
assertTrue(map.remove(dataNodes[2]));
assertTrue(map.getDatanodeByHost("ip1")==null);
assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
assertTrue(map.getDatanodeByHost("ip3")==dataNodes[3]);
assertNull(map.getDatanodeByHost("1.1.1.1"));
assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
assertEquals(map.getDatanodeByHost("3.3.3.3"), dataNodes[3]);
assertTrue(map.remove(dataNodes[3]));
assertTrue(map.getDatanodeByHost("ip1")==null);
assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
assertTrue(map.getDatanodeByHost("ip3")==null);
assertNull(map.getDatanodeByHost("1.1.1.1"));
assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
assertNull(map.getDatanodeByHost("3.3.3.3"));
assertFalse(map.remove(NULL_NODE));
assertFalse(map.remove(null));
assertTrue(map.remove(dataNodes[1]));
assertFalse(map.remove(dataNodes[1]));
}


@@ -21,6 +21,7 @@ import static org.junit.Assert.*;
import java.util.Queue;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.PendingDataNodeMessages.ReportedBlockInfo;
@@ -39,11 +40,9 @@ public class TestPendingDataNodeMessages {
new Block(1, 0, 2);
private final Block block2Gs1 = new Block(2, 0, 1);
private final DatanodeDescriptor fakeDN = new DatanodeDescriptor(
new DatanodeID("fake", 100));
@Test
public void testQueues() {
DatanodeDescriptor fakeDN = DFSTestUtil.getLocalDatanodeDescriptor();
msgs.enqueueReportedBlock(fakeDN, block1Gs1, ReplicaState.FINALIZED);
msgs.enqueueReportedBlock(fakeDN, block1Gs2, ReplicaState.FINALIZED);
@@ -56,8 +55,8 @@
Queue<ReportedBlockInfo> q =
msgs.takeBlockQueue(block1Gs2DifferentInstance);
assertEquals(
"ReportedBlockInfo [block=blk_1_1, dn=fake:100, reportedState=FINALIZED]," +
"ReportedBlockInfo [block=blk_1_2, dn=fake:100, reportedState=FINALIZED]",
"ReportedBlockInfo [block=blk_1_1, dn=127.0.0.1:50010, reportedState=FINALIZED]," +
"ReportedBlockInfo [block=blk_1_2, dn=127.0.0.1:50010, reportedState=FINALIZED]",
Joiner.on(",").join(q));
assertEquals(0, msgs.count());


@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.junit.Assert.*;
import java.io.IOException;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
@@ -39,48 +39,49 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestReplicationPolicy {
private Random random= DFSUtil.getRandom();
private Random random = DFSUtil.getRandom();
private static final int BLOCK_SIZE = 1024;
private static final int NUM_OF_DATANODES = 6;
private static final Configuration CONF = new HdfsConfiguration();
private static final NetworkTopology cluster;
private static final NameNode namenode;
private static final BlockPlacementPolicy replicator;
private static NetworkTopology cluster;
private static NameNode namenode;
private static BlockPlacementPolicy replicator;
private static final String filename = "/dummyfile.txt";
private static final DatanodeDescriptor dataNodes[] =
new DatanodeDescriptor[] {
new DatanodeDescriptor(new DatanodeID("h1", 5020), "/d1/r1"),
new DatanodeDescriptor(new DatanodeID("h2", 5020), "/d1/r1"),
new DatanodeDescriptor(new DatanodeID("h3", 5020), "/d1/r2"),
new DatanodeDescriptor(new DatanodeID("h4", 5020), "/d1/r2"),
new DatanodeDescriptor(new DatanodeID("h5", 5020), "/d2/r3"),
new DatanodeDescriptor(new DatanodeID("h6", 5020), "/d2/r3")
private static DatanodeDescriptor dataNodes[];
@BeforeClass
public static void setupCluster() throws Exception {
Configuration conf = new HdfsConfiguration();
dataNodes = new DatanodeDescriptor[] {
new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/d1/r1"),
new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/d1/r1"),
new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/d1/r2"),
new DatanodeDescriptor(new DatanodeID("4.4.4.4", 5020), "/d1/r2"),
new DatanodeDescriptor(new DatanodeID("5.5.5.5", 5020), "/d2/r3"),
new DatanodeDescriptor(new DatanodeID("6.6.6.6", 5020), "/d2/r3")
};
private final static DatanodeDescriptor NODE =
new DatanodeDescriptor(new DatanodeID("h7", 5020), "/d2/r4");
FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
File baseDir = new File(System.getProperty(
"test.build.data", "build/test/data"), "dfs/");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
new File(baseDir, "name").getPath());
DFSTestUtil.formatNameNode(conf);
namenode = new NameNode(conf);
static {
try {
FileSystem.setDefaultUri(CONF, "hdfs://localhost:0");
CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
DFSTestUtil.formatNameNode(CONF);
namenode = new NameNode(CONF);
} catch (IOException e) {
e.printStackTrace();
throw (RuntimeException)new RuntimeException().initCause(e);
}
final BlockManager bm = namenode.getNamesystem().getBlockManager();
replicator = bm.getBlockPlacementPolicy();
cluster = bm.getDatanodeManager().getNetworkTopology();
// construct network topology
for(int i=0; i<NUM_OF_DATANODES; i++) {
for (int i=0; i < NUM_OF_DATANODES; i++) {
cluster.add(dataNodes[i]);
}
for(int i=0; i<NUM_OF_DATANODES; i++) {
for (int i=0; i < NUM_OF_DATANODES; i++) {
dataNodes[i].updateHeartbeat(
2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
@@ -337,22 +338,25 @@ public class TestReplicationPolicy {
*/
@Test
public void testChooseTarget5() throws Exception {
DatanodeDescriptor writerDesc =
new DatanodeDescriptor(new DatanodeID("7.7.7.7", 5020), "/d2/r4");
DatanodeDescriptor[] targets;
targets = replicator.chooseTarget(filename,
0, NODE, BLOCK_SIZE);
0, writerDesc, BLOCK_SIZE);
assertEquals(targets.length, 0);
targets = replicator.chooseTarget(filename,
1, NODE, BLOCK_SIZE);
1, writerDesc, BLOCK_SIZE);
assertEquals(targets.length, 1);
targets = replicator.chooseTarget(filename,
2, NODE, BLOCK_SIZE);
2, writerDesc, BLOCK_SIZE);
assertEquals(targets.length, 2);
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
targets = replicator.chooseTarget(filename,
3, NODE, BLOCK_SIZE);
3, writerDesc, BLOCK_SIZE);
assertEquals(targets.length, 3);
assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
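
Besides the DatanodeID cleanup, the TestReplicationPolicy hunk above swaps a static initializer for a @BeforeClass method. Static initializers cannot throw checked exceptions, which forced the old wrap-and-rethrow idiom; a @BeforeClass method can declare them directly. A generic sketch of the two idioms (illustrative; setUpExpensiveFixture is a placeholder, not from the patch):

// Old idiom: checked exceptions must be rewrapped inside a static block.
static {
  try {
    setUpExpensiveFixture();
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}

// New idiom: JUnit runs this once per class and reports failures directly.
@BeforeClass
public static void setupCluster() throws Exception {
  setUpExpensiveFixture();
}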


@@ -18,52 +18,59 @@
package org.apache.hadoop.net;
import java.util.HashMap;
import java.util.Map;
import junit.framework.TestCase;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
public class TestNetworkTopology extends TestCase {
private final static NetworkTopology cluster = new NetworkTopology();
private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
new DatanodeDescriptor(new DatanodeID("h1", 5020), "/d1/r1"),
new DatanodeDescriptor(new DatanodeID("h2", 5020), "/d1/r1"),
new DatanodeDescriptor(new DatanodeID("h3", 5020), "/d1/r2"),
new DatanodeDescriptor(new DatanodeID("h4", 5020), "/d1/r2"),
new DatanodeDescriptor(new DatanodeID("h5", 5020), "/d1/r2"),
new DatanodeDescriptor(new DatanodeID("h6", 5020), "/d2/r3"),
new DatanodeDescriptor(new DatanodeID("h7", 5020), "/d2/r3")
};
private final static DatanodeDescriptor NODE =
new DatanodeDescriptor(new DatanodeID("h8", 5020), "/d2/r4");
import org.junit.Test;
import org.junit.Before;
static {
for(int i=0; i<dataNodes.length; i++) {
import static org.junit.Assert.*;
public class TestNetworkTopology {
private final static NetworkTopology cluster = new NetworkTopology();
private DatanodeDescriptor dataNodes[];
@Before
public void setupDatanodes() {
dataNodes = new DatanodeDescriptor[] {
new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/d1/r1"),
new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/d1/r1"),
new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/d1/r2"),
new DatanodeDescriptor(new DatanodeID("4.4.4.4", 5020), "/d1/r2"),
new DatanodeDescriptor(new DatanodeID("5.5.5.5", 5020), "/d1/r2"),
new DatanodeDescriptor(new DatanodeID("6.6.6.6", 5020), "/d2/r3"),
new DatanodeDescriptor(new DatanodeID("7.7.7.7", 5020), "/d2/r3")
};
for (int i = 0; i < dataNodes.length; i++) {
cluster.add(dataNodes[i]);
}
}
@Test
public void testContains() throws Exception {
for(int i=0; i<dataNodes.length; i++) {
DatanodeDescriptor nodeNotInMap =
new DatanodeDescriptor(new DatanodeID("8.8.8.8", 5020), "/d2/r4");
for (int i=0; i < dataNodes.length; i++) {
assertTrue(cluster.contains(dataNodes[i]));
}
assertFalse(cluster.contains(NODE));
assertFalse(cluster.contains(nodeNotInMap));
}
@Test
public void testNumOfChildren() throws Exception {
assertEquals(cluster.getNumOfLeaves(), dataNodes.length);
}
@Test
public void testCreateInvalidTopology() throws Exception {
NetworkTopology invalCluster = new NetworkTopology();
DatanodeDescriptor invalDataNodes[] = new DatanodeDescriptor[] {
new DatanodeDescriptor(new DatanodeID("h1", 5020), "/d1/r1"),
new DatanodeDescriptor(new DatanodeID("h2", 5020), "/d1/r1"),
new DatanodeDescriptor(new DatanodeID("h3", 5020), "/d1")
new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/d1/r1"),
new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/d1/r1"),
new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/d1")
};
invalCluster.add(invalDataNodes[0]);
invalCluster.add(invalDataNodes[1]);
@@ -77,6 +84,7 @@ public class TestNetworkTopology extends TestCase {
}
}
@Test
public void testRacks() throws Exception {
assertEquals(cluster.getNumOfRacks(), 3);
assertTrue(cluster.isOnSameRack(dataNodes[0], dataNodes[1]));
@@ -87,6 +95,7 @@
assertTrue(cluster.isOnSameRack(dataNodes[5], dataNodes[6]));
}
@Test
public void testGetDistance() throws Exception {
assertEquals(cluster.getDistance(dataNodes[0], dataNodes[0]), 0);
assertEquals(cluster.getDistance(dataNodes[0], dataNodes[1]), 2);
@@ -94,6 +103,7 @@
assertEquals(cluster.getDistance(dataNodes[0], dataNodes[6]), 6);
}
@Test
public void testPseudoSortByDistance() throws Exception {
DatanodeDescriptor[] testNodes = new DatanodeDescriptor[3];
@@ -136,6 +146,7 @@
assertTrue(testNodes[2] == dataNodes[3]);
}
@Test
public void testRemove() throws Exception {
for(int i=0; i<dataNodes.length; i++) {
cluster.remove(dataNodes[i]);
@@ -173,6 +184,7 @@
/**
* This test checks that chooseRandom works for an excluded node.
*/
@Test
public void testChooseRandomExcludedNode() {
String scope = "~" + NodeBase.getPath(dataNodes[0]);
Map<Node, Integer> frequency = pickNodesAtRandom(100, scope);
@@ -186,6 +198,7 @@
/**
* This test checks that chooseRandom works for an excluded rack.
*/
@Test
public void testChooseRandomExcludedRack() {
Map<Node, Integer> frequency = pickNodesAtRandom(100, "~" + "/d2");
// all the nodes on the second rack should be zero