svn merge -c 1176994 from trunk for HDFS-2346.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1177019 13f79535-47bb-0310-9956-ffa450edef68
Aaron Myers 2011-09-28 19:00:46 +00:00
parent 0d329d55e9
commit 2738759fb5
3 changed files with 29 additions and 17 deletions

CHANGES.txt

@@ -1021,6 +1021,9 @@ Release 0.23.0 - Unreleased
     HDFS-2323. start-dfs.sh script fails for tarball install (tomwhite)
 
+    HDFS-2346. TestHost2NodesMap & TestReplicasMap will fail depending upon
+    execution order of test methods (Laxman via atm)
+
   BREAKDOWN OF HDFS-1073 SUBTASKS
 
     HDFS-1521. Persist transaction ID on disk between NN restarts.
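The changelog entry above records the root cause: both tests kept their fixtures in static fields, so a test method that mutated the shared state could break any method that happened to run after it, and JUnit does not guarantee method execution order. A minimal sketch of the failure mode, using a hypothetical Set-backed fixture rather than the real HDFS classes:

import static org.junit.Assert.assertTrue;

import java.util.HashSet;
import java.util.Set;

import org.junit.Test;

// Hypothetical stand-in for a static fixture like the old Host2NodesMap field.
public class OrderDependentTest {
  // Initialized once and shared by every test method in the class.
  private static final Set<String> hosts = new HashSet<String>();
  static {
    hosts.add("h1");
  }

  @Test
  public void testRemove() {
    // Mutates the shared fixture.
    assertTrue(hosts.remove("h1"));
  }

  @Test
  public void testContains() {
    // Passes only when it happens to run before testRemove.
    assertTrue(hosts.contains("h1"));
  }
}

Run testContains first and both methods pass; run testRemove first and testContains fails. That is exactly the order dependence HDFS-2346 describes.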

org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java

@@ -18,31 +18,34 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import junit.framework.TestCase;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.Host2NodesMap;
+import org.junit.Before;
+import org.junit.Test;
 
-public class TestHost2NodesMap extends TestCase {
-  static private Host2NodesMap map = new Host2NodesMap();
-  private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
+public class TestHost2NodesMap {
+  private Host2NodesMap map = new Host2NodesMap();
+  private final DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
     new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"),
     new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"),
     new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1/r2"),
     new DatanodeDescriptor(new DatanodeID("h3:5030"), "/d1/r2"),
   };
-  private final static DatanodeDescriptor NULL_NODE = null;
-  private final static DatanodeDescriptor NODE =
-    new DatanodeDescriptor(new DatanodeID("h3:5040"), "/d1/r4");
+  private final DatanodeDescriptor NULL_NODE = null;
+  private final DatanodeDescriptor NODE = new DatanodeDescriptor(new DatanodeID("h3:5040"),
+      "/d1/r4");
 
-  static {
+  @Before
+  public void setup() {
     for(DatanodeDescriptor node:dataNodes) {
       map.add(node);
     }
     map.add(NULL_NODE);
   }
 
+  @Test
   public void testContains() throws Exception {
     for(int i=0; i<dataNodes.length; i++) {
       assertTrue(map.contains(dataNodes[i]));
@@ -51,6 +54,7 @@ public void testContains() throws Exception {
     assertFalse(map.contains(NODE));
   }
 
+  @Test
   public void testGetDatanodeByHost() throws Exception {
     assertTrue(map.getDatanodeByHost("h1")==dataNodes[0]);
     assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
@@ -59,6 +63,7 @@ public void testGetDatanodeByHost() throws Exception {
     assertTrue(null==map.getDatanodeByHost("h4"));
   }
 
+  @Test
   public void testGetDatanodeByName() throws Exception {
     assertTrue(map.getDatanodeByName("h1:5020")==dataNodes[0]);
     assertTrue(map.getDatanodeByName("h1:5030")==null);
@@ -71,6 +76,7 @@ public void testGetDatanodeByName() throws Exception {
     assertTrue(map.getDatanodeByName(null)==null);
   }
 
+  @Test
   public void testRemove() throws Exception {
     assertFalse(map.remove(NODE));
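The fix above relies on standard JUnit 4 semantics: the runner creates a fresh instance of the test class for every @Test method, and @Before runs against that fresh instance, so instance fields cannot leak mutations between methods. A sketch of the same hypothetical fixture from the earlier example, rewritten in this pattern:

import static org.junit.Assert.assertTrue;

import java.util.HashSet;
import java.util.Set;

import org.junit.Before;
import org.junit.Test;

// Hypothetical counterpart to the fix: instance state plus @Before.
public class OrderIndependentTest {
  // A new instance (and thus a new set) exists for each test method.
  private final Set<String> hosts = new HashSet<String>();

  @Before
  public void setup() {
    hosts.add("h1");
  }

  @Test
  public void testRemove() {
    assertTrue(hosts.remove("h1"));   // safe: only this instance is mutated
  }

  @Test
  public void testContains() {
    assertTrue(hosts.contains("h1")); // passes in any execution order
  }
}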

org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java

@@ -17,21 +17,24 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
+
 import org.apache.hadoop.hdfs.protocol.Block;
-import static org.junit.Assert.*;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.Test;
 
 /**
  * Unit test for ReplicasMap class
  */
 public class TestReplicasMap {
-  private static final ReplicasMap map = new ReplicasMap(TestReplicasMap.class);
-  private static final String bpid = "BP-TEST";
-  private static final Block block = new Block(1234, 1234, 1234);
+  private final ReplicasMap map = new ReplicasMap(TestReplicasMap.class);
+  private final String bpid = "BP-TEST";
+  private final Block block = new Block(1234, 1234, 1234);
 
-  @BeforeClass
-  public static void setup() {
+  @Before
+  public void setup() {
     map.add(bpid, new FinalizedReplica(block, null, null));
   }
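For the same reason, the @BeforeClass hook here had to become @Before: @BeforeClass runs once, on a static method, before any test instance exists, so every test shared the single map populated at class load, whereas @Before repopulates a fresh instance before each test. A small lifecycle sketch (hypothetical class, standard JUnit 4 behavior):

import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

public class LifecycleSketch {
  @BeforeClass
  public static void oncePerClass() {
    // Runs a single time, before any test instance is created.
    System.out.println("@BeforeClass");
  }

  @Before
  public void beforeEachTest() {
    // Runs on a fresh instance before every @Test method.
    System.out.println("@Before");
  }

  @Test public void first() { }
  @Test public void second() { }
}

// Output: "@BeforeClass" once, then "@Before" twice (once per test).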