HDFS-3401. svn merge -c 1336972 from trunk
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1336973 13f79535-47bb-0310-9956-ffa450edef68
parent 45bf0a3cac
commit c9a9756a2c
@@ -308,6 +308,10 @@ Release 2.0.0 - UNRELEASED
     HDFS-3134. harden edit log loader against malformed or malicious input.
     (Colin Patrick McCabe via eli)
 
+    HDFS-3230. Cleanup DatanodeID creation in the tests. (eli)
+
+    HDFS-3401. Cleanup DatanodeDescriptor creation in the tests. (eli)
+
   OPTIMIZATIONS
 
     HDFS-2477. Optimize computing the diff between a block report and the
@@ -521,8 +525,6 @@ Release 2.0.0 - UNRELEASED
 
     HDFS-3395. NN doesn't start with HA+security enabled and HTTP address set to 0.0.0.0. (atm)
 
-    HDFS-3230. Cleanup DatanodeID creation in the tests. (eli)
-
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
@@ -59,6 +59,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -706,12 +707,19 @@ public class DFSTestUtil {
         .join(nameservices));
   }
 
+  private static DatanodeID getDatanodeID(String ipAddr) {
+    return new DatanodeID(ipAddr, "localhost",
+        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
+  }
+
   public static DatanodeID getLocalDatanodeID() {
-    return new DatanodeID("127.0.0.1", "localhost", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
+    return new DatanodeID("127.0.0.1", "localhost",
+        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
   }
 
   public static DatanodeID getLocalDatanodeID(int port) {
-    return new DatanodeID("127.0.0.1", "localhost", "", port, port, port);
+    return new DatanodeID("127.0.0.1", "localhost", "",
+        port, port, port);
   }
 
   public static DatanodeDescriptor getLocalDatanodeDescriptor() {
@@ -722,7 +730,36 @@ public class DFSTestUtil {
     return new DatanodeInfo(getLocalDatanodeID());
   }
 
+  public static DatanodeInfo getDatanodeInfo(String ipAddr) {
+    return new DatanodeInfo(getDatanodeID(ipAddr));
+  }
+
   public static DatanodeInfo getLocalDatanodeInfo(int port) {
     return new DatanodeInfo(getLocalDatanodeID(port));
   }
+
+  public static DatanodeInfo getDatanodeInfo(String ipAddr,
+      String host, int port) {
+    return new DatanodeInfo(new DatanodeID(ipAddr, host, port));
+  }
+
+  public static DatanodeInfo getLocalDatanodeInfo(String ipAddr,
+      String hostname, AdminStates adminState) {
+    return new DatanodeInfo(ipAddr, hostname, "storage",
+        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
+        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT,
+        1, 2, 3, 4, 5, 6, "local", adminState);
+  }
+
+  public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
+      String rackLocation) {
+    return getDatanodeDescriptor(ipAddr, DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
+        rackLocation);
+  }
+
+  public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
+      int port, String rackLocation) {
+    return new DatanodeDescriptor(new DatanodeID(ipAddr, port), rackLocation);
+  }
 }
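The DFSTestUtil helpers added above are what the test hunks below switch to. A minimal usage sketch (hypothetical example, not part of this change) of building fake datanodes with them:

// Hypothetical sketch, not part of this commit: constructs fake datanodes
// with the new DFSTestUtil helpers instead of raw DatanodeID constructors.
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;

public class FakeDatanodeSketch {
  public static void main(String[] args) {
    // A DatanodeInfo for a non-existent datanode at a bogus address.
    DatanodeInfo info = DFSTestUtil.getDatanodeInfo("1.2.3.4");
    // A DatanodeDescriptor placed on a specific rack, as the block
    // management tests below do.
    DatanodeDescriptor node = DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA");
    System.out.println(info + " / " + node);
  }
}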
@@ -332,7 +332,7 @@ public class TestDFSClientRetries extends TestCase {
     LocatedBlock badLocatedBlock = new LocatedBlock(
         goodLocatedBlock.getBlock(),
         new DatanodeInfo[] {
-            new DatanodeInfo(new DatanodeID("255.255.255.255", 234))
+            DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234)
         },
         goodLocatedBlock.getStartOffset(),
         false);
@@ -121,8 +121,7 @@ public class TestGetBlocks extends TestCase {
       getBlocksWithException(namenode, dataNodes[0], -1);
 
       // get blocks of size BlockSize from a non-existent datanode
-      DatanodeInfo info = DFSTestUtil.getLocalDatanodeInfo();
-      info.setIpAddr("1.2.3.4");
+      DatanodeInfo info = DFSTestUtil.getDatanodeInfo("1.2.3.4");
       getBlocksWithException(namenode, info, 2);
     } finally {
       cluster.shutdown();
@@ -400,16 +400,11 @@ public class TestPBHelper {
 
   @Test
   public void testConvertLocatedBlock() {
-    DatanodeInfo [] dnInfos = new DatanodeInfo[3];
-    dnInfos[0] = new DatanodeInfo("127.0.0.1", "host1", "0",
-        5000, 5001, 5002, 20000, 10001, 9999,
-        59, 69, 32, "local", AdminStates.DECOMMISSION_INPROGRESS);
-    dnInfos[1] = new DatanodeInfo("127.0.0.1", "host2", "1",
-        5000, 5001, 5002, 20000, 10001, 9999,
-        59, 69, 32, "local", AdminStates.DECOMMISSIONED);
-    dnInfos[2] = new DatanodeInfo("127.0.0.1", "host3", "2",
-        5000, 5001, 5002, 20000, 10001, 9999,
-        59, 69, 32, "local", AdminStates.NORMAL);
+    DatanodeInfo [] dnInfos = {
+        DFSTestUtil.getLocalDatanodeInfo("1.1.1.1", "h1", AdminStates.DECOMMISSION_INPROGRESS),
+        DFSTestUtil.getLocalDatanodeInfo("2.2.2.2", "h2", AdminStates.DECOMMISSIONED),
+        DFSTestUtil.getLocalDatanodeInfo("3.3.3.3", "h3", AdminStates.NORMAL)
+    };
     LocatedBlock lb = new LocatedBlock(
         new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
     LocatedBlockProto lbProto = PBHelper.convert(lb);
@@ -26,6 +26,7 @@ import java.util.Map.Entry;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -76,12 +77,12 @@ public class TestBlockManager {
     Mockito.doReturn(true).when(fsn).hasWriteLock();
     bm = new BlockManager(fsn, fsn, conf);
     nodes = ImmutableList.of(
-        new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/rackA"),
-        new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/rackA"),
-        new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/rackA"),
-        new DatanodeDescriptor(new DatanodeID("4.4.4.4", 5020), "/rackB"),
-        new DatanodeDescriptor(new DatanodeID("5.5.5.5", 5020), "/rackB"),
-        new DatanodeDescriptor(new DatanodeID("6.6.6.6", 5020), "/rackB")
+        DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA"),
+        DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA"),
+        DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA"),
+        DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackB"),
+        DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackB"),
+        DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackB")
       );
     rackA = nodes.subList(0, 3);
     rackB = nodes.subList(3, 6);
@@ -277,7 +278,7 @@ public class TestBlockManager {
     // the block is still under-replicated. Add a new node. This should allow
     // the third off-rack replica.
     DatanodeDescriptor rackCNode =
-      new DatanodeDescriptor(new DatanodeID("7.7.7.7", 100), "/rackC");
+      DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/rackC");
     addNodes(ImmutableList.of(rackCNode));
     try {
       DatanodeDescriptor[] pipeline2 = scheduleSingleReplication(blockInfo);
@@ -317,13 +318,13 @@ public class TestBlockManager {
 
   @Test
   public void testBlocksAreNotUnderreplicatedInSingleRack() throws Exception {
     List<DatanodeDescriptor> nodes = ImmutableList.of(
-        new DatanodeDescriptor(new DatanodeID("h1", 5020), "/rackA"),
-        new DatanodeDescriptor(new DatanodeID("h2", 5020), "/rackA"),
-        new DatanodeDescriptor(new DatanodeID("h3", 5020), "/rackA"),
-        new DatanodeDescriptor(new DatanodeID("h4", 5020), "/rackA"),
-        new DatanodeDescriptor(new DatanodeID("h5", 5020), "/rackA"),
-        new DatanodeDescriptor(new DatanodeID("h6", 5020), "/rackA")
+        DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA"),
+        DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA"),
+        DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA"),
+        DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackA"),
+        DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackA"),
+        DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackA")
       );
     addNodes(nodes);
     List<DatanodeDescriptor> origNodes = nodes.subList(0, 3);;
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 
 import org.junit.Before;
 import org.junit.Test;
@@ -32,10 +32,10 @@ public class TestHost2NodesMap {
   @Before
   public void setup() {
     dataNodes = new DatanodeDescriptor[] {
-      new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/d1/r1"),
-      new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/d1/r1"),
-      new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/d1/r2"),
-      new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5030), "/d1/r2"),
+      DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
+      DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
+      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"),
+      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", 5021, "/d1/r2"),
     };
     for (DatanodeDescriptor node : dataNodes) {
       map.add(node);
@@ -46,7 +46,7 @@ public class TestHost2NodesMap {
   @Test
   public void testContains() throws Exception {
     DatanodeDescriptor nodeNotInMap =
-      new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5040), "/d1/r4");
+      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r4");
     for (int i = 0; i < dataNodes.length; i++) {
       assertTrue(map.contains(dataNodes[i]));
     }
@@ -66,7 +66,7 @@ public class TestHost2NodesMap {
   @Test
   public void testRemove() throws Exception {
     DatanodeDescriptor nodeNotInMap =
-      new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5040), "/d1/r4");
+      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r4");
     assertFalse(map.remove(nodeNotInMap));
 
     assertTrue(map.remove(dataNodes[0]));
@@ -56,12 +56,12 @@ public class TestReplicationPolicy {
   public static void setupCluster() throws Exception {
     Configuration conf = new HdfsConfiguration();
     dataNodes = new DatanodeDescriptor[] {
-        new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/d1/r1"),
-        new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/d1/r1"),
-        new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/d1/r2"),
-        new DatanodeDescriptor(new DatanodeID("4.4.4.4", 5020), "/d1/r2"),
-        new DatanodeDescriptor(new DatanodeID("5.5.5.5", 5020), "/d2/r3"),
-        new DatanodeDescriptor(new DatanodeID("6.6.6.6", 5020), "/d2/r3")
+        DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
+        DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
+        DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"),
+        DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/d1/r2"),
+        DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/d2/r3"),
+        DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/d2/r3")
       };
 
     FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
@@ -329,6 +329,7 @@ public class TestReplicationPolicy {
         HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
     }
   }
+
   /**
    * In this testcase, client is is a node outside of file system.
    * So the 1st replica can be placed on any node.
@@ -338,8 +339,8 @@ public class TestReplicationPolicy {
    */
   @Test
   public void testChooseTarget5() throws Exception {
     DatanodeDescriptor writerDesc =
-      new DatanodeDescriptor(new DatanodeID("7.7.7.7", 5020), "/d2/r4");
+      DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r4");
 
     DatanodeDescriptor[] targets;
     targets = replicator.chooseTarget(filename,
@@ -115,7 +115,7 @@ public class TestBPOfferService {
           0, HdfsConstants.LAYOUT_VERSION))
       .when(mock).versionRequest();
 
-    Mockito.doReturn(new DatanodeRegistration("fake-node", 100))
+    Mockito.doReturn(new DatanodeRegistration("1.2.3.4", 100))
       .when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class));
 
     Mockito.doAnswer(new HeartbeatAnswer(nnIdx))
@@ -47,6 +47,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -197,9 +198,9 @@ public class TestBlockRecovery {
         locs, RECOVERY_ID);
     ArrayList<BlockRecord> syncList = new ArrayList<BlockRecord>(2);
     BlockRecord record1 = new BlockRecord(
-        new DatanodeID("xx", "yy", "zz", 1, 2, 3), dn1, replica1);
+        DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn1, replica1);
     BlockRecord record2 = new BlockRecord(
-        new DatanodeID("aa", "bb", "cc", 1, 2, 3), dn2, replica2);
+        DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn2, replica2);
     syncList.add(record1);
     syncList.add(record2);
 
@@ -401,8 +402,7 @@ public class TestBlockRecovery {
 
   private Collection<RecoveringBlock> initRecoveringBlocks() throws IOException {
     Collection<RecoveringBlock> blocks = new ArrayList<RecoveringBlock>(1);
-    DatanodeInfo mockOtherDN = new DatanodeInfo(
-        new DatanodeID("127.0.0.1", "localhost", "storage-1234", 0, 0, 0));
+    DatanodeInfo mockOtherDN = DFSTestUtil.getLocalDatanodeInfo();
     DatanodeInfo[] locs = new DatanodeInfo[] {
       new DatanodeInfo(dn.getDNRegistrationForBP(block.getBlockPoolId())),
       mockOtherDN };
@@ -357,8 +357,7 @@ public class TestInterDatanodeProtocol {
     server.start();
 
     final InetSocketAddress addr = NetUtils.getConnectAddress(server);
-    DatanodeID fakeDnId = new DatanodeID(
-        "localhost", "localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
+    DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
     DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
     InterDatanodeProtocol proxy = null;
 
@@ -21,6 +21,7 @@ package org.apache.hadoop.net;
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 
@@ -36,13 +37,13 @@ public class TestNetworkTopology {
   @Before
   public void setupDatanodes() {
     dataNodes = new DatanodeDescriptor[] {
-        new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/d1/r1"),
-        new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/d1/r1"),
-        new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/d1/r2"),
-        new DatanodeDescriptor(new DatanodeID("4.4.4.4", 5020), "/d1/r2"),
-        new DatanodeDescriptor(new DatanodeID("5.5.5.5", 5020), "/d1/r2"),
-        new DatanodeDescriptor(new DatanodeID("6.6.6.6", 5020), "/d2/r3"),
-        new DatanodeDescriptor(new DatanodeID("7.7.7.7", 5020), "/d2/r3")
+        DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
+        DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
+        DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"),
+        DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/d1/r2"),
+        DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/d1/r2"),
+        DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/d2/r3"),
+        DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r3")
     };
     for (int i = 0; i < dataNodes.length; i++) {
       cluster.add(dataNodes[i]);
@@ -52,7 +53,7 @@ public class TestNetworkTopology {
   @Test
   public void testContains() throws Exception {
     DatanodeDescriptor nodeNotInMap =
-      new DatanodeDescriptor(new DatanodeID("8.8.8.8", 5020), "/d2/r4");
+      DFSTestUtil.getDatanodeDescriptor("8.8.8.8", "/d2/r4");
     for (int i=0; i < dataNodes.length; i++) {
       assertTrue(cluster.contains(dataNodes[i]));
     }
@@ -68,9 +69,9 @@ public class TestNetworkTopology {
   public void testCreateInvalidTopology() throws Exception {
     NetworkTopology invalCluster = new NetworkTopology();
     DatanodeDescriptor invalDataNodes[] = new DatanodeDescriptor[] {
-        new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/d1/r1"),
-        new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/d1/r1"),
-        new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/d1")
+        DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
+        DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
+        DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1")
     };
     invalCluster.add(invalDataNodes[0]);
    invalCluster.add(invalDataNodes[1]);