HDFS-6268. Better sorting in NetworkTopology#pseudoSortByDistance when no local node is found. (wang)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1599842 13f79535-47bb-0310-9956-ffa450edef68
parent bb87b70e37
commit 2abf0c7bea
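Illustrative sketch (not part of this patch): the sortByDistance calls introduced below take a seed, and the DatanodeManager caller passes the block ID, so ties between equally distant replicas are broken pseudo-randomly but reproducibly, spreading reads across replicas when no replica is local to the reader. The class and helper names here are hypothetical (orderReplicas is not an HDFS API); this only demonstrates seeded, deterministic tie-breaking.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;

public class SeededReplicaOrderSketch {
  // Shuffle a copy of the replica list with a seed-derived Random, so the
  // result is deterministic for a given seed but varies across seeds.
  static List<String> orderReplicas(List<String> replicas, long seed) {
    List<String> copy = new ArrayList<>(replicas);
    Collections.shuffle(copy, new Random(seed));
    return copy;
  }

  public static void main(String[] args) {
    List<String> replicas = Arrays.asList("dn1", "dn2", "dn3");
    // Same seed (think: same block ID) yields the same order on every call.
    System.out.println(orderReplicas(replicas, 0xDEADBEEFL));
    System.out.println(orderReplicas(replicas, 0xDEADBEEFL));
    // A different seed (a different block) usually yields a different order.
    System.out.println(orderReplicas(replicas, 0xDEADL));
  }
}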
@@ -147,6 +147,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6109 let sync_file_range() system call run in background
     (Liang Xie via stack)
 
+    HDFS-6268. Better sorting in NetworkTopology#pseudoSortByDistance when
+    no local node is found. (wang)
+
   OPTIMIZATIONS
 
     HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)
@@ -351,7 +351,8 @@ public class DatanodeManager {
         DFSUtil.DECOM_COMPARATOR;
 
     for (LocatedBlock b : locatedblocks) {
-      networktopology.pseudoSortByDistance(client, b.getLocations());
+      networktopology.sortByDistance(client, b.getLocations(), b
+          .getBlock().getBlockId());
       // Move decommissioned/stale datanodes to the bottom
       Arrays.sort(b.getLocations(), comparator);
     }
@@ -1618,9 +1618,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     blockManager.getDatanodeManager().sortLocatedBlocks(
         clientMachine, blocks.getLocatedBlocks());
 
+    // lastBlock is not part of getLocatedBlocks(), might need to sort it too
     LocatedBlock lastBlock = blocks.getLastLocatedBlock();
     if (lastBlock != null) {
-      ArrayList<LocatedBlock> lastBlockList = new ArrayList<LocatedBlock>();
+      ArrayList<LocatedBlock> lastBlockList =
+          Lists.newArrayListWithCapacity(1);
       lastBlockList.add(lastBlock);
       blockManager.getDatanodeManager().sortLocatedBlocks(
           clientMachine, lastBlockList);
@@ -169,6 +169,9 @@ public class TestGetBlocks {
       if (stm != null) {
         stm.close();
       }
+      if (client != null) {
+        client.close();
+      }
       cluster.shutdown();
     }
   }
@@ -143,10 +143,10 @@ public class TestSnapshotFileLength {
 
     // Make sure we can read the entire file via its non-snapshot path.
     fileStatus = hdfs.getFileStatus(file1);
-    assertEquals(fileStatus.getLen(), BLOCKSIZE * 2);
+    assertEquals("Unexpected file length", BLOCKSIZE * 2, fileStatus.getLen());
     fis = hdfs.open(file1);
     bytesRead = fis.read(buffer, 0, buffer.length);
-    assertEquals(bytesRead, BLOCKSIZE * 2);
+    assertEquals("Unexpected # bytes read", BLOCKSIZE * 2, bytesRead);
     fis.close();
 
     Path file1snap1 =
@@ -156,21 +156,23 @@
     assertEquals(fileStatus.getLen(), BLOCKSIZE);
     // Make sure we can only read up to the snapshot length.
     bytesRead = fis.read(buffer, 0, buffer.length);
-    assertEquals(bytesRead, BLOCKSIZE);
+    assertEquals("Unexpected # bytes read", BLOCKSIZE, bytesRead);
     fis.close();
 
-    PrintStream psBackup = System.out;
+    PrintStream outBackup = System.out;
+    PrintStream errBackup = System.err;
     ByteArrayOutputStream bao = new ByteArrayOutputStream();
     System.setOut(new PrintStream(bao));
     System.setErr(new PrintStream(bao));
     // Make sure we can cat the file upto to snapshot length
     FsShell shell = new FsShell();
-    try{
+    try {
       ToolRunner.run(conf, shell, new String[] { "-cat",
         "/TestSnapshotFileLength/sub1/.snapshot/snapshot1/file1" });
-      assertEquals(bao.size(), BLOCKSIZE);
-    }finally{
-      System.setOut(psBackup);
+      assertEquals("Unexpected # bytes from -cat", BLOCKSIZE, bao.size());
+    } finally {
+      System.setOut(outBackup);
+      System.setErr(errBackup);
     }
   }
 }
@@ -86,7 +86,7 @@ public class TestHdfsNetworkTopologyWithNodeGroup extends TestCase {
     assertEquals(cluster.getDistance(dataNodes[0], dataNodes[6]), 8);
   }
 
-  public void testPseudoSortByDistance() throws Exception {
+  public void testSortByDistance() throws Exception {
     DatanodeDescriptor[] testNodes = new DatanodeDescriptor[4];
 
     // array contains both local node, local node group & local rack node
@@ -94,7 +94,7 @@ public class TestHdfsNetworkTopologyWithNodeGroup extends TestCase {
     testNodes[1] = dataNodes[2];
     testNodes[2] = dataNodes[3];
     testNodes[3] = dataNodes[0];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[1]);
     assertTrue(testNodes[2] == dataNodes[2]);
@@ -105,7 +105,7 @@ public class TestHdfsNetworkTopologyWithNodeGroup extends TestCase {
     testNodes[1] = dataNodes[4];
     testNodes[2] = dataNodes[1];
     testNodes[3] = dataNodes[0];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[1]);
 
@@ -114,7 +114,7 @@ public class TestHdfsNetworkTopologyWithNodeGroup extends TestCase {
     testNodes[1] = dataNodes[3];
     testNodes[2] = dataNodes[2];
     testNodes[3] = dataNodes[0];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[2]);
 
@@ -123,7 +123,7 @@ public class TestHdfsNetworkTopologyWithNodeGroup extends TestCase {
     testNodes[1] = dataNodes[7];
     testNodes[2] = dataNodes[2];
     testNodes[3] = dataNodes[0];
-    cluster.pseudoSortByDistance(computeNode, testNodes );
+    cluster.sortByDistance(computeNode, testNodes, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[2]);
   }
@@ -54,7 +54,8 @@ public class TestNetworkTopology {
         DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/d1/r2"),
         DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/d1/r2"),
         DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/d2/r3"),
-        DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r3")
+        DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r3"),
+        DFSTestUtil.getDatanodeDescriptor("8.8.8.8", "/d2/r3")
     };
     for (int i = 0; i < dataNodes.length; i++) {
       cluster.add(dataNodes[i]);
@@ -117,14 +118,14 @@
   }
 
   @Test
-  public void testPseudoSortByDistance() throws Exception {
+  public void testSortByDistance() throws Exception {
     DatanodeDescriptor[] testNodes = new DatanodeDescriptor[3];
 
     // array contains both local node & local rack node
     testNodes[0] = dataNodes[1];
     testNodes[1] = dataNodes[2];
     testNodes[2] = dataNodes[0];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[1]);
     assertTrue(testNodes[2] == dataNodes[2]);
@@ -133,7 +134,7 @@
     testNodes[0] = dataNodes[1];
     testNodes[1] = dataNodes[3];
     testNodes[2] = dataNodes[0];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[1]);
     assertTrue(testNodes[2] == dataNodes[3]);
@@ -142,21 +143,50 @@
     testNodes[0] = dataNodes[5];
     testNodes[1] = dataNodes[3];
     testNodes[2] = dataNodes[1];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[1]);
     assertTrue(testNodes[1] == dataNodes[3]);
     assertTrue(testNodes[2] == dataNodes[5]);
 
     // array contains local rack node which happens to be in position 0
     testNodes[0] = dataNodes[1];
     testNodes[1] = dataNodes[5];
     testNodes[2] = dataNodes[3];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
-    // peudoSortByDistance does not take the "data center" layer into consideration
+    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
+    assertTrue(testNodes[0] == dataNodes[1]);
+    assertTrue(testNodes[1] == dataNodes[3]);
+    assertTrue(testNodes[2] == dataNodes[5]);
+
+    // Same as previous, but with a different random seed to test randomization
+    testNodes[0] = dataNodes[1];
+    testNodes[1] = dataNodes[5];
+    testNodes[2] = dataNodes[3];
+    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEAD);
+    // sortByDistance does not take the "data center" layer into consideration
     // and it doesn't sort by getDistance, so 1, 5, 3 is also valid here
     assertTrue(testNodes[0] == dataNodes[1]);
     assertTrue(testNodes[1] == dataNodes[5]);
     assertTrue(testNodes[2] == dataNodes[3]);
+
+    // Array is just local rack nodes
+    // Expect a random first node depending on the seed (normally the block ID).
+    DatanodeDescriptor first = null;
+    boolean foundRandom = false;
+    for (int i=5; i<=7; i++) {
+      testNodes[0] = dataNodes[5];
+      testNodes[1] = dataNodes[6];
+      testNodes[2] = dataNodes[7];
+      cluster.sortByDistance(dataNodes[i], testNodes, 0xBEADED+i);
+      if (first == null) {
+        first = testNodes[0];
+      } else {
+        if (first != testNodes[0]) {
+          foundRandom = true;
+          break;
+        }
+      }
+    }
+    assertTrue("Expected to find a different first location", foundRandom);
   }
 
   @Test