From 584caaef1b760fc54b0e3920870991c218a156bf Mon Sep 17 00:00:00 2001
From: Junping Du
Date: Fri, 16 May 2014 09:59:24 +0000
Subject: [PATCH] Merge r1595145 from trunk: HDFS-6250. Fix test failed in
 TestBalancerWithNodeGroup.testBalancerWithRackLocality (Contributed by
 Binglin Chang and Chen He)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1595146 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt         |  3 ++
 .../server/balancer/NameNodeConnector.java          |  2 +-
 .../balancer/TestBalancerWithNodeGroup.java         | 50 +++++++++++--------
 3 files changed, 34 insertions(+), 21 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c9a3a31c189..b16672695cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -237,6 +237,9 @@ Release 2.5.0 - UNRELEASED
 
     HDFS-6400. Cannot execute hdfs oiv_legacy. (Akira AJISAKA via kihwal)
 
+    HDFS-6250. Fix test failed in TestBalancerWithNodeGroup.testBalancerWithRackLocality
+    (Binglin Chang and Chen He via junping_du)
+
 Release 2.4.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
index 1ab7f9148b7..25784a26cfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
@@ -170,7 +170,7 @@ class NameNodeConnector {
   }
 
   /* The idea for making sure that there is no more than one balancer
-   * running in an HDFS is to create a file in the HDFS, writes the IP address
+   * running in an HDFS is to create a file in the HDFS, writes the hostname
    * of the machine on which the balancer is running to the file, but did not
    * close the file until the balancer exits.
    * This prevents the second balancer from running because it can not
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
index 7a39dee427b..667204c0c9b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
@@ -22,8 +22,9 @@ import static org.junit.Assert.assertEquals;
 import java.io.IOException;
 import java.net.URI;
 import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
@@ -39,6 +40,9 @@ import org.apache.hadoop.hdfs.MiniDFSClusterWithNodeGroup;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup;
 import org.apache.hadoop.net.NetworkTopology;
@@ -53,7 +57,7 @@ public class TestBalancerWithNodeGroup {
   private static final Log LOG = LogFactory.getLog(
       "org.apache.hadoop.hdfs.TestBalancerWithNodeGroup");
 
-  final private static long CAPACITY = 6000L;
+  final private static long CAPACITY = 5000L;
   final private static String RACK0 = "/rack0";
   final private static String RACK1 = "/rack1";
   final private static String NODEGROUP0 = "/nodegroup0";
@@ -77,6 +81,7 @@ public class TestBalancerWithNodeGroup {
   static Configuration createConf() {
     Configuration conf = new HdfsConfiguration();
     TestBalancer.initConf(conf);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
     conf.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
         NetworkTopologyWithNodeGroup.class.getName());
     conf.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
@@ -191,6 +196,19 @@ public class TestBalancerWithNodeGroup {
     LOG.info("Rebalancing with default factor.");
   }
 
+  private Set<ExtendedBlock> getBlocksOnRack(List<LocatedBlock> blks, String rack) {
+    Set<ExtendedBlock> ret = new HashSet<ExtendedBlock>();
+    for (LocatedBlock blk : blks) {
+      for (DatanodeInfo di : blk.getLocations()) {
+        if (rack.equals(NetworkTopology.getFirstHalf(di.getNetworkLocation()))) {
+          ret.add(blk.getBlock());
+          break;
+        }
+      }
+    }
+    return ret;
+  }
+
   /**
    * Create a cluster with even distribution, and a new empty node is added to
    * the cluster, then test rack locality for balancer policy.
@@ -220,9 +238,14 @@ public class TestBalancerWithNodeGroup {
 
       // fill up the cluster to be 30% full
       long totalUsedSpace = totalCapacity * 3 / 10;
-      TestBalancer.createFile(cluster, filePath, totalUsedSpace / numOfDatanodes,
+      long length = totalUsedSpace / numOfDatanodes;
+      TestBalancer.createFile(cluster, filePath, length,
           (short) numOfDatanodes, 0);
 
+      LocatedBlocks lbs = client.getBlockLocations(filePath.toUri().getPath(), 0,
+          length);
+      Set<ExtendedBlock> before = getBlocksOnRack(lbs.getLocatedBlocks(), RACK0);
+
       long newCapacity = CAPACITY;
       String newRack = RACK1;
       String newNodeGroup = NODEGROUP2;
@@ -235,22 +258,9 @@ public class TestBalancerWithNodeGroup {
 
       // run balancer and validate results
       runBalancerCanFinish(conf, totalUsedSpace, totalCapacity);
 
-      DatanodeInfo[] datanodeReport =
-          client.getDatanodeReport(DatanodeReportType.ALL);
-
-      Map<String, Integer> rackToUsedCapacity = new HashMap<String, Integer>();
-      for (DatanodeInfo datanode: datanodeReport) {
-        String rack = NetworkTopology.getFirstHalf(datanode.getNetworkLocation());
-        int usedCapacity = (int) datanode.getDfsUsed();
-
-        if (rackToUsedCapacity.get(rack) != null) {
-          rackToUsedCapacity.put(rack, usedCapacity + rackToUsedCapacity.get(rack));
-        } else {
-          rackToUsedCapacity.put(rack, usedCapacity);
-        }
-      }
-      assertEquals(rackToUsedCapacity.size(), 2);
-      assertEquals(rackToUsedCapacity.get(RACK0), rackToUsedCapacity.get(RACK1));
+      lbs = client.getBlockLocations(filePath.toUri().getPath(), 0, length);
+      Set<ExtendedBlock> after = getBlocksOnRack(lbs.getLocatedBlocks(), RACK0);
+      assertEquals(before, after);
     } finally {
       cluster.shutdown();
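
The NameNodeConnector comment touched by this patch describes how HDFS guarantees a single running balancer: create a file in HDFS, write the local hostname into it, and keep the file open until the balancer exits. Below is a minimal sketch of that idea, assuming the conventional /system/balancer.id path; the class and method names are hypothetical, and the real NameNodeConnector differs in details such as error handling.

import java.io.IOException;
import java.net.InetAddress;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical sketch of the lock-file scheme, not the actual Hadoop code.
public class BalancerLockSketch {
  private static final Path BALANCER_ID_PATH = new Path("/system/balancer.id");

  /**
   * Try to become the only running balancer: create the id file (failing
   * if it already exists), write this machine's hostname into it, and
   * return the still-open stream. Closing is deferred until the balancer
   * exits, so a second balancer's create() fails in the meantime.
   */
  static FSDataOutputStream markRunningBalancer(FileSystem fs) {
    try {
      // false = do not overwrite; fails while another balancer holds the file
      FSDataOutputStream out = fs.create(BALANCER_ID_PATH, false);
      out.writeBytes(InetAddress.getLocalHost().getHostName());
      out.hflush();  // make the hostname visible to readers of the lock file
      return out;    // intentionally left open: the open file is the lock
    } catch (IOException e) {
      return null;   // lock not acquired; another balancer is running
    }
  }
}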
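The test's new getBlocksOnRack() helper hinges on NetworkTopology.getFirstHalf(), which maps a datanode's two-layer network location to its rack. A tiny illustrative example, with a made-up location string matching the RACK0/NODEGROUP0 layout the test uses:

import org.apache.hadoop.net.NetworkTopology;

// Illustrative only: the rack extraction that getBlocksOnRack() relies on.
public class GetFirstHalfDemo {
  public static void main(String[] args) {
    // Under NetworkTopologyWithNodeGroup a location is /<rack>/<nodegroup>.
    String location = "/rack0/nodegroup0";
    // Keeps the part before the second "/", printing "/rack0" here.
    System.out.println(NetworkTopology.getFirstHalf(location));
  }
}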