HBASE-6175 TestFSUtils flaky on hdfs getFileStatus method

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1357238 13f79535-47bb-0310-9956-ffa450edef68
nkeywal 2012-07-04 13:13:31 +00:00
parent b1ab069bdd
commit 1544913988
1 changed file with 41 additions and 24 deletions

@@ -118,16 +118,22 @@ public class TestFSUtils {
       // given the default replication factor is 3, the same as the number of
       // datanodes; the locality index for each host should be 100%,
       // or getWeight for each host should be the same as getUniqueBlocksWeights
-      FileStatus status = fs.getFileStatus(testFile);
-      HDFSBlocksDistribution blocksDistribution =
-        FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
-      long uniqueBlocksTotalWeight =
-        blocksDistribution.getUniqueBlocksTotalWeight();
-      for (String host : hosts) {
-        long weight = blocksDistribution.getWeight(host);
-        assertTrue(uniqueBlocksTotalWeight == weight);
-      }
+      final long maxTime = System.currentTimeMillis() + 2000;
+      boolean ok;
+      do {
+        ok = true;
+        FileStatus status = fs.getFileStatus(testFile);
+        HDFSBlocksDistribution blocksDistribution =
+          FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
+        long uniqueBlocksTotalWeight =
+          blocksDistribution.getUniqueBlocksTotalWeight();
+        for (String host : hosts) {
+          long weight = blocksDistribution.getWeight(host);
+          ok = (ok && uniqueBlocksTotalWeight == weight);
+        }
+      } while (!ok && System.currentTimeMillis() < maxTime);
+      assertTrue(ok);
     } finally {
       htu.shutdownMiniDFSCluster();
     }
@@ -146,14 +152,20 @@ public class TestFSUtils {
       // given the default replication factor is 3, we will have total of 9
       // replica of blocks; thus the host with the highest weight should have
       // weight == 3 * DEFAULT_BLOCK_SIZE
-      FileStatus status = fs.getFileStatus(testFile);
-      HDFSBlocksDistribution blocksDistribution =
-        FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
-      long uniqueBlocksTotalWeight =
-        blocksDistribution.getUniqueBlocksTotalWeight();
-      String tophost = blocksDistribution.getTopHosts().get(0);
-      long weight = blocksDistribution.getWeight(tophost);
+      final long maxTime = System.currentTimeMillis() + 2000;
+      long weight;
+      long uniqueBlocksTotalWeight;
+      do {
+        FileStatus status = fs.getFileStatus(testFile);
+        HDFSBlocksDistribution blocksDistribution =
+          FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
+        uniqueBlocksTotalWeight = blocksDistribution.getUniqueBlocksTotalWeight();
+        String tophost = blocksDistribution.getTopHosts().get(0);
+        weight = blocksDistribution.getWeight(tophost);
+        // NameNode is informed asynchronously, so we may have a delay. See HBASE-6175
+      } while (uniqueBlocksTotalWeight != weight && System.currentTimeMillis() < maxTime);
       assertTrue(uniqueBlocksTotalWeight == weight);
     } finally {
@@ -174,11 +186,16 @@ public class TestFSUtils {
       // given the default replication factor is 3, we will have total of 3
       // replica of blocks; thus there is one host without weight
-      FileStatus status = fs.getFileStatus(testFile);
-      HDFSBlocksDistribution blocksDistribution =
-        FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
+      final long maxTime = System.currentTimeMillis() + 2000;
+      HDFSBlocksDistribution blocksDistribution;
+      do {
+        FileStatus status = fs.getFileStatus(testFile);
+        blocksDistribution = FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
+        // NameNode is informed asynchronously, so we may have a delay. See HBASE-6175
+      }
+      while (blocksDistribution.getTopHosts().size() != 3 && System.currentTimeMillis() < maxTime);
       assertEquals("Wrong number of hosts distributing blocks.", 3,
         blocksDistribution.getTopHosts().size());
     } finally {
       htu.shutdownMiniDFSCluster();
     }
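
All three hunks apply the same fix: rather than asserting on the first getFileStatus() result, each test polls until the NameNode's view of the block locations catches up, giving up after a 2-second deadline. A minimal, self-contained sketch of that retry idiom, assuming a hypothetical RetryUtil helper and Java 8's BooleanSupplier (neither is part of this patch or of HBase at the time):

import java.util.function.BooleanSupplier;

public final class RetryUtil {
  private RetryUtil() {}

  /**
   * Re-evaluates the condition until it holds or timeoutMs elapses.
   * Returns the condition's final value so the caller can still assert on it.
   */
  public static boolean waitUntil(BooleanSupplier condition, long timeoutMs)
      throws InterruptedException {
    final long deadline = System.currentTimeMillis() + timeoutMs;
    while (!condition.getAsBoolean()) {
      if (System.currentTimeMillis() >= deadline) {
        return condition.getAsBoolean(); // one last check at the deadline
      }
      Thread.sleep(50); // back off briefly between polls; the patch itself re-queries without sleeping
    }
    return true;
  }
}

The patch inlines this logic as do/while loops instead of extracting a helper, and its loops re-query without sleeping; for a 2-second window inside a unit test that trade-off is harmless and keeps each test readable on its own.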