HBASE-8957 TestDrainingServer.testDrainingServerWithAbort fails; ADDENDUM

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1504308 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2013-07-17 22:24:32 +00:00
parent 81434835de
commit 9bf0760123
2 changed files with 24 additions and 14 deletions

HRegionServer.java

@@ -2072,10 +2072,11 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterface
   }
 
   /**
-   * @return A new Map of online regions sorted by region size with the first
-   * entry being the biggest.
+   * @return A new Map of online regions sorted by region size with the first entry being the
+   * biggest. If two regions are the same size, then the last one found wins; i.e. this method
+   * may NOT return all regions.
    */
-  public SortedMap<Long, HRegion> getCopyOfOnlineRegionsSortedBySize() {
+  SortedMap<Long, HRegion> getCopyOfOnlineRegionsSortedBySize() {
     // we'll sort the regions in reverse
     SortedMap<Long, HRegion> sortedRegions = new TreeMap<Long, HRegion>(
       new Comparator<Long>() {
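The new javadoc caveat follows from the map being keyed by region size: two regions of equal size collide on the same key and only the last one put survives. A minimal standalone sketch of that collision, using String placeholders instead of HRegion (the names and sizes here are made up for illustration, not taken from the patch):

    import java.util.Comparator;
    import java.util.SortedMap;
    import java.util.TreeMap;

    public class SizeKeyCollision {
      public static void main(String[] args) {
        // Reverse order so the biggest size sorts first, mirroring the
        // comparator used by getCopyOfOnlineRegionsSortedBySize().
        SortedMap<Long, String> bySize = new TreeMap<Long, String>(
            new Comparator<Long>() {
              public int compare(Long a, Long b) {
                return b.compareTo(a);
              }
            });
        bySize.put(1024L, "regionA");
        bySize.put(1024L, "regionB"); // same size: regionA is silently replaced
        // Prints {1024=regionB} -- only one of the two regions is returned.
        System.out.println(bySize);
      }
    }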
@@ -3361,7 +3362,6 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterface
     }
   }
 
-  // Region open/close direct RPCs
   /**

TestDrainingServer.java

@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -195,8 +196,7 @@ public class TestDrainingServer {
     ServerManager sm = master.getServerManager();
 
-    Collection<HRegion> regionsBefore = drainingServer.
-      getCopyOfOnlineRegionsSortedBySize().values();
+    Collection<HRegion> regionsBefore = drainingServer.getOnlineRegionsLocalContext();
     LOG.info("Regions of drained server are: "+ regionsBefore );
 
     try {
@@ -222,17 +222,23 @@ public class TestDrainingServer {
         hrs.abort("Aborting");
       }
 
-      // Wait for regions to come back online again.
+      // Wait for regions to come back online again. waitForAllRegionsOnline can come back before
+      // we've assigned out regions on the cluster so retry if we are shy the wanted number
+      Collection<HRegion> regionsAfter = null;
+      for (int i = 0; i < 1000; i++) {
       waitForAllRegionsOnline();
-      Collection<HRegion> regionsAfter =
-        drainingServer.getCopyOfOnlineRegionsSortedBySize().values();
+        regionsAfter = getRegions();
+        if (regionsAfter.size() >= regionCount) break;
+        LOG.info("Expecting " + regionCount + " but only " + regionsAfter);
+        Threads.sleep(10);
+      }
+      LOG.info("Regions of drained server: " + regionsAfter + ", all regions: " + getRegions());
       Assert.assertEquals("Test conditions are not met: regions were" +
         " created/deleted during the test. ",
         regionCount, TEST_UTIL.getMiniHBaseCluster().countServedRegions());
 
+      // Assert the draining server still has the same regions.
+      regionsAfter = drainingServer.getOnlineRegionsLocalContext();
       StringBuilder result = new StringBuilder();
       for (HRegion r: regionsAfter){
         if (!regionsBefore.contains(r)){
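The retry added above is a plain bounded poll: keep re-checking the region count with a short sleep rather than trusting a single waitForAllRegionsOnline() call. A self-contained sketch of the same pattern, with a placeholder condition and timings that are not taken from the patch:

    // Bounded polling: retry a check a fixed number of times, sleeping briefly
    // between attempts, instead of relying on one wait call returning "done".
    public class BoundedPoll {
      interface Condition { boolean check(); }

      static boolean pollUntil(Condition c, int attempts, long sleepMs)
          throws InterruptedException {
        for (int i = 0; i < attempts; i++) {
          if (c.check()) return true;   // condition met; stop polling early
          Thread.sleep(sleepMs);        // brief back-off before the next try
        }
        return c.check();               // one last check after the final sleep
      }

      public static void main(String[] args) throws InterruptedException {
        final long deadline = System.currentTimeMillis() + 50;
        boolean ok = pollUntil(new Condition() {
          public boolean check() { return System.currentTimeMillis() >= deadline; }
        }, 1000, 10);
        System.out.println("condition met: " + ok);
      }
    }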
@@ -259,9 +265,13 @@ public class TestDrainingServer {
   private Collection<HRegion> getRegions() {
     Collection<HRegion> regions = new ArrayList<HRegion>();
-    for (int i = 0; i < NB_SLAVES; i++) {
-      HRegionServer hrs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(i);
-      regions.addAll( hrs.getCopyOfOnlineRegionsSortedBySize().values() );
+    List<RegionServerThread> rsthreads =
+      TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads();
+    for (RegionServerThread t: rsthreads) {
+      HRegionServer rs = t.getRegionServer();
+      Collection<HRegion> lr = rs.getOnlineRegionsLocalContext();
+      LOG.info("Found " + lr + " on " + rs);
+      regions.addAll(lr);
     }
     return regions;
   }