HBASE-5811 TestLoadAndSwitchEncodeOnDisk fails sometimes

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1327696 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2012-04-18 21:57:23 +00:00
parent 56a4420af1
commit 7972efeea8
2 changed files with 36 additions and 10 deletions

AssignmentManager.java

@@ -1346,8 +1346,7 @@ public class AssignmentManager extends ZooKeeperListener {
    * @param region
    * @param setOfflineInZK
    * @param forceNewPlan
-   * @param hijack
-   *          - true new assignment is needed, false otherwise
+   * @param hijack True if new assignment is needed, false otherwise
    */
   public void assign(HRegionInfo region, boolean setOfflineInZK,
       boolean forceNewPlan, boolean hijack) {
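
For illustration only, a hypothetical call site for the signature above (the assignmentManager and hri variables are assumptions, not part of this commit):

    // Hypothetical usage: set the znode OFFLINE, force a fresh plan,
    // and do not hijack an existing unassigned node (hijack = false).
    assignmentManager.assign(hri, true, true, false);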
@@ -1598,20 +1597,17 @@ public class AssignmentManager extends ZooKeeperListener {
     if (setOfflineInZK) {
       // get the version of the znode after setting it to OFFLINE.
       // versionOfOfflineNode will be -1 if the znode was not set to OFFLINE
-      versionOfOfflineNode = setOfflineInZooKeeper(state,
-        hijack);
-      if(versionOfOfflineNode != -1){
+      versionOfOfflineNode = setOfflineInZooKeeper(state, hijack);
+      if (versionOfOfflineNode != -1) {
         if (isDisabledorDisablingRegionInRIT(region)) {
           return;
         }
         setEnabledTable(region);
       }
     }
     if (setOfflineInZK && versionOfOfflineNode == -1) {
       return;
     }
     if (this.master.isStopped()) {
       LOG.debug("Server stopped; skipping assign of " + state);
       return;
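
Taken together, the hunk leaves assign() with the following shape: a successful OFFLINE znode write marks the table ENABLED via setEnabledTable(region), while a failed write (version -1) aborts the assign. A condensed paraphrase of that flow, not verbatim code from the file:

    int versionOfOfflineNode = -1;
    if (setOfflineInZK) {
      // -1 means the znode could not be set to OFFLINE
      versionOfOfflineNode = setOfflineInZooKeeper(state, hijack);
      if (versionOfOfflineNode != -1) {
        if (isDisabledorDisablingRegionInRIT(region)) return;
        setEnabledTable(region); // mark the table ENABLED in ZK
      }
    }
    if (setOfflineInZK && versionOfOfflineNode == -1) return; // ZK write failed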
@@ -2218,12 +2214,16 @@ public class AssignmentManager extends ZooKeeperListener {
     LOG.info("Bulk assigning done");
   }
 
+  // TODO: This method seems way wrong. Why would we mark a table enabled based
+  // off a single region? We seem to call this on bulk assign on startup which
+  // isn't too bad but then its also called in assign. It makes the enabled
+  // flag up in zk meaningless. St.Ack
   private void setEnabledTable(HRegionInfo hri) {
     String tableName = hri.getTableNameAsString();
     boolean isTableEnabled = this.zkTable.isEnabledTable(tableName);
     if (!isTableEnabled) {
       setEnabledTable(tableName);
     }
   }
 
   /**
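
The String overload that setEnabledTable(HRegionInfo) delegates to is not shown in this hunk. A plausible sketch, assuming it wraps ZKTable#setEnabledTable(String) (the same zkTable field used for the isEnabledTable check above) and its KeeperException; the error handling here is a guess, not the file's actual code:

    // Hypothetical shape of the delegated overload; the real
    // AssignmentManager code may handle the failure differently.
    private void setEnabledTable(String tableName) {
      try {
        this.zkTable.setEnabledTable(tableName);
      } catch (KeeperException e) {
        // A failed ENABLED write leaves table state in ZK suspect.
        this.master.abort("Error setting ENABLED znode for " + tableName, e);
      }
    }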

TestLoadAndSwitchEncodeOnDisk.java

@@ -16,12 +16,21 @@
  */
 package org.apache.hadoop.hbase.io.encoding;
 
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Map;
+import java.util.NavigableMap;
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -64,8 +73,10 @@ public class TestLoadAndSwitchEncodeOnDisk extends
     super.loadTest();
 
     HColumnDescriptor hcd = getColumnDesc(admin);
-    System.err.println("\nDisabling encode-on-disk. Old column descriptor: " +
-        hcd + "\n");
+    System.err.println("\nDisabling encode-on-disk. Old column descriptor: " + hcd + "\n");
+    HTable t = new HTable(this.conf, TABLE);
+    assertAllOnLine(t);
     admin.disableTable(TABLE);
     hcd.setEncodeOnDisk(false);
     admin.modifyColumn(TABLE, hcd);
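
This hunk shows only the disable-and-modify half of the schema change; the usual toggle pattern re-enables the table afterwards. A sketch for context (the enableTable call is assumed from the surrounding test, not shown in this hunk):

    admin.disableTable(TABLE);       // schema changes require a disabled table
    hcd.setEncodeOnDisk(false);      // flip the encode-on-disk flag
    admin.modifyColumn(TABLE, hcd);  // push the updated column descriptor
    admin.enableTable(TABLE);        // bring the regions back on line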
@@ -76,6 +87,10 @@ public class TestLoadAndSwitchEncodeOnDisk extends
     System.err.println("\nNew column descriptor: " +
         getColumnDesc(admin) + "\n");
 
+    // The table may not have all regions on line yet. Assert online before
+    // moving to major compact.
+    assertAllOnLine(t);
+
     System.err.println("\nCompacting the table\n");
     admin.majorCompact(TABLE);
 
     // Wait until compaction completes
@@ -88,4 +103,15 @@ public class TestLoadAndSwitchEncodeOnDisk extends
     System.err.println("\nDone with the test, shutting down the cluster\n");
   }
 
+  private void assertAllOnLine(final HTable t) throws IOException {
+    NavigableMap<HRegionInfo, ServerName> regions = t.getRegionLocations();
+    for (Map.Entry<HRegionInfo, ServerName> e: regions.entrySet()) {
+      byte [] startkey = e.getKey().getStartKey();
+      Scan s = new Scan(startkey);
+      ResultScanner scanner = t.getScanner(s);
+      Result r = scanner.next();
+      org.junit.Assert.assertTrue(r != null && r.size() > 0);
+      scanner.close();
+    }
+  }
 }
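
assertAllOnLine fails the test immediately if any region cannot answer a scan of its start key. Where a fail-fast probe is too brittle, a polling wrapper is one alternative; a minimal sketch (the method name, timeout, retry loop, and Java 7 multi-catch are assumptions, not part of this commit):

    // Hypothetical retry wrapper around the probe above; not in the commit.
    private void waitAllOnLine(final HTable t, final long timeoutMs)
        throws IOException, InterruptedException {
      final long deadline = System.currentTimeMillis() + timeoutMs;
      while (true) {
        try {
          assertAllOnLine(t); // every region answered a scan
          return;
        } catch (AssertionError | IOException e) {
          if (System.currentTimeMillis() > deadline) throw e;
          Thread.sleep(200); // back off before probing the regions again
        }
      }
    }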