HBASE-5811 TestLoadAndSwitchEncodeOnDisk fails sometimes
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1327696 13f79535-47bb-0310-9956-ffa450edef68
parent 56a4420af1
commit 7972efeea8
src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java

@@ -1346,8 +1346,7 @@ public class AssignmentManager extends ZooKeeperListener {
    * @param region
    * @param setOfflineInZK
    * @param forceNewPlan
-   * @param hijack
-   *          - true new assignment is needed, false otherwise
+   * @param hijack True if new assignment is needed, false otherwise
    */
   public void assign(HRegionInfo region, boolean setOfflineInZK,
       boolean forceNewPlan, boolean hijack) {
@@ -1598,20 +1597,17 @@ public class AssignmentManager extends ZooKeeperListener {
     if (setOfflineInZK) {
       // get the version of the znode after setting it to OFFLINE.
       // versionOfOfflineNode will be -1 if the znode was not set to OFFLINE
-      versionOfOfflineNode = setOfflineInZooKeeper(state,
-        hijack);
-      if(versionOfOfflineNode != -1){
+      versionOfOfflineNode = setOfflineInZooKeeper(state, hijack);
+      if (versionOfOfflineNode != -1) {
         if (isDisabledorDisablingRegionInRIT(region)) {
           return;
         }
         setEnabledTable(region);
       }
     }
-
     if (setOfflineInZK && versionOfOfflineNode == -1) {
       return;
     }
-
     if (this.master.isStopped()) {
       LOG.debug("Server stopped; skipping assign of " + state);
       return;
@@ -2218,12 +2214,16 @@ public class AssignmentManager extends ZooKeeperListener {
     LOG.info("Bulk assigning done");
   }
 
+  // TODO: This method seems way wrong. Why would we mark a table enabled based
+  // off a single region? We seem to call this on bulk assign on startup which
+  // isn't too bad but then its also called in assign. It makes the enabled
+  // flag up in zk meaningless. St.Ack
   private void setEnabledTable(HRegionInfo hri) {
     String tableName = hri.getTableNameAsString();
     boolean isTableEnabled = this.zkTable.isEnabledTable(tableName);
     if (!isTableEnabled) {
       setEnabledTable(tableName);
     }
   }
 
   /**
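The AssignmentManager change above is easiest to read as a control-flow fix: the OFFLINE znode version is now fetched in a single call, the table is marked enabled only when the znode transition succeeded and the table is not disabled/disabling, and assignment is abandoned when the transition failed. Below is a minimal, self-contained Java sketch of that flow; the class and its stub methods (setOfflineInZooKeeper, isDisabledorDisablingRegionInRIT, setEnabledTable all return canned values here) are illustrative stand-ins for the real AssignmentManager members, not HBase code.

public class AssignFlowSketch {
  // Stub standing in for the ZK call: returns the OFFLINE znode version,
  // or -1 when the znode could not be transitioned to OFFLINE.
  private int setOfflineInZooKeeper(String region, boolean hijack) {
    return hijack ? -1 : 1;
  }

  // Stub: true when the region's table is disabled or disabling.
  private boolean isDisabledorDisablingRegionInRIT(String region) {
    return false;
  }

  // Stub for the zk "enabled" flag update the TODO above questions.
  private void setEnabledTable(String region) {
    System.out.println("marking table of " + region + " enabled in zk");
  }

  public void assign(String region, boolean setOfflineInZK, boolean hijack) {
    int versionOfOfflineNode = -1;
    if (setOfflineInZK) {
      // versionOfOfflineNode is -1 if the znode was not set to OFFLINE
      versionOfOfflineNode = setOfflineInZooKeeper(region, hijack);
      if (versionOfOfflineNode != -1) {
        if (isDisabledorDisablingRegionInRIT(region)) {
          return; // never mark a disabled/disabling table enabled
        }
        setEnabledTable(region);
      }
    }
    if (setOfflineInZK && versionOfOfflineNode == -1) {
      return; // could not claim the region in ZK; abandon this attempt
    }
    System.out.println("proceeding with assignment of " + region);
  }

  public static void main(String[] args) {
    new AssignFlowSketch().assign("testRegion", true, false);
  }
}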
src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java

@@ -16,12 +16,21 @@
  */
 package org.apache.hadoop.hbase.io.encoding;
 
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Map;
+import java.util.NavigableMap;
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -64,8 +73,10 @@ public class TestLoadAndSwitchEncodeOnDisk extends
     super.loadTest();
 
     HColumnDescriptor hcd = getColumnDesc(admin);
-    System.err.println("\nDisabling encode-on-disk. Old column descriptor: " +
-        hcd + "\n");
+    System.err.println("\nDisabling encode-on-disk. Old column descriptor: " + hcd + "\n");
+    HTable t = new HTable(this.conf, TABLE);
+    assertAllOnLine(t);
+
     admin.disableTable(TABLE);
     hcd.setEncodeOnDisk(false);
     admin.modifyColumn(TABLE, hcd);
@@ -76,6 +87,10 @@ public class TestLoadAndSwitchEncodeOnDisk extends
     System.err.println("\nNew column descriptor: " +
         getColumnDesc(admin) + "\n");
 
+    // The table may not have all regions on line yet. Assert online before
+    // moving to major compact.
+    assertAllOnLine(t);
+
     System.err.println("\nCompacting the table\n");
     admin.majorCompact(TABLE);
     // Wait until compaction completes
@@ -88,4 +103,15 @@ public class TestLoadAndSwitchEncodeOnDisk extends
     System.err.println("\nDone with the test, shutting down the cluster\n");
   }
 
+  private void assertAllOnLine(final HTable t) throws IOException {
+    NavigableMap<HRegionInfo, ServerName> regions = t.getRegionLocations();
+    for (Map.Entry<HRegionInfo, ServerName> e: regions.entrySet()) {
+      byte [] startkey = e.getKey().getStartKey();
+      Scan s = new Scan(startkey);
+      ResultScanner scanner = t.getScanner(s);
+      Result r = scanner.next();
+      org.junit.Assert.assertTrue(r != null && r.size() > 0);
+      scanner.close();
+    }
+  }
 }
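The new assertAllOnLine helper is the heart of the fix: after the table is re-enabled, regions may still be opening, so the test scans from each region's start key and asserts a row comes back before it triggers the major compaction. A hypothetical variant, sketched below, polls with a deadline instead of asserting once; it relies only on the same 0.94-era client calls the patch itself uses (HTable#getRegionLocations, Scan, ResultScanner), while the class name, method names, and the 200 ms backoff are invented for illustration.

import java.io.IOException;
import java.util.Map;
import java.util.NavigableMap;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;

public class WaitAllOnLine {
  // True when a scan started at each region's start key returns a row,
  // i.e. every region is open and serving data.
  static boolean allRegionsServing(final HTable t) throws IOException {
    NavigableMap<HRegionInfo, ServerName> regions = t.getRegionLocations();
    for (Map.Entry<HRegionInfo, ServerName> e : regions.entrySet()) {
      Scan s = new Scan(e.getKey().getStartKey());
      ResultScanner scanner = t.getScanner(s);
      try {
        Result r = scanner.next();
        if (r == null || r.size() < 1) return false; // region not serving yet
      } finally {
        scanner.close();
      }
    }
    return true;
  }

  // Poll until every region serves data, failing only after the deadline.
  static void waitAllOnLine(final HTable t, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!allRegionsServing(t)) {
      if (System.currentTimeMillis() > deadline) {
        throw new IOException("Regions not all on line after " + timeoutMs + "ms");
      }
      Thread.sleep(200); // back off before re-checking
    }
  }
}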