From 7972efeea87273d764cd20b81d6a8f6158821e2e Mon Sep 17 00:00:00 2001
From: Michael Stack
Date: Wed, 18 Apr 2012 21:57:23 +0000
Subject: [PATCH] HBASE-5811 TestLoadAndSwitchEncodeOnDisk fails sometimes

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1327696 13f79535-47bb-0310-9956-ffa450edef68
---
 .../hbase/master/AssignmentManager.java  | 16 ++++++++--------
 .../TestLoadAndSwitchEncodeOnDisk.java   | 30 ++++++++++++++++++++++++++++--
 2 files changed, 36 insertions(+), 10 deletions(-)

diff --git a/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 6e214c8f092..7239c5a12b1 100644
--- a/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -1346,8 +1346,7 @@ public class AssignmentManager extends ZooKeeperListener {
    * @param region
    * @param setOfflineInZK
    * @param forceNewPlan
-   * @param hijack
-   *          - true new assignment is needed, false otherwise
+   * @param hijack True if new assignment is needed, false otherwise
    */
   public void assign(HRegionInfo region, boolean setOfflineInZK,
       boolean forceNewPlan, boolean hijack) {
@@ -1598,20 +1597,17 @@
       if (setOfflineInZK) {
         // get the version of the znode after setting it to OFFLINE.
         // versionOfOfflineNode will be -1 if the znode was not set to OFFLINE
-        versionOfOfflineNode = setOfflineInZooKeeper(state,
-          hijack);
-        if(versionOfOfflineNode != -1){
+        versionOfOfflineNode = setOfflineInZooKeeper(state, hijack);
+        if (versionOfOfflineNode != -1) {
           if (isDisabledorDisablingRegionInRIT(region)) {
             return;
           }
           setEnabledTable(region);
         }
       }
-
       if (setOfflineInZK && versionOfOfflineNode == -1) {
         return;
       }
-
       if (this.master.isStopped()) {
         LOG.debug("Server stopped; skipping assign of " + state);
         return;
@@ -2218,12 +2214,16 @@
     LOG.info("Bulk assigning done");
   }
 
+  // TODO: This method seems way wrong. Why would we mark a table enabled based
+  // off a single region? We seem to call this on bulk assign on startup which
+  // isn't too bad but then its also called in assign. It makes the enabled
+  // flag up in zk meaningless.  St.Ack
   private void setEnabledTable(HRegionInfo hri) {
     String tableName = hri.getTableNameAsString();
     boolean isTableEnabled = this.zkTable.isEnabledTable(tableName);
     if (!isTableEnabled) {
       setEnabledTable(tableName);
-    } 
+    }
   }
 
   /**
diff --git a/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java b/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java
index 4e63608bd7a..7fd0fa7d5ea 100644
--- a/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java
+++ b/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java
@@ -16,12 +16,21 @@
  */
 package org.apache.hadoop.hbase.io.encoding;
 
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Map;
+import java.util.NavigableMap;
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -64,8 +73,10 @@ public class TestLoadAndSwitchEncodeOnDisk extends
     super.loadTest();
 
     HColumnDescriptor hcd = getColumnDesc(admin);
-    System.err.println("\nDisabling encode-on-disk. Old column descriptor: " +
-        hcd + "\n");
+    System.err.println("\nDisabling encode-on-disk. Old column descriptor: " + hcd + "\n");
+    HTable t = new HTable(this.conf, TABLE);
+    assertAllOnLine(t);
+
     admin.disableTable(TABLE);
     hcd.setEncodeOnDisk(false);
     admin.modifyColumn(TABLE, hcd);
@@ -76,6 +87,10 @@ public class TestLoadAndSwitchEncodeOnDisk extends
     System.err.println("\nNew column descriptor: " +
         getColumnDesc(admin) + "\n");
 
+    // The table may not have all regions on line yet. Assert online before
+    // moving to major compact.
+    assertAllOnLine(t);
+
     System.err.println("\nCompacting the table\n");
     admin.majorCompact(TABLE);
     // Wait until compaction completes
@@ -88,4 +103,15 @@ public class TestLoadAndSwitchEncodeOnDisk extends
 
     System.err.println("\nDone with the test, shutting down the cluster\n");
   }
+  private void assertAllOnLine(final HTable t) throws IOException {
+    NavigableMap<HRegionInfo, ServerName> regions = t.getRegionLocations();
+    for (Map.Entry<HRegionInfo, ServerName> e: regions.entrySet()) {
+      byte [] startkey = e.getKey().getStartKey();
+      Scan s = new Scan(startkey);
+      ResultScanner scanner = t.getScanner(s);
+      Result r = scanner.next();
+      org.junit.Assert.assertTrue(r != null && r.size() > 0);
+      scanner.close();
+    }
+  }
 }