HBASE-43[45] TestTableIndex and TestTableMapReduce failed in Hudson builds
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@627195 13f79535-47bb-0310-9956-ffa450edef68
commit e6c687d159
parent 8b2c345d4c
@@ -18,6 +18,7 @@ Hbase Change Log
    HBASE-421 TestRegionServerExit broken
    HBASE-426 hbase can't find remote filesystem
    HBASE-437 Clear Command should use system.out (Edward Yoon via Stack)
+   HBASE-43[45] TestTableIndex and TestTableMapReduce failed in Hudson builds
 
 IMPROVEMENTS
    HBASE-415 Rewrite leases to use DelayedBlockingQueue instead of polling
@@ -88,11 +88,17 @@ public class MultiRegionTable extends HBaseTestCase {
     // with EMPTY_START_ROW will be one of the unsplittable daughters.
     HRegionInfo hri = null;
     HRegion r = null;
+    HRegionServer server = cluster.getRegionThreads().get(0).getRegionServer();
     for (int i = 0; i < 30; i++) {
-      hri = t.getRegionLocation(HConstants.EMPTY_START_ROW).getRegionInfo();
+      try {
+        hri = t.getRegionLocation(HConstants.EMPTY_START_ROW).getRegionInfo();
+      } catch (IOException e) {
+        e = RemoteExceptionHandler.checkIOException(e);
+        e.printStackTrace();
+        continue;
+      }
       LOG.info("Region location: " + hri);
-      r = cluster.getRegionThreads().get(0).getRegionServer().
-        onlineRegions.get(hri.getRegionName());
+      r = server.onlineRegions.get(hri.getRegionName());
       if (r != null) {
         break;
       }
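The hunk above fixes a race: while the table is mid-split, the region lookup can throw a transient IOException, which previously aborted the test outright. The patch unwraps any wrapped RemoteException via RemoteExceptionHandler.checkIOException and retries, and it caches the HRegionServer reference instead of re-fetching it from the cluster on every pass. A minimal, self-contained sketch of the same retry idiom follows; the TransientRetry class and its names are illustrative, not part of the patch:

    import java.io.IOException;
    import java.util.concurrent.Callable;

    /** Retry a lookup that may fail transiently, as the patched loop does.
     *  Assumes attempts >= 1. */
    public final class TransientRetry {
      public static <T> T retry(Callable<T> lookup, int attempts, long sleepMs)
          throws Exception {
        IOException last = null;
        for (int i = 0; i < attempts; i++) {
          try {
            return lookup.call();   // succeeds once the region is back online
          } catch (IOException e) {
            last = e;               // treat as transient: remember and retry
            Thread.sleep(sleepMs);
          }
        }
        throw last;                 // retries exhausted: surface the failure
      }
    }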
@@ -102,10 +108,10 @@ public class MultiRegionTable extends HBaseTestCase {
         LOG.warn("Waiting on region to come online", e);
       }
     }
     assertNotNull(r);
+
     // Flush the cache
-    cluster.getRegionThreads().get(0).getRegionServer().getCacheFlushListener().
-      flushRequested(r);
+    server.getCacheFlushListener().flushRequested(r);
 
     // Now, wait until split makes it into the meta table.
     int oldCount = count;

@@ -158,7 +164,8 @@ public class MultiRegionTable extends HBaseTestCase {
     // still has references.
     while (true) {
       data = getSplitParentInfo(meta, parent);
-      if (data == null || data.size() == 3) {
+      if (data != null && data.size() == 3) {
         LOG.info("Waiting for splitA to release reference to parent");
         try {
           Thread.sleep(waitTime);
         } catch (InterruptedException e) {

@@ -168,7 +175,9 @@ public class MultiRegionTable extends HBaseTestCase {
         }
         break;
       }
-      LOG.info("Parent split info returned " + data.keySet().toString());
+      if (data != null) {
+        LOG.info("Parent split info returned " + data.keySet().toString());
+      }
     }
 
     if (splitB == null) {
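The last two hunks fix the wait-for-split loop: the old condition `data == null || data.size() == 3` took the waiting branch even when the parent meta row had already disappeared, and the unconditional `data.keySet()` log line could throw a NullPointerException for the same reason. The patch waits only while the row is present and still carries all three split columns, and guards the log. A tiny sketch of the corrected guard (class and method names are illustrative):

    import java.util.Map;

    public final class SplitWaitGuard {
      /** Keep waiting only while the parent row exists and still holds
       *  all three split columns. */
      static boolean stillReferenced(Map<String, byte[]> data) {
        return data != null && data.size() == 3;
      }

      public static void main(String[] args) {
        System.out.println(stillReferenced(null)); // false: parent row gone
      }
    }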
@@ -199,8 +208,10 @@ public class MultiRegionTable extends HBaseTestCase {
 
     for (int i = 0; i < retries; i++) {
       if (!fs.exists(parentDir)) {
+        LOG.info("Parent directory was deleted. tries=" + i);
         break;
       }
+      LOG.info("Waiting for parent directory to be deleted. tries=" + i);
       try {
         Thread.sleep(waitTime);
       } catch (InterruptedException e) {

@@ -260,8 +271,7 @@ public class MultiRegionTable extends HBaseTestCase {
         continue;
       }
       // Make sure I get the parent.
-      if (hri.getRegionName().toString().
-        equals(parent.getRegionName().toString()) &&
+      if (hri.getRegionName().equals(parent.getRegionName()) &&
           hri.getRegionId() == parent.getRegionId()) {
         return curVals;
       }

@@ -316,8 +326,7 @@ public class MultiRegionTable extends HBaseTestCase {
    * @throws IOException
    */
   protected static void compact(final MiniHBaseCluster cluster,
-      final HRegionInfo r)
-  throws IOException {
+      final HRegionInfo r) throws IOException {
     if (r == null) {
       LOG.debug("Passed region is null");
       return;

@@ -332,8 +341,7 @@ public class MultiRegionTable extends HBaseTestCase {
     for (int i = 0; i < 10; i++) {
       try {
         for (HRegion online: regions.values()) {
-          if (online.getRegionName().toString().
-            equals(r.getRegionName().toString())) {
+          if (online.getRegionName().equals(r.getRegionName())) {
             online.compactStores();
           }
         }
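Two of the hunks above replace `getRegionName().toString().equals(...)` with a direct `equals()` on the names themselves. Region names here are org.apache.hadoop.io.Text values, and Text compares its underlying bytes in equals(), so the toString() detour only added two throwaway String allocations per loop iteration. A runnable illustration (the example region name is made up):

    import org.apache.hadoop.io.Text;

    public final class RegionNameCompare {
      public static void main(String[] args) {
        Text a = new Text("table,,1203069023");
        Text b = new Text("table,,1203069023");
        System.out.println(a.toString().equals(b.toString())); // true, allocates
        System.out.println(a.equals(b));                       // true, byte-level
      }
    }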
@@ -31,6 +31,7 @@ import junit.textui.TestRunner;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.dfs.MiniDFSCluster;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseAdmin;

@@ -96,6 +97,9 @@ public class TestTableIndex extends MultiRegionTable {
     // below. After adding all data, the first region is 1.3M
     conf.setLong("hbase.hregion.max.filesize", 1024 * 1024);
+
+    // Always compact if there is more than one store file.
+    conf.setInt("hbase.hstore.compactionThreshold", 2);
 
     desc = new HTableDescriptor(TABLE_NAME);
     desc.addFamily(new HColumnDescriptor(INPUT_COLUMN));
     desc.addFamily(new HColumnDescriptor(OUTPUT_COLUMN));
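The second hunk above tunes TestTableIndex so the table splits and compacts aggressively: store files are capped at 1MB and a compaction is requested as soon as a store holds more than one file. A self-contained sketch of the same tuning; the property names are verbatim from the hunk, and using a plain Hadoop Configuration rather than the test's HBase configuration object is an assumption made to keep the sketch compilable on its own:

    import org.apache.hadoop.conf.Configuration;

    public final class SplitFriendlyConf {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setLong("hbase.hregion.max.filesize", 1024 * 1024); // split at ~1MB
        conf.setInt("hbase.hstore.compactionThreshold", 2);      // compact early
        System.out.println(conf.get("hbase.hregion.max.filesize"));
      }
    }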
@@ -160,7 +164,6 @@ public class TestTableIndex extends MultiRegionTable {
     }
     scanTable(printResults);
 
-    @SuppressWarnings("deprecation")
     MiniMRCluster mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 1);
 
     // set configuration parameter for index build

@@ -267,17 +270,17 @@ public class TestTableIndex extends MultiRegionTable {
       Integer.toString(new Random().nextInt()));
     this.fs.copyToLocalFile(new Path(INDEX_DIR), localDir);
     FileSystem localfs = FileSystem.getLocal(conf);
-    Path [] indexDirs = localfs.listPaths(new Path [] {localDir});
+    FileStatus [] indexDirs = localfs.listStatus(localDir);
     Searcher searcher = null;
     HScannerInterface scanner = null;
     try {
       if (indexDirs.length == 1) {
-        searcher = new IndexSearcher((new File(indexDirs[0].
+        searcher = new IndexSearcher((new File(indexDirs[0].getPath().
           toUri())).getAbsolutePath());
       } else if (indexDirs.length > 1) {
         Searchable[] searchers = new Searchable[indexDirs.length];
         for (int i = 0; i < indexDirs.length; i++) {
-          searchers[i] = new IndexSearcher((new File(indexDirs[i].
+          searchers[i] = new IndexSearcher((new File(indexDirs[i].getPath().
             toUri()).getAbsolutePath()));
         }
         searcher = new MultiSearcher(searchers);
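The last hunk above migrates off the deprecated FileSystem.listPaths(), which returned Path[]; listStatus() returns FileStatus[], so each entry's Path is now reached through getPath() before building the Lucene IndexSearcher. A minimal sketch of the new listing call (the "/tmp" argument is just an example):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ListIndexDirs {
      public static void main(String[] args) throws IOException {
        FileSystem localfs = FileSystem.getLocal(new Configuration());
        for (FileStatus status : localfs.listStatus(new Path("/tmp"))) {
          // FileStatus wraps the Path plus length, ownership, etc.
          System.out.println(status.getPath().toUri());
        }
      }
    }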
@@ -301,7 +304,6 @@ public class TestTableIndex extends MultiRegionTable {
       int count = 0;
       while (scanner.next(key, results)) {
         String value = key.getRow().toString();
-        LOG.debug("Scanned over " + key.getRow());
         Term term = new Term(rowkeyName, value);
         int hitCount = searcher.search(new TermQuery(term)).length();
         assertEquals("check row " + value, 1, hitCount);
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.MultiRegionTable;
 import org.apache.hadoop.hbase.StaticTestEnvironment;
+import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.io.MapWritable;
 import org.apache.hadoop.io.Text;

@@ -99,7 +100,7 @@ public class TestTableMapReduce extends MultiRegionTable {
 
     // This size should make it so we always split using the addContent
     // below. After adding all data, the first region is 1.3M
-    conf.setLong("hbase.hregion.max.filesize", 256 * 1024);
+    conf.setLong("hbase.hregion.max.filesize", 1024 * 1024);
 
     // Make lease timeout longer, lease checks less frequent
     conf.setInt("hbase.master.lease.period", 10 * 1000);

@@ -156,7 +157,7 @@ public class TestTableMapReduce extends MultiRegionTable {
   /**
    * Pass the key, and reversed value to reduce
    *
-   * @see org.apache.hadoop.hbase.mapred.TableMap#map(org.apache.hadoop.hbase.HStoreKey, org.apache.hadoop.io.MapWritable, org.apache.hadoop.hbase.mapred.TableOutputCollector, org.apache.hadoop.mapred.Reporter)
+   * @see org.apache.hadoop.hbase.mapred.TableMap#map(org.apache.hadoop.hbase.HStoreKey, org.apache.hadoop.io.MapWritable, org.apache.hadoop.mapred.OutputCollector, org.apache.hadoop.mapred.Reporter)
    */
   @SuppressWarnings("unchecked")
   @Override

@@ -222,17 +223,11 @@ public class TestTableMapReduce extends MultiRegionTable {
     HTable table = new HTable(conf, new Text(SINGLE_REGION_TABLE_NAME));
 
     for(int i = 0; i < values.length; i++) {
-      long lockid = table.startUpdate(new Text("row_"
-        + String.format("%1$05d", i)));
+      BatchUpdate b = new BatchUpdate(new Text("row_" +
+        String.format("%1$05d", i)));
 
-      try {
-        table.put(lockid, TEXT_INPUT_COLUMN, values[i]);
-        table.commit(lockid, System.currentTimeMillis());
-        lockid = -1;
-      } finally {
-        if (lockid != -1)
-          table.abort(lockid);
-      }
+      b.put(TEXT_INPUT_COLUMN, values[i]);
+      table.commit(b);
     }
 
     LOG.info("Print table contents before map/reduce for " +
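The final hunk moves TestTableMapReduce onto the BatchUpdate write path: instead of hand-managing a row lock via startUpdate/put/commit and aborting it in a finally block, the test builds a BatchUpdate for the row and commits it in one call. A sketch using only the calls visible in the diff; the HTable import path is an assumption based on the test's other imports from this era:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HTable;
    import org.apache.hadoop.hbase.io.BatchUpdate;
    import org.apache.hadoop.io.Text;

    public final class RowWriter {
      /** Write one column of one row, BatchUpdate-style. */
      static void writeRow(HTable table, Text row, Text column, byte[] value)
          throws IOException {
        BatchUpdate b = new BatchUpdate(row); // edits buffered client-side
        b.put(column, value);
        table.commit(b);                      // no lock id to track or abort
      }
    }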