HADOOP-1847 Many HBase tests do not fail well.

HADOOP-1793 (Phase 1) Remove TestHClient

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@573492 13f79535-47bb-0310-9956-ffa450edef68
Jim Kellerman 2007-09-07 07:28:42 +00:00
parent 84ef0f95ef
commit 6626ee072a
18 changed files with 354 additions and 450 deletions
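
The common thread in the diffs below is the HADOOP-1847 cleanup: many tests wrapped their bodies in try/catch, printed the stack trace, and called fail(), so JUnit reported only a bare AssertionFailedError while the real cause went to stdout. Letting checked exceptions propagate through a throws clause makes the test fail with the actual error. A minimal before/after sketch (class and helper names are hypothetical, not from this commit):

package org.apache.hadoop.hbase;

import java.io.IOException;

import junit.framework.TestCase;

/** Hypothetical illustration of the "fail well" pattern this commit applies. */
public class FailWellExample extends TestCase {

  /** Before: the cause goes only to stdout; JUnit reports an anonymous failure. */
  public void testFailsBadly() {
    try {
      doSomethingThatMayThrow();
    } catch (IOException e) {
      e.printStackTrace();
      fail();
    }
  }

  /** After: the exception propagates and JUnit reports its full stack trace. */
  public void testFailsWell() throws IOException {
    doSomethingThatMayThrow();
  }

  private void doSomethingThatMayThrow() throws IOException {
    throw new IOException("simulated failure");
  }
}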

CHANGES.txt

@ -31,6 +31,7 @@ Trunk (unreleased changes)
HADOOP-1821 Replace all String.getBytes() with String.getBytes("UTF-8")
HADOOP-1832 listTables() returns duplicate tables
HADOOP-1834 Scanners ignore timestamp passed on creation
HADOOP-1847 Many HBase tests do not fail well.
IMPROVEMENTS
HADOOP-1737 Make HColumnDescriptor data members publicly settable
@ -39,6 +40,7 @@ Trunk (unreleased changes)
filter types
HADOOP-1760 Use new MapWritable and SortedMapWritable classes from
org.apache.hadoop.io
HADOOP-1793 (Phase 1) Remove TestHClient
HADOOP-1794 Remove deprecated APIs
HADOOP-1802 Startup scripts should wait until hdfs has cleared 'safe mode'
HADOOP-1835 Updated Documentation for HBase setup/installation

AbstractMergeTestBase.java

@ -20,7 +20,6 @@
package org.apache.hadoop.hbase;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.Random;
import org.apache.hadoop.dfs.MiniDFSCluster;
@ -32,7 +31,7 @@ import org.apache.hadoop.io.Text;
/** Abstract base class for merge tests */
public abstract class AbstractMergeTestBase extends HBaseTestCase {
protected static final Text COLUMN_NAME = new Text("contents:");
protected Random rand;
protected final Random rand = new Random();
protected HTableDescriptor desc;
protected ImmutableBytesWritable value;
@ -46,7 +45,6 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
@Override
public void setUp() throws Exception {
super.setUp();
rand = new Random();
desc = new HTableDescriptor("test");
desc.addFamily(new HColumnDescriptor(COLUMN_NAME.toString()));
@ -57,23 +55,11 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
while(val.length() < 1024) {
val.append(partialValue);
}
try {
value = new ImmutableBytesWritable(val.toString().getBytes(HConstants.UTF8_ENCODING));
} catch(UnsupportedEncodingException e) {
fail();
}
value = new ImmutableBytesWritable(
val.toString().getBytes(HConstants.UTF8_ENCODING));
try {
dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
fs = dfsCluster.getFileSystem();
dir = new Path("/hbase");
fs.mkdirs(dir);
} catch(Throwable t) {
t.printStackTrace();
fail();
}
// We create three data regions: The first is too large to merge since it
// will be > 64 MB in size. The second two will be smaller and will be
@ -83,6 +69,10 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
// least 65536 rows. We will make certain by writing 70000
try {
fs = dfsCluster.getFileSystem();
dir = new Path("/hbase");
fs.mkdirs(dir);
Text row_70001 = new Text("row_70001");
Text row_80001 = new Text("row_80001");
@ -95,8 +85,11 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
// Now create the root and meta regions and insert the data regions
// created above into the meta
HRegion root = createNewHRegion(dir, conf, HGlobals.rootTableDesc, 0L, null, null);
HRegion meta = createNewHRegion(dir, conf, HGlobals.metaTableDesc, 1L, null, null);
HRegion root =
createNewHRegion(dir, conf, HGlobals.rootTableDesc, 0L, null, null);
HRegion meta =
createNewHRegion(dir, conf, HGlobals.metaTableDesc, 1L, null, null);
HRegion.addRegionToMETA(root, meta);
@ -109,12 +102,11 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
meta.close();
meta.getLog().closeAndDelete();
} catch(Throwable t) {
t.printStackTrace();
} catch (Exception e) {
if(dfsCluster != null) {
dfsCluster.shutdown();
}
fail();
throw e;
}
}
@ -124,13 +116,16 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
@Override
public void tearDown() throws Exception {
super.tearDown();
if (dfsCluster != null) {
dfsCluster.shutdown();
}
}
private HRegion createAregion(Text startKey, Text endKey, int firstRow, int nrows)
throws IOException {
private HRegion createAregion(Text startKey, Text endKey, int firstRow,
int nrows) throws IOException {
HRegion region = createNewHRegion(dir, conf, desc, rand.nextLong(), startKey, endKey);
HRegion region =
createNewHRegion(dir, conf, desc, rand.nextLong(), startKey, endKey);
System.out.println("created region " + region.getRegionName());

HBaseTestCase.java

@ -70,14 +70,10 @@ public abstract class HBaseTestCase extends TestCase {
@Override
protected void tearDown() throws Exception {
try {
if (this.localFs != null && this.testDir != null &&
this.localFs.exists(testDir)) {
this.localFs.delete(testDir);
}
} catch (Exception e) {
e.printStackTrace();
}
super.tearDown();
}

MiniHBaseCluster.java

@ -62,6 +62,7 @@ public class MiniHBaseCluster implements HConstants {
*/
public MiniHBaseCluster(Configuration conf, int nRegionNodes)
throws IOException {
this(conf, nRegionNodes, true, true, true);
}
@ -76,6 +77,7 @@ public class MiniHBaseCluster implements HConstants {
*/
public MiniHBaseCluster(Configuration conf, int nRegionNodes,
final boolean miniHdfsFilesystem) throws IOException {
this(conf, nRegionNodes, miniHdfsFilesystem, true, true);
}
@ -88,8 +90,7 @@ public class MiniHBaseCluster implements HConstants {
* @throws IOException
*/
public MiniHBaseCluster(Configuration conf, int nRegionNodes,
MiniDFSCluster dfsCluster)
throws IOException {
MiniDFSCluster dfsCluster) throws IOException {
this.conf = conf;
this.cluster = dfsCluster;
@ -110,33 +111,23 @@ public class MiniHBaseCluster implements HConstants {
public MiniHBaseCluster(Configuration conf, int nRegionNodes,
final boolean miniHdfsFilesystem, boolean format, boolean deleteOnExit)
throws IOException {
this.conf = conf;
this.deleteOnExit = deleteOnExit;
if (miniHdfsFilesystem) {
try {
this.cluster = new MiniDFSCluster(this.conf, 2, format, (String[])null);
} catch(Throwable t) {
LOG.error("Failed setup of mini dfs cluster", t);
t.printStackTrace();
return;
}
}
init(nRegionNodes);
}
private void init(final int nRegionNodes)
throws IOException {
try {
private void init(final int nRegionNodes) throws IOException {
try {
this.fs = FileSystem.get(conf);
this.parentdir = new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR));
fs.mkdirs(parentdir);
} catch(IOException e) {
LOG.error("Failed setup of FileSystem", e);
throw e;
}
this.masterThread = startMaster(this.conf);
this.regionThreads = startRegionServers(this.conf, nRegionNodes);
} catch(IOException e) {
shutdown();
throw e;
@ -200,6 +191,7 @@ public class MiniHBaseCluster implements HConstants {
*/
public static MasterThread startMaster(final Configuration c)
throws IOException {
if(c.get(MASTER_ADDRESS) == null) {
c.set(MASTER_ADDRESS, "localhost:0");
}
@ -222,8 +214,8 @@ public class MiniHBaseCluster implements HConstants {
* @see #startMaster(Configuration)
*/
public static ArrayList<RegionServerThread> startRegionServers(
final Configuration c, final int count)
throws IOException {
final Configuration c, final int count) throws IOException {
// Start the HRegionServers. Always have regionservers come up on
// port '0' so there won't be clashes over default port as unit tests
// start/stop ports at different times during the life of the test.
@ -249,8 +241,8 @@ public class MiniHBaseCluster implements HConstants {
}
private static RegionServerThread startRegionServer(final Configuration c,
final int index)
throws IOException {
final int index) throws IOException {
final HRegionServer hsr = new HRegionServer(c);
RegionServerThread t = new RegionServerThread(hsr, index);
t.start();
@ -362,20 +354,25 @@ public class MiniHBaseCluster implements HConstants {
}
void shutdown() {
shutdown(this.masterThread, this.regionThreads);
// Close the file system. Will complain if files open so helps w/ leaks.
MiniHBaseCluster.shutdown(this.masterThread, this.regionThreads);
try {
if (this.cluster != null && this.cluster.getFileSystem() != null) {
this.cluster.getFileSystem().close();
}
} catch (IOException e) {
LOG.error("Closing down dfs", e);
}
if(cluster != null) {
if (cluster != null) {
FileSystem fs = cluster.getFileSystem();
LOG.info("Shutting down Mini DFS cluster");
cluster.shutdown();
if (fs != null) {
LOG.info("Shutting down FileSystem");
fs.close();
}
}
} catch (IOException e) {
LOG.error("shutdown", e);
} finally {
// Delete all DFS files
if(deleteOnExit) {
deleteFile(new File(System.getProperty(
@ -383,6 +380,8 @@ public class MiniHBaseCluster implements HConstants {
}
}
}
private void deleteFile(File f) {
if(f.isDirectory()) {
File[] children = f.listFiles();
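
For orientation, a minimal sketch of how a test might drive this class, using only the API visible in this diff (the five-argument constructor and the package-private shutdown(); the class name is hypothetical). TestMergeTable further down uses the three-argument MiniDFSCluster form the same way.

package org.apache.hadoop.hbase;

/** Hypothetical usage sketch; relies only on members shown in this diff. */
public class MiniClusterUsageSketch {
  public static void main(String[] args) throws Exception {
    HBaseConfiguration conf = new HBaseConfiguration();
    // One region server on a freshly formatted mini-HDFS; delete files on exit.
    MiniHBaseCluster cluster = new MiniHBaseCluster(conf, 1, true, true, true);
    try {
      // ... run HBaseAdmin/HTable client code against the cluster ...
    } finally {
      // Also closes the FileSystem, which "will complain if files open".
      cluster.shutdown();
    }
  }
}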

MultiRegionTable.java

@ -34,11 +34,12 @@ public class MultiRegionTable extends HBaseTestCase {
*/
public static void makeMultiRegionTable(Configuration conf,
MiniHBaseCluster cluster, FileSystem localFs, String tableName,
String columnName)
throws IOException {
String columnName) throws IOException {
// This size should make it so we always split using the addContent
// below. After adding all data, the first region is 1.3M. Should
// set max filesize to be <= 1M.
assertTrue(conf.getLong("hbase.hregion.max.filesize",
HConstants.DEFAULT_MAX_FILE_SIZE) <= 1024 * 1024);
@ -46,24 +47,33 @@ public class MultiRegionTable extends HBaseTestCase {
Path d = cluster.regionThreads.get(0).getRegionServer().rootDir;
FileSystem fs = (cluster.getDFSCluster() == null) ?
localFs : cluster.getDFSCluster().getFileSystem();
assertTrue(fs != null);
assertNotNull(fs);
// Get connection on the meta table and get count of rows.
HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
int count = count(meta, HConstants.COLUMN_FAMILY_STR);
HTable t = new HTable(conf, new Text(tableName));
addContent(new HTableLoader(t), columnName);
// All is running in the one JVM so I should be able to get the single
// region instance and bring on a split.
HRegionInfo hri =
t.getRegionLocation(HConstants.EMPTY_START_ROW).getRegionInfo();
HRegion r = cluster.regionThreads.get(0).getRegionServer().
onlineRegions.get(hri.getRegionName());
// Flush will provoke a split next time the split-checker thread runs.
r.flushcache(false);
// Now, wait until split makes it into the meta table.
for (int i = 0; i < retries &&
(count(meta, HConstants.COLUMN_FAMILY_STR) <= count); i++) {
for (int i = 0;
i < retries && (count(meta, HConstants.COLUMN_FAMILY_STR) <= count);
i++) {
try {
Thread.sleep(5000);
} catch (InterruptedException e) {
@ -75,9 +85,11 @@ public class MultiRegionTable extends HBaseTestCase {
if (count <= oldCount) {
throw new IOException("Failed waiting on splits to show up");
}
// Get info on the parent from the meta table. Pass in 'hri'. It's the
// region we have been dealing with up to this point. It's the parent of
// the region split.
Map<Text, byte []> data = getSplitParentInfo(meta, hri);
HRegionInfo parent =
Writables.getHRegionInfoOrNull(data.get(HConstants.COL_REGIONINFO));
@ -92,13 +104,19 @@ public class MultiRegionTable extends HBaseTestCase {
LOG.info("Split happened. Parent is " + parent.getRegionName() +
" and daughters are " + splitA.getRegionName() + ", " +
splitB.getRegionName());
// Recalibrate will cause us to wait on new regions' deployment
recalibrate(t, new Text(columnName), retries);
// Compact a region at a time so we can test case where one region has
// no references but the other still has some
compact(cluster, splitA);
// Wait till the parent only has reference to remaining split, one that
// still has references.
while (getSplitParentInfo(meta, parent).size() == 3) {
try {
Thread.sleep(5000);
@ -108,21 +126,28 @@ public class MultiRegionTable extends HBaseTestCase {
}
LOG.info("Parent split returned " +
getSplitParentInfo(meta, parent).keySet().toString());
// Call second split.
compact(cluster, splitB);
// Now wait until parent disappears.
LOG.info("Waiting on parent " + parent.getRegionName() +
" to disappear");
for (int i = 0; i < retries &&
getSplitParentInfo(meta, parent) != null; i++) {
LOG.info("Waiting on parent " + parent.getRegionName() + " to disappear");
for (int i = 0;
i < retries && getSplitParentInfo(meta, parent) != null;
i++) {
try {
Thread.sleep(5000);
} catch (InterruptedException e) {
// continue
}
}
assertTrue(getSplitParentInfo(meta, parent) == null);
assertNull(getSplitParentInfo(meta, parent));
// Assert cleaned up.
for (int i = 0; i < retries && fs.exists(parentDir); i++) {
try {
Thread.sleep(5000);
@ -142,6 +167,7 @@ public class MultiRegionTable extends HBaseTestCase {
*/
private static int count(final HTable t, final String column)
throws IOException {
int size = 0;
Text [] cols = new Text[] {new Text(column)};
HScannerInterface s = t.obtainScanner(cols, HConstants.EMPTY_START_ROW,
@ -162,8 +188,8 @@ public class MultiRegionTable extends HBaseTestCase {
* @return Return row info for passed in region or null if not found in scan.
*/
private static Map<Text, byte []> getSplitParentInfo(final HTable t,
final HRegionInfo parent)
throws IOException {
final HRegionInfo parent) throws IOException {
HScannerInterface s = t.obtainScanner(HConstants.COLUMN_FAMILY_ARRAY,
HConstants.EMPTY_START_ROW, System.currentTimeMillis(), null);
try {
@ -199,6 +225,7 @@ public class MultiRegionTable extends HBaseTestCase {
*/
private static void recalibrate(final HTable t, final Text column,
final int retries) throws IOException {
for (int i = 0; i < retries; i++) {
try {
HScannerInterface s =
@ -229,14 +256,15 @@ public class MultiRegionTable extends HBaseTestCase {
* @throws IOException
*/
private static void compact(final MiniHBaseCluster cluster,
final HRegionInfo r)
throws IOException {
final HRegionInfo r) throws IOException {
LOG.info("Starting compaction");
for (MiniHBaseCluster.RegionServerThread thread: cluster.regionThreads) {
SortedMap<Text, HRegion> regions =
thread.getRegionServer().onlineRegions;
SortedMap<Text, HRegion> regions = thread.getRegionServer().onlineRegions;
// Retry if ConcurrentModification... alternative of sync'ing is not
// worth it for sake of unit test.
for (int i = 0; i < 10; i++) {
try {
for (HRegion online: regions.values()) {
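
The "Retry if ConcurrentModification" comment above captures a deliberate trade-off: rather than synchronize onlineRegions just for a test-only scan, the loop simply retries the whole pass. A standalone sketch of that idiom (types and names are generic, not from this commit):

import java.util.ConcurrentModificationException;
import java.util.Map;

/** Hypothetical sketch of the retry-instead-of-synchronize idiom used above. */
public class RetryScanSketch {
  static void scanWithRetry(Map<String, Object> liveMap) {
    for (int i = 0; i < 10; i++) {           // bounded retries, as in compact()
      try {
        for (Object v : liveMap.values()) {
          // ... inspect each live value ...
        }
        return;                               // full pass completed without interference
      } catch (ConcurrentModificationException e) {
        // another thread mutated the map mid-iteration; retry the pass
      }
    }
  }
}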

TestBatchUpdate.java

@ -19,6 +19,7 @@
*/
package org.apache.hadoop.hbase;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.Map;
import java.util.TreeMap;
@ -35,8 +36,9 @@ public class TestBatchUpdate extends HBaseClusterTestCase {
private HTableDescriptor desc = null;
private HTable table = null;
/** constructor
* @throws UnsupportedEncodingException */
/**
* @throws UnsupportedEncodingException
*/
public TestBatchUpdate() throws UnsupportedEncodingException {
value = "abcd".getBytes(HConstants.UTF8_ENCODING);
}
@ -49,19 +51,15 @@ public class TestBatchUpdate extends HBaseClusterTestCase {
super.setUp();
this.desc = new HTableDescriptor("test");
desc.addFamily(new HColumnDescriptor(CONTENTS_STR));
try {
HBaseAdmin admin = new HBaseAdmin(conf);
admin.createTable(desc);
table = new HTable(conf, desc.getName());
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
/** the test case */
public void testBatchUpdate() {
/**
* @throws IOException
*/
public void testBatchUpdate() throws IOException {
try {
table.commit(-1L);
@ -74,7 +72,6 @@ public class TestBatchUpdate extends HBaseClusterTestCase {
long lockid = table.startUpdate(new Text("row1"));
try {
try {
@SuppressWarnings("unused")
long dummy = table.startUpdate(new Text("row2"));
@ -102,9 +99,5 @@ public class TestBatchUpdate extends HBaseClusterTestCase {
new String(e.getValue(), HConstants.UTF8_ENCODING));
}
}
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
}

TestBloomFilters.java

@ -19,6 +19,7 @@
*/
package org.apache.hadoop.hbase;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.Text;
@ -146,11 +147,15 @@ public class TestBloomFilters extends HBaseClusterTestCase {
conf.set("hbase.regionserver.maxlogentries", "90"); // and roll log too
}
/** Test that specifies explicit parameters for the bloom filter */
public void testExplicitParameters() {
/**
* Test that specifies explicit parameters for the bloom filter
* @throws IOException
*/
public void testExplicitParameters() throws IOException {
HTable table = null;
try {
// Setup
HTableDescriptor desc = new HTableDescriptor(getName());
BloomFilterDescriptor bloomFilter =
new BloomFilterDescriptor( // if we insert 1000 values
@ -187,10 +192,6 @@ public class TestBloomFilters extends HBaseClusterTestCase {
table.put(lockid, CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
table.commit(lockid);
}
} catch (Exception e) {
e.printStackTrace();
fail();
}
try {
// Give cache flusher and log roller a chance to run
// Otherwise we'll never hit the bloom filter, just the memcache
@ -201,8 +202,6 @@ public class TestBloomFilters extends HBaseClusterTestCase {
}
try {
if (table != null) {
for(int i = 0; i < testKeys.length; i++) {
byte[] value = table.get(testKeys[i], CONTENTS);
if(value != null && value.length != 0) {
@ -211,17 +210,16 @@ public class TestBloomFilters extends HBaseClusterTestCase {
}
}
}
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
/** Test that uses computed parameters for the bloom filter */
public void testComputedParameters() {
/**
* Test that uses computed parameters for the bloom filter
* @throws IOException
*/
public void testComputedParameters() throws IOException {
HTable table = null;
try {
// Setup
HTableDescriptor desc = new HTableDescriptor(getName());
BloomFilterDescriptor bloomFilter =
@ -259,10 +257,6 @@ public class TestBloomFilters extends HBaseClusterTestCase {
table.put(lockid, CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
table.commit(lockid);
}
} catch (Exception e) {
e.printStackTrace();
fail();
}
try {
// Give cache flusher and log roller a chance to run
// Otherwise we'll never hit the bloom filter, just the memcache
@ -272,8 +266,6 @@ public class TestBloomFilters extends HBaseClusterTestCase {
// ignore
}
try {
if (table != null) {
for(int i = 0; i < testKeys.length; i++) {
byte[] value = table.get(testKeys[i], CONTENTS);
if(value != null && value.length != 0) {
@ -282,9 +274,4 @@ public class TestBloomFilters extends HBaseClusterTestCase {
}
}
}
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
}

TestCompaction.java

@ -30,13 +30,15 @@ import org.apache.commons.logging.LogFactory;
public class TestCompaction extends HBaseTestCase {
static final Log LOG = LogFactory.getLog(TestCompaction.class.getName());
/** {@inheritDoc} */
@Override
protected void setUp() throws Exception {
public void setUp() throws Exception {
super.setUp();
}
/** {@inheritDoc} */
@Override
protected void tearDown() throws Exception {
public void tearDown() throws Exception {
super.tearDown();
}

TestGet.java

@ -172,10 +172,6 @@ public class TestGet extends HBaseTestCase {
r.close();
log.closeAndDelete();
} catch(IOException e) {
e.printStackTrace();
throw e;
} finally {
if(cluster != null) {
cluster.shutdown();

TestHClient.java (deleted)

@ -1,55 +0,0 @@
/**
* Copyright 2007 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* Test HClient.
*/
@Deprecated
public class TestHClient extends HBaseClusterTestCase {
private Log LOG = LogFactory.getLog(this.getClass().getName());
private HClient client;
/** {@inheritDoc} */
@Override
public void setUp() throws Exception {
super.setUp();
this.client = new HClient(this.conf);
}
/** the test
* @throws Exception
*/
public void testCommandline() throws Exception {
final String m = "--master=" + this.conf.get(HConstants.MASTER_ADDRESS);
LOG.info("Creating table");
// Assert each of below returns 0: i.e. success.
assertEquals("create table", 0,
this.client.doCommandLine(
new String [] {m, "createTable", getName(), "family:", "1"}));
assertEquals("list tables", 0,
this.client.doCommandLine(new String [] {m, "listTables"}));
assertEquals("delete table", 0,
this.client.doCommandLine(new String [] {m, "deleteTable", getName()}));
}
}

TestHLog.java

@ -37,9 +37,10 @@ public class TestHLog extends HBaseTestCase implements HConstants {
super.setUp();
}
/** The test */
public void testAppend() {
try {
/**
* @throws IOException
*/
public void testAppend() throws IOException {
Path dir = getUnitTestdir(getName());
FileSystem fs = FileSystem.get(this.conf);
if (fs.exists(dir)) {
@ -98,10 +99,6 @@ public class TestHLog extends HBaseTestCase implements HConstants {
fs.delete(dir);
}
}
} catch(IOException e) {
e.printStackTrace();
fail();
}
}
/** {@inheritDoc} */

TestHMemcache.java

@ -21,7 +21,6 @@ package org.apache.hadoop.hbase;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.Iterator;
import java.util.Map;
import java.util.TreeMap;
@ -46,11 +45,9 @@ public class TestHMemcache extends TestCase {
private static final String COLUMN_FAMILY = "column";
/* (non-Javadoc)
* @see junit.framework.TestCase#setUp()
*/
/** {@inheritDoc} */
@Override
protected void setUp() throws Exception {
public void setUp() throws Exception {
super.setUp();
this.hmemcache = new HMemcache();
// Set up a configuration that has configuration for a file
@ -58,11 +55,9 @@ public class TestHMemcache extends TestCase {
this.conf = new HBaseConfiguration();
}
/* (non-Javadoc)
* @see junit.framework.TestCase#tearDown()
*/
/** {@inheritDoc} */
@Override
protected void tearDown() throws Exception {
public void tearDown() throws Exception {
super.tearDown();
}
@ -70,10 +65,8 @@ public class TestHMemcache extends TestCase {
return new Text("row" + Integer.toString(index));
}
private Text getColumnName(final int rowIndex,
final int colIndex) {
return new Text(COLUMN_FAMILY + ":" +
Integer.toString(rowIndex) + ";" +
private Text getColumnName(final int rowIndex, final int colIndex) {
return new Text(COLUMN_FAMILY + ":" + Integer.toString(rowIndex) + ";" +
Integer.toString(colIndex));
}
@ -81,16 +74,12 @@ public class TestHMemcache extends TestCase {
* Adds {@link #ROW_COUNT} rows and {@link #COLUMNS_COUNT} columns per row
* @param hmc Instance to add rows to.
*/
private void addRows(final HMemcache hmc) {
private void addRows(final HMemcache hmc) throws UnsupportedEncodingException {
for (int i = 0; i < ROW_COUNT; i++) {
TreeMap<Text, byte []> columns = new TreeMap<Text, byte []>();
for (int ii = 0; ii < COLUMNS_COUNT; ii++) {
Text k = getColumnName(i, ii);
try {
columns.put(k, k.toString().getBytes(HConstants.UTF8_ENCODING));
} catch (UnsupportedEncodingException e) {
fail();
}
}
hmc.add(getRowName(i), columns, System.currentTimeMillis());
}
@ -98,8 +87,8 @@ public class TestHMemcache extends TestCase {
private HLog getLogfile() throws IOException {
// Create a log file.
Path testDir = new Path(conf.get("hadoop.tmp.dir", System
.getProperty("java.tmp.dir")), "hbase");
Path testDir = new Path(conf.get("hadoop.tmp.dir",
System.getProperty("java.tmp.dir")), "hbase");
Path logFile = new Path(testDir, this.getName());
FileSystem fs = testDir.getFileSystem(conf);
// Cleanup any old log file.
@ -111,6 +100,7 @@ public class TestHMemcache extends TestCase {
private Snapshot runSnapshot(final HMemcache hmc, final HLog log)
throws IOException {
// Save off old state.
int oldHistorySize = hmc.history.size();
TreeMap<HStoreKey, byte []> oldMemcache = hmc.memcache;
@ -151,12 +141,12 @@ public class TestHMemcache extends TestCase {
log.closeAndDelete();
}
private void isExpectedRow(final int rowIndex,
TreeMap<Text, byte []> row) throws UnsupportedEncodingException {
private void isExpectedRow(final int rowIndex, TreeMap<Text, byte []> row)
throws UnsupportedEncodingException {
int i = 0;
for (Text colname: row.keySet()) {
String expectedColname =
getColumnName(rowIndex, i++).toString();
String expectedColname = getColumnName(rowIndex, i++).toString();
String colnameStr = colname.toString();
assertEquals("Column name", colnameStr, expectedColname);
// Value is column name as bytes. Usually result is
@ -204,9 +194,7 @@ public class TestHMemcache extends TestCase {
assertEquals("Count of columns", COLUMNS_COUNT,
results.size());
TreeMap<Text, byte []> row = new TreeMap<Text, byte []>();
for(Iterator<Map.Entry<Text, byte []>> it = results.entrySet().iterator();
it.hasNext(); ) {
Map.Entry<Text, byte []> e = it.next();
for(Map.Entry<Text, byte []> e: results.entrySet() ) {
row.put(e.getKey(), e.getValue());
}
isExpectedRow(i, row);

TestHRegion.java

@ -587,7 +587,7 @@ public class TestHRegion extends HBaseTestCase implements RegionUnavailableListe
}
// NOTE: This test depends on testBatchWrite succeeding
void splitAndMerge() throws IOException {
private void splitAndMerge() throws IOException {
Text midKey = new Text();
if(region.needsSplit(midKey)) {
@ -829,8 +829,10 @@ public class TestHRegion extends HBaseTestCase implements RegionUnavailableListe
} catch (IOException e) {
e.printStackTrace();
}
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
// Delete all the DFS files

TestMasterAdmin.java

@ -38,18 +38,12 @@ public class TestMasterAdmin extends HBaseClusterTestCase {
admin = null;
}
/** the test */
public void testMasterAdmin() {
try {
/** @throws Exception */
public void testMasterAdmin() throws Exception {
admin = new HBaseAdmin(conf);
admin.createTable(testDesc);
admin.disableTable(testDesc.getName());
} catch(Exception e) {
e.printStackTrace();
fail();
}
try {
try {
@SuppressWarnings("unused")
@ -76,13 +70,7 @@ public class TestMasterAdmin extends HBaseClusterTestCase {
fail();
} finally {
try {
admin.deleteTable(testDesc.getName());
} catch(Exception e) {
e.printStackTrace();
fail();
}
}
}
}

TestMergeMeta.java

@ -19,19 +19,17 @@
*/
package org.apache.hadoop.hbase;
import java.io.IOException;
/** Tests region merging */
public class TestMergeMeta extends AbstractMergeTestBase {
/**
* test case
* @throws IOException
*/
public void testMergeMeta() {
try {
public void testMergeMeta() throws IOException {
assertNotNull(dfsCluster);
HMerge.merge(conf, fs, HConstants.META_TABLE_NAME);
} catch(Throwable t) {
t.printStackTrace();
fail();
}
}
}

TestMergeTable.java

@ -31,6 +31,7 @@ public class TestMergeTable extends AbstractMergeTestBase {
* @throws IOException
*/
public void testMergeTable() throws IOException {
assertNotNull(dfsCluster);
MiniHBaseCluster hCluster = new MiniHBaseCluster(conf, 1, dfsCluster);
try {
HMerge.merge(conf, fs, desc.getName());

TestMultipleUpdates.java

@ -40,15 +40,9 @@ public class TestMultipleUpdates extends HBaseClusterTestCase {
super.setUp();
this.desc = new HTableDescriptor("test");
desc.addFamily(new HColumnDescriptor(CONTENTS_STR));
try {
HBaseAdmin admin = new HBaseAdmin(conf);
admin.createTable(desc);
table = new HTable(conf, desc.getName());
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
/** the test */

TestScanner.java

@ -108,10 +108,6 @@ public class TestScanner extends HBaseTestCase {
results.clear();
}
} catch(IOException e) {
e.printStackTrace();
throw e;
} finally {
if(scanner != null) {
scanner.close();
@ -258,9 +254,6 @@ public class TestScanner extends HBaseTestCase {
region.close();
log.closeAndDelete();
} catch(IOException e) {
e.printStackTrace();
throw e;
} finally {
if(cluster != null) {