HBASE-2594 Fix broken build caused by hbase-2692 commit

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@991713 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael Stack 2010-09-01 21:40:34 +00:00
parent 6d747b7f38
commit 34efa17d46
4 changed files with 21 additions and 19 deletions

View File

@ -495,6 +495,7 @@ Release 0.21.0 - Unreleased
hbase shell (Kannan via jgray) hbase shell (Kannan via jgray)
HBASE-2948 bin/hbase shell broken (after hbase-2692) HBASE-2948 bin/hbase shell broken (after hbase-2692)
(Sebastian Bauer via Stack) (Sebastian Bauer via Stack)
HBASE-2954 Fix broken build caused by hbase-2692 commit
IMPROVEMENTS IMPROVEMENTS
HBASE-1760 Cleanup TODOs in HTable HBASE-1760 Cleanup TODOs in HTable

View File

@ -192,6 +192,8 @@ public class MiniHBaseCluster {
public void run() { public void run() {
try { try {
super.run(); super.run();
} catch (Throwable t) {
LOG.error("Exception in run", t);
} finally { } finally {
// Run this on the way out. // Run this on the way out.
if (this.shutdownThread != null) { if (this.shutdownThread != null) {

View File

@ -611,7 +611,7 @@ public class TestHLogSplit {
throws IOException { throws IOException {
Path tdir = HTableDescriptor.getTableDir(rootdir, table); Path tdir = HTableDescriptor.getTableDir(rootdir, table);
Path editsdir = HLog.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir, Path editsdir = HLog.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir,
HRegionInfo.encodeRegionName(region.getBytes()))); Bytes.toString(region.getBytes())));
FileStatus [] files = this.fs.listStatus(editsdir); FileStatus [] files = this.fs.listStatus(editsdir);
assertEquals(1, files.length); assertEquals(1, files.length);
return files[0].getPath(); return files[0].getPath();

View File

@ -19,16 +19,18 @@
*/ */
package org.apache.hadoop.hbase.replication; package org.apache.hadoop.hbase.replication;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.EmptyWatcher;
import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HTableDescriptor;
// import org.apache.hadoop.hbase.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.UnknownScannerException; import org.apache.hadoop.hbase.UnknownScannerException;
import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Get;
@ -39,17 +41,13 @@ import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
// import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.junit.After;
import org.junit.AfterClass; import org.junit.AfterClass;
import org.junit.Before; import org.junit.Before;
import org.junit.BeforeClass; import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test; import org.junit.Test;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
public class TestReplication { public class TestReplication {
private static final Log LOG = LogFactory.getLog(TestReplication.class); private static final Log LOG = LogFactory.getLog(TestReplication.class);
@ -94,10 +92,9 @@ public class TestReplication {
conf1.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true); conf1.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
conf1.setBoolean("dfs.support.append", true); conf1.setBoolean("dfs.support.append", true);
conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100); conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
/* REENALBE
utility1 = new HBaseTestingUtility(conf1); utility1 = new HBaseTestingUtility(conf1);
utility1.startMiniZKCluster(); utility1.startMiniZKCluster();
/* REENALBE
MiniZooKeeperCluster miniZK = utility1.getZkCluster(); MiniZooKeeperCluster miniZK = utility1.getZkCluster();
zkw1 = ZooKeeperWrapper.createInstance(conf1, "cluster1"); zkw1 = ZooKeeperWrapper.createInstance(conf1, "cluster1");
zkw1.writeZNode("/1", "replication", ""); zkw1.writeZNode("/1", "replication", "");
@ -105,7 +102,6 @@ public class TestReplication {
conf1.get(HConstants.ZOOKEEPER_QUORUM)+":" + conf1.get(HConstants.ZOOKEEPER_QUORUM)+":" +
conf1.get("hbase.zookeeper.property.clientPort")+":/1"); conf1.get("hbase.zookeeper.property.clientPort")+":/1");
setIsReplication(true); setIsReplication(true);
*/
LOG.info("Setup first Zk"); LOG.info("Setup first Zk");
conf2 = HBaseConfiguration.create(); conf2 = HBaseConfiguration.create();
@ -114,7 +110,7 @@ public class TestReplication {
conf2.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true); conf2.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
conf2.setBoolean("dfs.support.append", true); conf2.setBoolean("dfs.support.append", true);
conf2.setLong("hbase.regions.percheckin", 1); conf2.setLong("hbase.regions.percheckin", 1);
/* REENALBE
utility2 = new HBaseTestingUtility(conf2); utility2 = new HBaseTestingUtility(conf2);
utility2.setZkCluster(miniZK); utility2.setZkCluster(miniZK);
zkw2 = ZooKeeperWrapper.createInstance(conf2, "cluster2"); zkw2 = ZooKeeperWrapper.createInstance(conf2, "cluster2");
@ -126,7 +122,7 @@ public class TestReplication {
zkw1.writeZNode("/1/replication/peers", "1", zkw1.writeZNode("/1/replication/peers", "1",
conf2.get(HConstants.ZOOKEEPER_QUORUM)+":" + conf2.get(HConstants.ZOOKEEPER_QUORUM)+":" +
conf2.get("hbase.zookeeper.property.clientPort")+":/2"); conf2.get("hbase.zookeeper.property.clientPort")+":/2");
*/
LOG.info("Setup second Zk"); LOG.info("Setup second Zk");
utility1.startMiniCluster(2); utility1.startMiniCluster(2);
@ -147,6 +143,7 @@ public class TestReplication {
htable1 = new HTable(conf1, tableName); htable1 = new HTable(conf1, tableName);
htable1.setWriteBufferSize(1024); htable1.setWriteBufferSize(1024);
htable2 = new HTable(conf2, tableName); htable2 = new HTable(conf2, tableName);
*/
} }
private static void setIsReplication(boolean rep) throws Exception { private static void setIsReplication(boolean rep) throws Exception {
@ -174,15 +171,17 @@ public class TestReplication {
*/ */
@AfterClass @AfterClass
public static void tearDownAfterClass() throws Exception { public static void tearDownAfterClass() throws Exception {
/* REENABLE
utility2.shutdownMiniCluster(); utility2.shutdownMiniCluster();
utility1.shutdownMiniCluster(); utility1.shutdownMiniCluster();
*/
} }
/** /**
* Add a row, check it's replicated, delete it, check's gone * Add a row, check it's replicated, delete it, check's gone
* @throws Exception * @throws Exception
*/ */
@Test @Ignore @Test
public void testSimplePutDelete() throws Exception { public void testSimplePutDelete() throws Exception {
LOG.info("testSimplePutDelete"); LOG.info("testSimplePutDelete");
Put put = new Put(row); Put put = new Put(row);
@ -230,7 +229,7 @@ public class TestReplication {
* Try a small batch upload using the write buffer, check it's replicated * Try a small batch upload using the write buffer, check it's replicated
* @throws Exception * @throws Exception
*/ */
@Test @Ignore @Test
public void testSmallBatch() throws Exception { public void testSmallBatch() throws Exception {
LOG.info("testSmallBatch"); LOG.info("testSmallBatch");
Put put; Put put;
@ -274,7 +273,7 @@ public class TestReplication {
* replicated, enable it, try replicating and it should work * replicated, enable it, try replicating and it should work
* @throws Exception * @throws Exception
*/ */
@Test @Ignore @Test
public void testStartStop() throws Exception { public void testStartStop() throws Exception {
// Test stopping replication // Test stopping replication
@ -343,7 +342,7 @@ public class TestReplication {
* hlog rolling and other non-trivial code paths * hlog rolling and other non-trivial code paths
* @throws Exception * @throws Exception
*/ */
@Test @Ignore @Test
public void loadTesting() throws Exception { public void loadTesting() throws Exception {
htable1.setWriteBufferSize(1024); htable1.setWriteBufferSize(1024);
htable1.setAutoFlush(false); htable1.setAutoFlush(false);
@ -397,7 +396,7 @@ public class TestReplication {
* the upload. The failover happens internally. * the upload. The failover happens internally.
* @throws Exception * @throws Exception
*/ */
@Test @Ignore @Test
public void queueFailover() throws Exception { public void queueFailover() throws Exception {
utility1.createMultiRegions(htable1, famName); utility1.createMultiRegions(htable1, famName);