HBASE-3094 Fixes for miscellaneous broken tests

M src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
  Added a createTable overload that takes a Configuration.
M src/test/java/org/apache/hadoop/hbase/HBaseClusterTestCase.java
  Make a new Configuration each time we create a cluster, else this
  old testing utility fails when we restart a cluster in the middle
  of a test suite using this old stuff.
M src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
  Fixed imports.
M src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
  Added a catch for exceptions other than IOE; shut down if anything is thrown.
M src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
  This test was for sure horked.  Fixed it so we test the behavior
  the javadoc says it's supposed to have.
M src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java
  Another test that just wasn't working -- it couldn't have worked
  when it was committed.
M src/test/java/org/apache/hadoop/hbase/master/TestLoadBalancer.java
  Must have a resolvable host (how did this test ever work?).
M src/test/java/org/apache/hadoop/hbase/client/TestGetRowVersions.java
  Fixup to make this test pass: use a new Configuration after the cluster restart.
M src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java
  Fixup to make this test pass.  The low scanner timeout was stopping
  this test from even starting up.
M src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
  Use the new createTable method from HBaseTestingUtility.
M src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
  If the passed server is null, we are in test mode.
M src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
  Catch other dropped-connection exception types when trying to verify
  we have a good root/meta address.


git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1005990 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2010-10-08 19:45:11 +00:00
parent 449fd9aa94
commit d46e5634a3
13 changed files with 78 additions and 57 deletions

--- a/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
+++ b/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java

@@ -19,6 +19,7 @@
  */
 package org.apache.hadoop.hbase.catalog;
+import java.io.EOFException;
 import java.io.IOException;
 import java.net.ConnectException;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -355,6 +356,17 @@ public class CatalogTracker {
       } else {
         throw e;
       }
+    } catch (IOException ioe) {
+      Throwable cause = ioe.getCause();
+      if (cause != null && cause instanceof EOFException) {
+        // Catch.  Other end disconnected us.
+      } else if (cause != null && cause.getMessage() != null &&
+          cause.getMessage().toLowerCase().contains("connection reset")) {
+        // Catch.  Connection reset.
+      } else {
+        throw ioe;
+      }
     }
     return protocol;
   }
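The added catch unwraps the IOException's cause before deciding the failure was only a dropped connection. A minimal, self-contained sketch of that unwrapping rule (class and method names here are illustrative, not part of the patch):

    import java.io.EOFException;
    import java.io.IOException;

    public class DroppedConnectionCheck {
      // True if the exception looks like the remote end dropped us: the
      // wrapped cause is an EOFException, or its message mentions
      // "connection reset".  Such failures are safe to swallow and retry.
      static boolean isDroppedConnection(IOException ioe) {
        Throwable cause = ioe.getCause();
        if (cause instanceof EOFException) return true;
        return cause != null && cause.getMessage() != null &&
          cause.getMessage().toLowerCase().contains("connection reset");
      }

      public static void main(String[] args) {
        IOException eof = new IOException(new EOFException("peer hung up"));
        IOException reset = new IOException(
          new java.net.SocketException("Connection reset by peer"));
        System.out.println(isDroppedConnection(eof));                      // true
        System.out.println(isDroppedConnection(reset));                    // true
        System.out.println(isDroppedConnection(new IOException("other"))); // false
      }
    }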

--- a/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ b/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java

@@ -120,6 +120,7 @@ class CatalogJanitor extends Chore {
    * @return Null if not found (and logs fact that expected COL_REGIONINFO
    * was missing) else deserialized {@link HRegionInfo}
    * @throws IOException
+   * @throws NullPointerException if null cell
    */
   static HRegionInfo getHRegionInfo(final Result result)
   throws IOException {

--- a/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
+++ b/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java

@@ -184,7 +184,7 @@ class SplitTransaction {
     assert !this.parent.lock.writeLock().isHeldByCurrentThread() : "Unsafe to hold write lock while performing RPCs";
     // If true, no cluster to write meta edits into.
-    boolean testing =
+    boolean testing = server == null? true:
       server.getConfiguration().getBoolean("hbase.testing.nocluster", false);
     createSplitDir(this.parent.getFilesystem(), this.splitdir);

--- a/src/test/java/org/apache/hadoop/hbase/HBaseClusterTestCase.java
+++ b/src/test/java/org/apache/hadoop/hbase/HBaseClusterTestCase.java

@@ -25,6 +25,7 @@ import java.io.PrintWriter;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.client.HConnectionManager;
@@ -102,13 +103,13 @@ public abstract class HBaseClusterTestCase extends HBaseTestCase {
       this.zooKeeperCluster = new MiniZooKeeperCluster();
       int clientPort = this.zooKeeperCluster.startup(testDir);
       conf.set("hbase.zookeeper.property.clientPort", Integer.toString(clientPort));
+      Configuration c = new Configuration(this.conf);
       // start the mini cluster
-      this.cluster = new MiniHBaseCluster(conf, regionServers);
+      this.cluster = new MiniHBaseCluster(c, regionServers);
       if (openMetaTable) {
         // opening the META table ensures that cluster is running
-        new HTable(conf, HConstants.META_TABLE_NAME);
+        new HTable(c, HConstants.META_TABLE_NAME);
       }
     }
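This change relies on client connections being cached per Configuration instance, as the client of this era did: copying the Configuration forces a fresh HConnection instead of one tied to the cluster that was just torn down. A small sketch of the copy idiom (the property value is illustrative):

    import org.apache.hadoop.conf.Configuration;

    public class FreshConfiguration {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("hbase.zookeeper.property.clientPort", "21818");
        // Copy constructor: same settings, distinct instance, so nothing
        // cached against the old instance (e.g. a stale HConnection) is
        // reused after a restart.
        Configuration c = new Configuration(conf);
        System.out.println(c.get("hbase.zookeeper.property.clientPort")); // 21818
        System.out.println(c == conf);                                    // false
      }
    }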

--- a/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java

@@ -450,13 +450,28 @@ public class HBaseTestingUtility {
    * @throws IOException
    */
   public HTable createTable(byte[] tableName, byte[][] families)
+  throws IOException {
+    return createTable(tableName, families,
+      new Configuration(getConfiguration()));
+  }
+
+  /**
+   * Create a table.
+   * @param tableName
+   * @param families
+   * @param c Configuration to use
+   * @return An HTable instance for the created table.
+   * @throws IOException
+   */
+  public HTable createTable(byte[] tableName, byte[][] families,
+    final Configuration c)
   throws IOException {
     HTableDescriptor desc = new HTableDescriptor(tableName);
     for(byte[] family : families) {
       desc.addFamily(new HColumnDescriptor(family));
     }
     getHBaseAdmin().createTable(desc);
-    return new HTable(new Configuration(getConfiguration()), tableName);
+    return new HTable(c, tableName);
   }

   /**
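The practical difference between the two overloads is who owns the Configuration the returned HTable uses. A hedged usage sketch, assuming a running mini cluster (the test class is illustrative; TestFromClientSide below exercises the same contract):

    import static org.junit.Assert.assertSame;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.junit.Test;

    public class CreateTableOverloadExample {
      private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

      @Test
      public void pinConfiguration() throws Exception {
        TEST_UTIL.startMiniCluster();
        Configuration conf = TEST_UTIL.getConfiguration();
        // The old overload hands the HTable a copy of the utility's
        // Configuration; the new one lets the caller pin the exact instance.
        HTable t = TEST_UTIL.createTable(Bytes.toBytes("t"),
          new byte[][] { Bytes.toBytes("f") }, conf);
        assertSame(conf, t.getConfiguration());
        TEST_UTIL.shutdownMiniCluster();
      }
    }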

--- a/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
+++ b/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java

@@ -253,9 +253,12 @@ public class MiniHBaseCluster {
         MiniHBaseCluster.MiniHBaseClusterMaster.class,
         MiniHBaseCluster.MiniHBaseClusterRegionServer.class);
       hbaseCluster.startup();
-    } catch(IOException e) {
+    } catch (IOException e) {
       shutdown();
       throw e;
+    } catch (Throwable t) {
+      shutdown();
+      throw new IOException("Shutting down", t);
     }
   }
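The shape of this fix generalizes: cleanup must run for any startup failure, and non-IOE throwables are wrapped so the method keeps its throws IOException signature. A stand-alone sketch of the pattern (the Cluster interface is a stand-in, not MiniHBaseCluster's API):

    import java.io.IOException;

    public class GuardedStartup {
      interface Cluster {
        void startup() throws IOException;
        void shutdown();
      }

      static void start(Cluster cluster) throws IOException {
        try {
          cluster.startup();
        } catch (IOException e) {
          cluster.shutdown();
          throw e;                                    // rethrow the IOE as-is
        } catch (Throwable t) {
          cluster.shutdown();
          throw new IOException("Shutting down", t);  // wrap everything else
        }
      }

      public static void main(String[] args) {
        try {
          start(new Cluster() {
            public void startup() { throw new IllegalStateException("boom"); }
            public void shutdown() { System.out.println("cleaned up"); }
          });
        } catch (IOException e) {
          System.out.println("caught: " + e.getCause());
        }
      }
    }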

--- a/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java

@@ -122,7 +122,7 @@ public class TestFromClientSide {
     byte[] TABLE = Bytes.toBytes("testGetConfiguration");
     byte[][] FAMILIES = new byte[][] { Bytes.toBytes("foo") };
     Configuration conf = TEST_UTIL.getConfiguration();
-    HTable table = TEST_UTIL.createTable(TABLE, FAMILIES);
+    HTable table = TEST_UTIL.createTable(TABLE, FAMILIES, conf);
     assertSame(conf, table.getConfiguration());
   }

--- a/src/test/java/org/apache/hadoop/hbase/client/TestGetRowVersions.java
+++ b/src/test/java/org/apache/hadoop/hbase/client/TestGetRowVersions.java

@@ -24,6 +24,7 @@ import java.util.NavigableMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClusterTestCase;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -43,35 +44,34 @@ public class TestGetRowVersions extends HBaseClusterTestCase {
   private static final byte [] VALUE2 = Bytes.toBytes("value2");
   private static final long TIMESTAMP1 = 100L;
   private static final long TIMESTAMP2 = 200L;
-  private HBaseAdmin admin = null;
-  private HTable table = null;

   @Override
   public void setUp() throws Exception {
     super.setUp();
     HTableDescriptor desc = new HTableDescriptor(TABLE_NAME);
     desc.addFamily(new HColumnDescriptor(CONTENTS));
-    this.admin = new HBaseAdmin(conf);
-    this.admin.createTable(desc);
-    this.table = new HTable(conf, TABLE_NAME);
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    admin.createTable(desc);
   }

   /** @throws Exception */
   public void testGetRowMultipleVersions() throws Exception {
     Put put = new Put(ROW, TIMESTAMP1, null);
     put.add(CONTENTS, CONTENTS, VALUE1);
-    this.table.put(put);
+    HTable table = new HTable(new Configuration(conf), TABLE_NAME);
+    table.put(put);
     // Shut down and restart the HBase cluster
     this.cluster.shutdown();
     this.zooKeeperCluster.shutdown();
     LOG.debug("HBase cluster shut down -- restarting");
     this.hBaseClusterSetup();
-    // Make a new connection
-    this.table = new HTable(conf, TABLE_NAME);
+    // Make a new connection.  Use new Configuration instance because old one
+    // is tied to an HConnection that has since gone stale.
+    table = new HTable(new Configuration(conf), TABLE_NAME);
     // Overwrite previous value
     put = new Put(ROW, TIMESTAMP2, null);
     put.add(CONTENTS, CONTENTS, VALUE2);
-    this.table.put(put);
+    table.put(put);
     // Now verify that getRow(row, column, latest) works
     Get get = new Get(ROW);
     // Should get one version by default

--- a/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java
+++ b/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java

@@ -19,6 +19,9 @@
  */
 package org.apache.hadoop.hbase.client;

+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -26,15 +29,11 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;

 /**
  * Test various scanner timeout issues.
  */
@@ -47,8 +46,7 @@ public class TestScannerTimeout {
   private final static byte[] SOME_BYTES = Bytes.toBytes("f");
   private final static byte[] TABLE_NAME = Bytes.toBytes("t");
   private final static int NB_ROWS = 10;
-  private final static int SCANNER_TIMEOUT = 1000;
-  private static HTable table;
+  private final static int SCANNER_TIMEOUT = 10000;

   /**
    * @throws java.lang.Exception
@@ -58,7 +56,7 @@ public class TestScannerTimeout {
     Configuration c = TEST_UTIL.getConfiguration();
     c.setInt("hbase.regionserver.lease.period", SCANNER_TIMEOUT);
     TEST_UTIL.startMiniCluster(2);
-    table = TEST_UTIL.createTable(Bytes.toBytes("t"), SOME_BYTES);
+    HTable table = TEST_UTIL.createTable(TABLE_NAME, SOME_BYTES);
     for (int i = 0; i < NB_ROWS; i++) {
       Put put = new Put(Bytes.toBytes(i));
       put.add(SOME_BYTES, SOME_BYTES, SOME_BYTES);
@@ -89,6 +87,8 @@ public class TestScannerTimeout {
   @Test
   public void test2481() throws Exception {
     Scan scan = new Scan();
+    HTable table =
+      new HTable(new Configuration(TEST_UTIL.getConfiguration()), TABLE_NAME);
     ResultScanner r = table.getScanner(scan);
     int count = 0;
     try {
@@ -131,6 +131,5 @@
     Result[] results = r.next(NB_ROWS);
     assertEquals(NB_ROWS, results.length);
     r.close();
   }
 }
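The lease period is what expires idle scanners server-side, so it bounds both how slow the test may run and how long a deliberate pause must be to trigger the timeout under test. A tiny sketch of the configuration step (property name as the 0.90-era code uses it):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ScannerLeaseSetup {
      public static void main(String[] args) {
        Configuration c = HBaseConfiguration.create();
        // How long (ms) a regionserver keeps an idle scanner alive.  At
        // 1000 the lease expired before the tests got going; 10000 leaves
        // startup headroom while a sleep of ~2x the lease still provokes
        // the expected scanner timeout.
        c.setInt("hbase.regionserver.lease.period", 10000);
        System.out.println(c.getInt("hbase.regionserver.lease.period", -1));
      }
    }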

--- a/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java

@@ -168,19 +168,14 @@ public class TestCatalogJanitor {
     // Make a key that does not have a regioninfo value.
     kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f,
       HConstants.REGIONINFO_QUALIFIER, f));
-    boolean exception = false;
-    try {
-      CatalogJanitor.getHRegionInfo(new Result(kvs));
-    } catch (Exception ioe) {
-      exception = true;
-    }
-    assertTrue(exception);
+    HRegionInfo hri = CatalogJanitor.getHRegionInfo(new Result(kvs));
+    assertTrue(hri == null);
     // OK, give it what it expects
     kvs.clear();
     kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f,
       HConstants.REGIONINFO_QUALIFIER,
       Writables.getBytes(HRegionInfo.FIRST_META_REGIONINFO)));
-    HRegionInfo hri = CatalogJanitor.getHRegionInfo(new Result(kvs));
+    hri = CatalogJanitor.getHRegionInfo(new Result(kvs));
     assertNotNull(hri);
     assertTrue(hri.equals(HRegionInfo.FIRST_META_REGIONINFO));
   }

--- a/src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java
+++ b/src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java

@@ -17,25 +17,22 @@
  */
 package org.apache.hadoop.hbase.master;

-import static org.junit.Assert.*;
-
-import java.util.HashSet;
-import java.util.Set;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;

 import org.junit.Test;

 public class TestDeadServer {
   @Test public void testIsDead() {
-    Set<String> deadServers = new HashSet<String>();
     DeadServer ds = new DeadServer();
-    final String hostname123 = "one,123,3";
+    final String hostname123 = "127.0.0.1,123,3";
     assertFalse(ds.isDeadServer(hostname123, false));
     assertFalse(ds.isDeadServer(hostname123, true));
-    deadServers.add(hostname123);
+    ds.add(hostname123);
     assertTrue(ds.isDeadServer(hostname123, false));
-    assertFalse(ds.isDeadServer("one:1", true));
-    assertFalse(ds.isDeadServer("one:1234", true));
-    assertTrue(ds.isDeadServer("one:123", true));
+    assertFalse(ds.isDeadServer("127.0.0.1:1", true));
+    assertFalse(ds.isDeadServer("127.0.0.1:1234", true));
+    assertTrue(ds.isDeadServer("127.0.0.1:123", true));
   }
 }
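The assertions encode the 0.90-era server name format host,port,startcode and the host:port matching isDeadServer performs when its second argument is true. An illustrative re-implementation of just that matching rule (not DeadServer's actual code):

    public class HostPortMatch {
      // Does "host:port" identify the server named "host,port,startcode"?
      static boolean matches(String hostAndPort, String serverName) {
        String[] hp = hostAndPort.split(":");
        String[] name = serverName.split(",");
        return hp.length == 2 && name.length == 3 &&
          name[0].equals(hp[0]) && name[1].equals(hp[1]);
      }

      public static void main(String[] args) {
        String dead = "127.0.0.1,123,3";
        System.out.println(matches("127.0.0.1:123", dead));  // true
        System.out.println(matches("127.0.0.1:1", dead));    // false
        System.out.println(matches("127.0.0.1:1234", dead)); // false
      }
    }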

--- a/src/test/java/org/apache/hadoop/hbase/master/TestLoadBalancer.java
+++ b/src/test/java/org/apache/hadoop/hbase/master/TestLoadBalancer.java

@@ -352,7 +352,7 @@ public class TestLoadBalancer {
       server.getLoad().setNumberOfRegions(numRegions);
       return server;
     }
-    String host = RandomStringUtils.random(16);
+    String host = "127.0.0.1";
     int port = rand.nextInt(60000);
     long startCode = rand.nextLong();
     HServerInfo hsi =

--- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java

@@ -19,6 +19,17 @@
  */
 package org.apache.hadoop.hbase.regionserver;

+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -51,27 +62,14 @@ import org.apache.hadoop.hbase.regionserver.HRegion.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;
 import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.hadoop.hbase.util.Threads;

-import com.google.common.base.Joiner;
 import com.google.common.collect.Lists;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;

 /**
  * Basic stand-alone testing of HRegion.