HBASE-3114 Tests up on hudson are leaking zookeeper ensembles
Trying this patch to see if it cures hudson of leakage.

M src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
  Add a passedZkCluster flag. If we didn't start the zk cluster,
  don't shut it down on the way out.
M src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtility.java
  Add a test that starts up three clusters in a single JVM, all sharing
  one zk ensemble, each at its own chroot location. Add to tables in
  each and verify the adds (doing the same as the old TestMultiClusters did).
M src/test/java/org/apache/hadoop/hbase/replication/TestReplication.java
  Re-enable shutdown of started clusters. Hopefully this clears up the
  leaking of zk ensembles on hudson.
M src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
  Testing shows that the callback for session connected can legitimately
  be invoked before construction of the ZooKeeperWatcher finishes; in
  particular, the zookeeper data member can be null. Hang around for a
  second or so before throwing an exception. (Make the exception identify
  the particular zkw by logging, with the error message, a stack trace
  captured at zkw construction -- helps with debugging this stuff.)

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1023181 13f79535-47bb-0310-9956-ffa450edef68
commit 0f27ee880c (parent dcbcd4f2d4)
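The fix leans on ZooKeeper chroots: each mini cluster points zookeeper.znode.parent at its own root znode, so three clusters can share one ensemble without seeing each other's state. Below is a minimal sketch of that isolation using the plain ZooKeeper client, separate from the patch itself; the ensemble address, chroot paths, and session timeout are illustrative assumptions, and it presumes a fresh ensemble where "/1" and "/2" don't exist yet (no watcher or retry handling, unlike real HBase code):

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs.Ids;
    import org.apache.zookeeper.ZooKeeper;

    public class ChrootIsolationSketch {
      public static void main(String[] args) throws Exception {
        // Un-chrooted session, used only to create the per-cluster root
        // znodes (in HBase the masters create these themselves).
        ZooKeeper admin = new ZooKeeper("localhost:2181", 30000, null);
        admin.create("/1", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        admin.create("/2", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);

        // Two sessions against the same ensemble, rooted at different
        // chroots; analogous to zookeeper.znode.parent = "/1" and "/2".
        ZooKeeper zk1 = new ZooKeeper("localhost:2181/1", 30000, null);
        ZooKeeper zk2 = new ZooKeeper("localhost:2181/2", 30000, null);

        zk1.create("/marker", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        // zk2 cannot see zk1's data: each chroot is a disjoint namespace.
        System.out.println(zk2.exists("/marker", false)); // prints "null"

        zk1.close();
        zk2.close();
        admin.close();
      }
    }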
diff --git a/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
@@ -30,6 +30,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.client.HConnectionManager;
+import org.apache.hadoop.hbase.util.Threads;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.WatchedEvent;
 import org.apache.zookeeper.Watcher;
@@ -86,6 +88,10 @@ public class ZooKeeperWatcher implements Watcher {
   // znode used for table disabling/enabling
   public String tableZNode;
 
+  private final Configuration conf;
+
+  private final Exception constructorCaller;
+
   /**
    * Instantiate a ZooKeeper connection and watcher.
    * @param descriptor Descriptive string that is added to zookeeper sessionid
@@ -96,6 +102,14 @@ public class ZooKeeperWatcher implements Watcher {
   public ZooKeeperWatcher(Configuration conf, String descriptor,
       Abortable abortable)
   throws IOException, ZooKeeperConnectionException {
+    this.conf = conf;
+    // Capture a stack trace now. Will print it out later if problem so we can
+    // distinguish amongst the myriad ZKWs.
+    try {
+      throw new Exception("ZKW CONSTRUCTOR STACK TRACE FOR DEBUGGING");
+    } catch (Exception e) {
+      this.constructorCaller = e;
+    }
     this.quorum = ZKConfig.getZKQuorumServersString(conf);
     // Identifier will get the sessionid appended later below down when we
     // handle the syncconnect event.
@@ -270,6 +284,20 @@ public class ZooKeeperWatcher implements Watcher {
       case SyncConnected:
         // Update our identifier. Otherwise ignore.
         LOG.info(this.identifier + " connected");
+        // Now, this callback can be invoked before this.zookeeper is set.
+        // Wait a little while.
+        long finished = System.currentTimeMillis() +
+          this.conf.getLong("hbase.zookeeper.watcher.sync.connected.wait", 2000);
+        while (System.currentTimeMillis() < finished) {
+          Threads.sleep(1);
+          if (this.zooKeeper != null) break;
+        }
+        if (this.zooKeeper == null) {
+          LOG.error("ZK is null on connection event -- see stack trace " +
+            "for the stack trace when constructor was called on this zkw",
+            this.constructorCaller);
+          throw new NullPointerException("ZK is null");
+        }
         this.identifier = this.identifier + "-0x" +
           Long.toHexString(this.zooKeeper.getSessionId());
         break;
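The SyncConnected change above is a bounded wait: the watcher callback fires on the ZooKeeper event thread and can win the race against the constructor assigning this.zooKeeper, so the handler polls briefly instead of failing on the first null, and only then throws -- logging the constructor-time stack trace so the offending zkw can be identified. The same pattern in isolation, as a sketch with assumed names rather than code from the patch:

    import java.util.function.Supplier;

    public final class BoundedWait {
      /**
       * Poll supplier until it returns non-null or waitMs elapses.
       * Mirrors the wait on this.zooKeeper in the SyncConnected handler;
       * the caller must still handle a null result after the deadline.
       */
      static <T> T waitForNonNull(Supplier<T> supplier, long waitMs) {
        long deadline = System.currentTimeMillis() + waitMs;
        T value = supplier.get();
        while (value == null && System.currentTimeMillis() < deadline) {
          try {
            Thread.sleep(1); // corresponds to Threads.sleep(1) in the patch
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            break;
          }
          value = supplier.get();
        }
        return value;
      }
    }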
diff --git a/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -85,6 +85,11 @@ public class HBaseTestingUtility {
   private final static Log LOG = LogFactory.getLog(HBaseTestingUtility.class);
   private Configuration conf;
   private MiniZooKeeperCluster zkCluster = null;
+  /**
+   * Set if we were passed a zkCluster. If so, we won't shutdown zk as
+   * part of general shutdown.
+   */
+  private boolean passedZkCluster = false;
   private MiniDFSCluster dfsCluster = null;
   private MiniHBaseCluster hbaseCluster = null;
   private MiniMRCluster mrCluster = null;
@@ -248,6 +253,7 @@ public class HBaseTestingUtility {
 
   private MiniZooKeeperCluster startMiniZKCluster(final File dir)
   throws Exception {
+    this.passedZkCluster = false;
     if (this.zkCluster != null) {
       throw new IOException("Cluster already running at " + dir);
     }
@@ -408,7 +414,7 @@ public class HBaseTestingUtility {
       // Wait till hbase is down before going on to shutdown zk.
       this.hbaseCluster.join();
     }
-    shutdownMiniZKCluster();
+    if (!this.passedZkCluster) shutdownMiniZKCluster();
     if (this.dfsCluster != null) {
       // The below throws an exception per dn, AsynchronousCloseException.
       this.dfsCluster.shutdown();
@@ -985,6 +991,7 @@ public class HBaseTestingUtility {
   }
 
   public void setZkCluster(MiniZooKeeperCluster zkCluster) {
+    this.passedZkCluster = true;
     this.zkCluster = zkCluster;
   }
 
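The net effect of passedZkCluster is an ownership rule: only the utility that started the mini zk cluster tears it down, while utilities that were handed one leave it running. A sketch of the intended usage, using the methods from the diff above wrapped in a hypothetical driver (real tests would do this in JUnit methods, as multiClusters below does):

    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class SharedZkOwnershipSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility owner = new HBaseTestingUtility();
        owner.startMiniZKCluster();                  // passedZkCluster stays false

        HBaseTestingUtility borrower = new HBaseTestingUtility();
        borrower.setZkCluster(owner.getZkCluster()); // passedZkCluster set to true

        borrower.startMiniCluster();     // reuses the shared zk ensemble
        borrower.shutdownMiniCluster();  // skips shutdownMiniZKCluster()
        owner.shutdownMiniCluster();     // actually stops the shared ensemble
      }
    }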
diff --git a/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtility.java b/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtility.java
@@ -20,7 +20,9 @@
 package org.apache.hadoop.hbase;
 
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
@@ -29,6 +31,11 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -62,6 +69,65 @@ public class TestHBaseTestingUtility {
   public void tearDown() throws Exception {
   }
 
+  /**
+   * Basic sanity test that spins up multiple HDFS and HBase clusters that share
+   * the same ZK ensemble. We then create the same table in both and make sure
+   * that what we insert in one place doesn't end up in the other.
+   * @throws Exception
+   */
+  @Test (timeout=180000)
+  public void multiClusters() throws Exception {
+    // Create three clusters
+
+    // Cluster 1.
+    HBaseTestingUtility htu1 = new HBaseTestingUtility();
+    // Set a different zk path for each cluster
+    htu1.getConfiguration().set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
+    htu1.startMiniZKCluster();
+
+    // Cluster 2
+    HBaseTestingUtility htu2 = new HBaseTestingUtility();
+    htu2.getConfiguration().set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
+    htu2.setZkCluster(htu1.getZkCluster());
+
+    // Cluster 3.
+    HBaseTestingUtility htu3 = new HBaseTestingUtility();
+    htu3.getConfiguration().set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/3");
+    htu3.setZkCluster(htu1.getZkCluster());
+
+    try {
+      htu1.startMiniCluster();
+      htu2.startMiniCluster();
+      htu3.startMiniCluster();
+
+      final byte[] TABLE_NAME = Bytes.toBytes("test");
+      final byte[] FAM_NAME = Bytes.toBytes("fam");
+      final byte[] ROW = Bytes.toBytes("row");
+      final byte[] QUAL_NAME = Bytes.toBytes("qual");
+      final byte[] VALUE = Bytes.toBytes("value");
+
+      HTable table1 = htu1.createTable(TABLE_NAME, FAM_NAME);
+      HTable table2 = htu2.createTable(TABLE_NAME, FAM_NAME);
+
+      Put put = new Put(ROW);
+      put.add(FAM_NAME, QUAL_NAME, VALUE);
+      table1.put(put);
+
+      Get get = new Get(ROW);
+      get.addColumn(FAM_NAME, QUAL_NAME);
+      Result res = table1.get(get);
+      assertEquals(1, res.size());
+
+      res = table2.get(get);
+      assertEquals(0, res.size());
+
+    } finally {
+      htu3.shutdownMiniCluster();
+      htu2.shutdownMiniCluster();
+      htu1.shutdownMiniCluster();
+    }
+  }
+
   @Test public void testMiniCluster() throws Exception {
     MiniHBaseCluster cluster = this.hbt.startMiniCluster();
     try {
diff --git a/src/test/java/org/apache/hadoop/hbase/replication/TestReplication.java b/src/test/java/org/apache/hadoop/hbase/replication/TestReplication.java
@@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 
 public class TestReplication {
@@ -170,10 +169,8 @@ public class TestReplication {
    */
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
-    /* REENABLE
     utility2.shutdownMiniCluster();
     utility1.shutdownMiniCluster();
-    */
   }
 
   /**