HBASE-2925 LRU of HConnectionManager.HBASE_INSTANCES breaks if HBaseConfiguration is changed

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@993078 13f79535-47bb-0310-9956-ffa450edef68

This commit is contained in:
  parent 72ddbced0c
  commit f0f9240de4
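Background on the bug being fixed, summarized from the test added at the bottom of this commit: the connection cache was keyed by a hash computed from the Configuration's contents (the removed Integer key = HBaseConfiguration.hashCode(conf)). Once a client mutated its Configuration after a connection had been cached, that key could no longer be recomputed, so the cached connection could neither be reused nor removed. The sketch below is illustration only, not HBase code; a plain HashMap stands in for Configuration and a String stands in for the cached TableServers connection.

import java.util.HashMap;
import java.util.Map;

// Illustration only: a cache keyed by a hash of mutable contents, the pattern
// this commit removes.
public class ContentHashedCacheDemo {
  public static void main(String[] args) {
    Map<Integer, String> cache = new HashMap<Integer, String>();

    Map<String, String> conf = new HashMap<String, String>();
    conf.put("somekey", "1");

    // Key derived from the configuration's current contents, as the old
    // Integer key = HBaseConfiguration.hashCode(conf) was.
    cache.put(conf.hashCode(), "connection");

    // Mutate the configuration after the connection has been cached.
    conf.put("someotherkey", "2");

    // The key now recomputes differently: the cached entry can neither be
    // found for reuse nor removed, yet it still occupies a slot in the cache.
    System.out.println(cache.get(conf.hashCode()));    // null
    System.out.println(cache.remove(conf.hashCode())); // null
    System.out.println(cache.size());                  // 1
  }
}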
CHANGES.txt
@@ -503,6 +503,9 @@ Release 0.21.0 - Unreleased
    HBASE-2943 major_compact (and other admin commands) broken for .META.
    HBASE-2643 Figure how to deal with eof splitting logs
               (Nicolas Spiegelberg via Stack)
+   HBASE-2925 LRU of HConnectionManager.HBASE_INSTANCES breaks if
+              HBaseConfiguration is changed
+              (Robert Mahfoud via Stack)
 
   IMPROVEMENTS
    HBASE-1760 Cleanup TODOs in HTable
HBaseConfiguration.java
@@ -19,7 +19,6 @@
  */
 package org.apache.hadoop.hbase;
 
-import java.util.Iterator;
 import java.util.Map.Entry;
 
 import org.apache.commons.logging.Log;
@@ -87,57 +86,4 @@ public class HBaseConfiguration extends Configuration {
     }
     return conf;
   }
-
-  /**
-   * Returns the hash code value for this HBaseConfiguration. The hash code of a
-   * HBaseConfiguration is defined by the xor of the hash codes of its entries.
-   *
-   * @see Configuration#iterator() How the entries are obtained.
-   */
-  @Override
-  @Deprecated
-  public int hashCode() {
-    return hashCode(this);
-  }
-
-  /**
-   * Returns the hash code value for this HBaseConfiguration. The hash code of a
-   * Configuration is defined by the xor of the hash codes of its entries.
-   *
-   * @see Configuration#iterator() How the entries are obtained.
-   */
-  public static int hashCode(Configuration conf) {
-    int hash = 0;
-
-    Iterator<Entry<String, String>> propertyIterator = conf.iterator();
-    while (propertyIterator.hasNext()) {
-      hash ^= propertyIterator.next().hashCode();
-    }
-    return hash;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (this == obj)
-      return true;
-    if (obj == null)
-      return false;
-    if (!(obj instanceof HBaseConfiguration))
-      return false;
-    HBaseConfiguration otherConf = (HBaseConfiguration) obj;
-    if (size() != otherConf.size()) {
-      return false;
-    }
-    Iterator<Entry<String, String>> propertyIterator = this.iterator();
-    while (propertyIterator.hasNext()) {
-      Entry<String, String> entry = propertyIterator.next();
-      String key = entry.getKey();
-      String value = entry.getValue();
-      if (!value.equals(otherConf.getRaw(key))) {
-        return false;
-      }
-    }
-
-    return true;
-  }
 }
HBaseAdmin.java
@@ -408,7 +408,7 @@ public class HBaseAdmin implements Abortable {
       }
     }
     // Delete cached information to prevent clients from using old locations
-    HConnectionManager.deleteConnectionInfo(conf, false);
+    HConnectionManager.deleteConnection(conf, false);
     LOG.info("Deleted " + Bytes.toString(tableName));
   }
 
HConnectionManager.java
@@ -45,7 +45,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
@@ -74,9 +73,7 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.zookeeper.KeeperException;
 
 /**
- * A non-instantiable class that manages connections to multiple tables in
- * multiple HBase instances.
- *
+ * A non-instantiable class that manages connections to tables.
  * Used by {@link HTable} and {@link HBaseAdmin}
  */
 @SuppressWarnings("serial")
@@ -91,44 +88,43 @@ public class HConnectionManager {
     });
   }
 
+  static final int MAX_CACHED_HBASE_INSTANCES = 31;
+
+  // A LRU Map of Configuration hashcode -> TableServers. We set instances to 31.
+  // The zk default max connections to the ensemble from the one client is 30 so
+  // should run into zk issues before hit this value of 31.
+  private static final Map<Configuration, TableServers> HBASE_INSTANCES =
+    new LinkedHashMap<Configuration, TableServers>
+     ((int) (MAX_CACHED_HBASE_INSTANCES/0.75F)+1, 0.75F, true) {
+      @Override
+      protected boolean removeEldestEntry(Map.Entry<Configuration, TableServers> eldest) {
+        return size() > MAX_CACHED_HBASE_INSTANCES;
+      }
+  };
+
   /*
-   * Not instantiable.
+   * Non-instantiable.
    */
   protected HConnectionManager() {
     super();
   }
 
-  private static final int MAX_CACHED_HBASE_INSTANCES=31;
-  // A LRU Map of master HBaseConfiguration -> connection information for that
-  // instance. The objects it contains are mutable and hence require
-  // synchronized access to them. We set instances to 31. The zk default max
-  // connections is 30 so should run into zk issues before hit this value of 31.
-  private static
-  final Map<Integer, TableServers> HBASE_INSTANCES =
-    new LinkedHashMap<Integer, TableServers>
-     ((int) (MAX_CACHED_HBASE_INSTANCES/0.75F)+1, 0.75F, true) {
-      @Override
-      protected boolean removeEldestEntry(Map.Entry<Integer, TableServers> eldest) {
-        return size() > MAX_CACHED_HBASE_INSTANCES;
-      }
-  };
-
   /**
-   * Get the connection object for the instance specified by the configuration
-   * If no current connection exists, create a new connection for that instance
+   * Get the connection that goes with the passed <code>conf</code> configuration.
+   * If no current connection exists, method creates a new connection for the
+   * passed <code>conf</code> instance.
    * @param conf configuration
-   * @return HConnection object for the instance specified by the configuration
+   * @return HConnection object for <code>conf</code>
    * @throws ZooKeeperConnectionException
    */
   public static HConnection getConnection(Configuration conf)
   throws ZooKeeperConnectionException {
     TableServers connection;
-    Integer key = HBaseConfiguration.hashCode(conf);
     synchronized (HBASE_INSTANCES) {
-      connection = HBASE_INSTANCES.get(key);
+      connection = HBASE_INSTANCES.get(conf);
       if (connection == null) {
         connection = new TableServers(conf);
-        HBASE_INSTANCES.put(key, connection);
+        HBASE_INSTANCES.put(conf, connection);
       }
     }
     return connection;
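The replacement cache above is a stock LinkedHashMap-as-LRU construction: accessOrder=true makes iteration order least-recently-used order, and overriding removeEldestEntry() drops the stalest entry once the map grows past the cap (31, set just above ZooKeeper's default of 30 connections from one client, per the comment). A generic restatement of the idiom, not HBase code, for readers who have not seen it:

import java.util.LinkedHashMap;
import java.util.Map;

// Generic sketch of the LRU idiom used for HBASE_INSTANCES above (illustration only).
public class LruCacheSketch<K, V> {
  private static final int MAX_ENTRIES = 31; // mirrors MAX_CACHED_HBASE_INSTANCES

  // accessOrder=true: get() and put() move an entry to the tail, so the head is
  // always the least recently used entry, which removeEldestEntry() may drop.
  private final Map<K, V> cache =
    new LinkedHashMap<K, V>((int) (MAX_ENTRIES / 0.75F) + 1, 0.75F, true) {
      @Override
      protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        return size() > MAX_ENTRIES;
      }
    };

  public synchronized V get(K key) {
    return cache.get(key);
  }

  public synchronized void put(K key, V value) {
    cache.put(key, value);
  }

  public synchronized int size() {
    return cache.size();
  }
}

Because HBASE_INSTANCES is now keyed by the Configuration reference itself, callers share a connection only when they pass the very same Configuration object, and mutating that object no longer changes how its entry is found or evicted.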
@@ -139,11 +135,9 @@ public class HConnectionManager {
    * @param conf configuration
    * @param stopProxy stop the proxy as well
    */
-  public static void deleteConnectionInfo(Configuration conf,
-      boolean stopProxy) {
+  public static void deleteConnection(Configuration conf, boolean stopProxy) {
     synchronized (HBASE_INSTANCES) {
-      Integer key = HBaseConfiguration.hashCode(conf);
-      TableServers t = HBASE_INSTANCES.remove(key);
+      TableServers t = HBASE_INSTANCES.remove(conf);
       if (t != null) {
         t.close(stopProxy);
       }
@@ -172,7 +166,8 @@ public class HConnectionManager {
    * @throws ZooKeeperConnectionException
    */
   static int getCachedRegionCount(Configuration conf,
-      byte[] tableName) throws ZooKeeperConnectionException {
+      byte[] tableName)
+  throws ZooKeeperConnectionException {
     TableServers connection = (TableServers)getConnection(conf);
     return connection.getNumberOfCachedRegionLocations(tableName);
   }
@@ -189,7 +184,7 @@ public class HConnectionManager {
     return connection.isRegionCached(tableName, row);
   }
 
-  /* Encapsulates finding the servers for an HBase instance */
+  /* Encapsulates connection to zookeeper and regionservers.*/
   static class TableServers implements ServerConnection, Abortable {
     static final Log LOG = LogFactory.getLog(TableServers.class);
     private final Class<? extends HRegionInterface> serverInterfaceClass;
@@ -212,7 +207,7 @@ public class HConnectionManager {
     private final Object metaRegionLock = new Object();
     private final Object userRegionLock = new Object();
 
-    private volatile Configuration conf;
+    private final Configuration conf;
 
     // Known region HServerAddress.toString() -> HRegionInterface
     private final Map<String, HRegionInterface> servers =
HTable.java
@@ -60,12 +60,25 @@ import java.io.DataOutput;
 /**
  * Used to communicate with a single HBase table.
  *
- * This class is not thread safe for writes.
- * Gets, puts, and deletes take out a row lock for the duration
- * of their operation. Scans (currently) do not respect
- * row locking.
+ * This class is not thread safe for updates; the underlying write buffer can
+ * be corrupted if multiple threads contend over a single HTable instance.
  *
- * See {@link HBaseAdmin} to create, drop, list, enable and disable tables.
+ * <p>Instances of HTable passed the same {@link Configuration} instance will
+ * share connections to master and the zookeeper ensemble as well as caches of
+ * region locations. This happens because they will all share the same
+ * {@link HConnection} instance (internally we keep a Map of {@link HConnection}
+ * instances keyed by {@link Configuration}).
+ * {@link HConnection} will read most of the
+ * configuration it needs from the passed {@link Configuration} on initial
+ * construction. Thereafter, for settings such as
+ * <code>hbase.client.pause</code>, <code>hbase.client.retries.number</code>,
+ * and <code>hbase.client.rpc.maxattempts</code> updating their values in the
+ * passed {@link Configuration} subsequent to {@link HConnection} construction
+ * will go unnoticed. To run with changed values, make a new
+ * {@link HTable} passing a new {@link Configuration} instance that has the
+ * new configuration.
+ *
+ * @see HBaseAdmin for create, drop, list, enable and disable of tables.
  */
 public class HTable implements HTableInterface {
   private final HConnection connection;
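A usage sketch of the sharing rules spelled out in the javadoc above; the table name and the retry value are made-up examples, and the constructors used are the ones documented in this commit:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;

// Illustration of HConnection sharing per Configuration instance.
public class SharedConnectionExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();

    // Same Configuration instance: both HTables share one HConnection, i.e. one
    // zookeeper client and one cache of region locations.
    HTable t1 = new HTable(conf, "mytable");
    HTable t2 = new HTable(conf, "mytable");

    // Updating hbase.client.retries.number on conf now would go unnoticed by the
    // existing connection. To run with a changed value, pass a new Configuration.
    Configuration retuned = HBaseConfiguration.create();
    retuned.setInt("hbase.client.retries.number", 2);
    HTable t3 = new HTable(retuned, "mytable"); // gets its own HConnection

    t1.close();
    t2.close();
    t3.close();
  }
}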
@@ -83,9 +96,13 @@ public class HTable implements HTableInterface {
 
   /**
    * Creates an object to access a HBase table.
-   *
-   * @param tableName Name of the table.
+   * Internally it creates a new instance of {@link Configuration} and a new
+   * client to zookeeper as well as other resources. It also comes up with
+   * a fresh view of the cluster and must do discovery from scratch of region
+   * locations; i.e. it will not make use of already-cached region locations if
+   * available. Use only when being quick and dirty.
    * @throws IOException if a remote or network exception occurs
+   * @see #HTable(Configuration, String)
    */
   public HTable(final String tableName)
   throws IOException {
@@ -94,9 +111,14 @@ public class HTable implements HTableInterface {
 
   /**
    * Creates an object to access a HBase table.
-   *
+   * Internally it creates a new instance of {@link Configuration} and a new
+   * client to zookeeper as well as other resources. It also comes up with
+   * a fresh view of the cluster and must do discovery from scratch of region
+   * locations; i.e. it will not make use of already-cached region locations if
+   * available. Use only when being quick and dirty.
    * @param tableName Name of the table.
    * @throws IOException if a remote or network exception occurs
+   * @see #HTable(Configuration, String)
    */
   public HTable(final byte [] tableName)
   throws IOException {
@@ -105,7 +127,10 @@ public class HTable implements HTableInterface {
 
   /**
    * Creates an object to access a HBase table.
-   *
+   * Shares zookeeper connection and other resources with other HTable instances
+   * created with the same <code>conf</code> instance. Uses already-populated
+   * region cache if one is available, populated by any other HTable instances
+   * sharing this <code>conf</code> instance. Recommended.
    * @param conf Configuration object to use.
    * @param tableName Name of the table.
    * @throws IOException if a remote or network exception occurs
@@ -118,7 +143,10 @@ public class HTable implements HTableInterface {
 
   /**
    * Creates an object to access a HBase table.
-   *
+   * Shares zookeeper connection and other resources with other HTable instances
+   * created with the same <code>conf</code> instance. Uses already-populated
+   * region cache if one is available, populated by any other HTable instances
+   * sharing this <code>conf</code> instance. Recommended.
    * @param conf Configuration object to use.
    * @param tableName Name of the table.
    * @throws IOException if a remote or network exception occurs
@@ -722,6 +750,10 @@ public class HTable implements HTableInterface {
     }
   }
 
+  /**
+   * Close down this HTable instance.
+   * Calls {@link #flushCommits()}.
+   */
   public void close() throws IOException{
     flushCommits();
   }
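To round out the constructor and close() documentation above, a small example of the pattern it implies: construct against a shared Configuration, write, then close(), which pushes out the write buffer via flushCommits(). The table, family, qualifier, and value names are hypothetical, and the Put API shown is assumed from the client classes of this era rather than taken from this diff:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of the close() contract documented above.
public class CloseFlushesExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "mytable");
    try {
      Put put = new Put(Bytes.toBytes("row1"));
      put.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      table.put(put);
    } finally {
      table.close(); // flushes any buffered edits via flushCommits()
    }
  }
}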
HTableFactory.java
@@ -29,7 +29,6 @@ import java.io.IOException;
  * @since 0.21.0
  */
 public class HTableFactory implements HTableInterfaceFactory {
-
   @Override
   public HTableInterface createHTableInterface(Configuration config,
       byte[] tableName) {
@@ -47,9 +46,5 @@ public class HTableFactory implements HTableInterfaceFactory {
     } catch (IOException ioe) {
       throw new RuntimeException(ioe);
     }
-
   }
-
-
-
 }
HMerge.java
@@ -106,7 +106,7 @@ class HMerge {
       HConnection connection = HConnectionManager.getConnection(conf);
       masterIsRunning = connection.isMasterRunning();
     }
-    HConnectionManager.deleteConnectionInfo(conf, false);
+    HConnectionManager.deleteConnection(conf, false);
     if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) {
       if (masterIsRunning) {
         throw new IllegalStateException(
HBaseClusterTestCase.java
@@ -172,7 +172,7 @@ public abstract class HBaseClusterTestCase extends HBaseTestCase {
     }
     super.tearDown();
     try {
-      HConnectionManager.deleteConnectionInfo(conf, true);
+      HConnectionManager.deleteConnection(conf, true);
       if (this.cluster != null) {
         try {
           this.cluster.shutdown();
TestHCM.java
@@ -19,11 +19,25 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.mortbay.log.Log;
 
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertNotNull;
@@ -32,7 +46,6 @@ import static org.junit.Assert.assertNotNull;
  * This class is for testing HCM features
  */
 public class TestHCM {
-
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private static final byte[] TABLE_NAME = Bytes.toBytes("test");
   private static final byte[] FAM_NAM = Bytes.toBytes("f");
@@ -43,6 +56,85 @@ public class TestHCM {
     TEST_UTIL.startMiniCluster(1);
   }
 
+  @AfterClass public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   * @throws InterruptedException
+   * @throws IllegalAccessException
+   * @throws NoSuchFieldException
+   * @throws ZooKeeperConnectionException
+   * @throws IllegalArgumentException
+   * @throws SecurityException
+   * @see https://issues.apache.org/jira/browse/HBASE-2925
+   */
+  @Test public void testManyNewConnectionsDoesnotOOME()
+  throws SecurityException, IllegalArgumentException,
+  ZooKeeperConnectionException, NoSuchFieldException, IllegalAccessException,
+  InterruptedException {
+    createNewConfigurations();
+  }
+
+  private static Random _randy = new Random();
+
+  public static void createNewConfigurations() throws SecurityException,
+      IllegalArgumentException, NoSuchFieldException,
+      IllegalAccessException, InterruptedException, ZooKeeperConnectionException {
+    HConnection last = null;
+    for (int i = 0; i <= (HConnectionManager.MAX_CACHED_HBASE_INSTANCES * 2); i++) {
+      // set random key to differentiate the connection from previous ones
+      Configuration configuration = HBaseConfiguration.create();
+      configuration.set("somekey", String.valueOf(_randy.nextInt()));
+      System.out.println("Hash Code: " + configuration.hashCode());
+      HConnection connection =
+        HConnectionManager.getConnection(configuration);
+      if (last != null) {
+        if (last == connection) {
+          System.out.println("!! Got same connection for once !!");
+        }
+      }
+      // change the configuration once, and the cached connection is lost forever:
+      // the hashtable holding the cache won't be able to find its own keys
+      // to remove them, so the LRU strategy does not work.
+      configuration.set("someotherkey", String.valueOf(_randy.nextInt()));
+      last = connection;
+      Log.info("Cache Size: "
+          + getHConnectionManagerCacheSize() + ", Valid Keys: "
+          + getValidKeyCount());
+      Thread.sleep(100);
+    }
+    Assert.assertEquals(HConnectionManager.MAX_CACHED_HBASE_INSTANCES,
+        getHConnectionManagerCacheSize());
+    Assert.assertEquals(HConnectionManager.MAX_CACHED_HBASE_INSTANCES,
+        getValidKeyCount());
+  }
+
+  private static int getHConnectionManagerCacheSize()
+      throws SecurityException, NoSuchFieldException,
+      IllegalArgumentException, IllegalAccessException {
+    Field cacheField =
+      HConnectionManager.class.getDeclaredField("HBASE_INSTANCES");
+    cacheField.setAccessible(true);
+    Map<?, ?> cache = (Map<?, ?>) cacheField.get(null);
+    return cache.size();
+  }
+
+  private static int getValidKeyCount() throws SecurityException,
+      NoSuchFieldException, IllegalArgumentException,
+      IllegalAccessException {
+    Field cacheField =
+      HConnectionManager.class.getDeclaredField("HBASE_INSTANCES");
+    cacheField.setAccessible(true);
+    Map<?, ?> cache = (Map<?, ?>) cacheField.get(null);
+    List<Object> keys = new ArrayList<Object>(cache.keySet());
+    Set<Object> values = new HashSet<Object>();
+    for (Object key : keys) {
+      values.add(cache.get(key));
+    }
+    return values.size();
+  }
+
   /**
    * Test that when we delete a location using the first row of a region
    * that we really delete it.
@@ -62,5 +154,4 @@ public class TestHCM {
     HRegionLocation rl = conn.getCachedLocation(TABLE_NAME, ROW);
     assertNull("What is this location?? " + rl, rl);
   }
 }
-
DisabledTestMetaUtils.java
@@ -50,7 +50,7 @@ public class DisabledTestMetaUtils extends HBaseClusterTestCase {
     utils.deleteColumn(editTable, Bytes.toBytes(oldColumn));
     utils.shutdown();
     // Delete again so we go get it all fresh.
-    HConnectionManager.deleteConnectionInfo(conf, false);
+    HConnectionManager.deleteConnection(conf, false);
     // Now assert columns were added and deleted.
     this.cluster = new MiniHBaseCluster(this.conf, 1);
     // Now assert columns were added and deleted.