HBASE-25307 ThreadLocal pooling leads to NullPointerException (#2709)
* PoolMap no longer discards any elements: if an element is added, it is always stored. The reason: the map holds expensive resources (RPC connections) that would leak if they were simply discarded. RpcClients can reference Netty ByteBufs, which are reference counted. Resource cleanup is done by AbstractRpcClient.cleanupIdleConnections().
* PoolMap no longer implements the Map interface, which makes it easier to ensure thread safety. The put() method is replaced by getOrCreate().
* ThreadLocalPool no longer uses the ThreadLocal class. It still stores resources per thread, but it does not remove values when a thread exits. Again, proper cleanup is done by cleanupIdleConnections().

Signed-off-by: Sean Busbey <busbey@apache.org>
Signed-off-by: Wellington Chevreuil <wellington.chevreuil@gmail.com>
This commit is contained in:
parent d6d0a20247
commit b75b772bc6
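
To make the reworked contract concrete, here is a minimal, hypothetical usage sketch of the new PoolMap API described above. The class name PoolMapUsageSketch and the createExpensiveResource() helper are illustrative stand-ins, not part of this change; in HBase the pooled resources are RPC connections and removal is driven by the idle-connection sweeper.

import java.io.IOException;

import org.apache.hadoop.hbase.util.PoolMap;
import org.apache.hadoop.hbase.util.PoolMap.PoolType;

public class PoolMapUsageSketch {
  public static void main(String[] args) throws IOException {
    // Up to 3 pooled resources per key, handed out in round-robin order.
    PoolMap<String, String> pool = new PoolMap<>(PoolType.RoundRobin, 3);

    // getOrCreate() replaces the old put()/get() pair: the supplier is only
    // invoked when a new resource is actually needed for this key.
    String resource = pool.getOrCreate("server-1", () -> createExpensiveResource());

    // Nothing is discarded automatically; unused resources must be removed
    // explicitly (in HBase this is done by AbstractRpcClient.cleanupIdleConnections()).
    pool.remove("server-1", resource);
  }

  // Illustrative stand-in for building an expensive resource such as an RPC connection.
  private static String createExpensiveResource() throws IOException {
    return "connection";
  }
}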

AbstractRpcClient.java

@@ -100,7 +100,7 @@ public abstract class AbstractRpcClient<T extends RpcConnection> implements RpcC
      new ThreadFactoryBuilder().setNameFormat("Idle-Rpc-Conn-Sweeper-pool-%d").setDaemon(true)
        .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());

-  protected boolean running = true; // if client runs
+  private boolean running = true; // if client runs

  protected final Configuration conf;
  protected final String clusterId;

@@ -126,7 +126,7 @@ public abstract class AbstractRpcClient<T extends RpcConnection> implements RpcC
  protected final int readTO;
  protected final int writeTO;

-  protected final PoolMap<ConnectionId, T> connections;
+  private final PoolMap<ConnectionId, T> connections;

  private final AtomicInteger callIdCnt = new AtomicInteger(0);

@@ -208,7 +208,7 @@ public abstract class AbstractRpcClient<T extends RpcConnection> implements RpcC
        if (LOG.isTraceEnabled()) {
          LOG.trace("Cleanup idle connection to {}", conn.remoteId().address);
        }
-        connections.removeValue(conn.remoteId(), conn);
+        connections.remove(conn.remoteId(), conn);
        conn.cleanupConnection();
      }
    }

@@ -291,7 +291,14 @@ public abstract class AbstractRpcClient<T extends RpcConnection> implements RpcC
   * @return the maximum pool size
   */
  private static int getPoolSize(Configuration config) {
-    return config.getInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, 1);
+    int poolSize = config.getInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, 1);
+
+    if (poolSize <= 0) {
+      LOG.warn("{} must be positive. Using default value: 1", HConstants.HBASE_CLIENT_IPC_POOL_SIZE);
+      return 1;
+    } else {
+      return poolSize;
+    }
  }

  private int nextCallId() {

@@ -347,11 +354,7 @@ public abstract class AbstractRpcClient<T extends RpcConnection> implements RpcC
      if (!running) {
        throw new StoppedRpcClientException();
      }
-      conn = connections.get(remoteId);
-      if (conn == null) {
-        conn = createConnection(remoteId);
-        connections.put(remoteId, conn);
-      }
+      conn = connections.getOrCreate(remoteId, () -> createConnection(remoteId));
      conn.setLastTouched(EnvironmentEdgeManager.currentTime());
    }
    return conn;

@@ -450,7 +453,7 @@ public abstract class AbstractRpcClient<T extends RpcConnection> implements RpcC
        && remoteId.address.getHostName().equals(sn.getHostname())) {
      LOG.info("The server on " + sn.toString() + " is dead - stopping the connection "
          + connection.remoteId);
-      connections.removeValue(remoteId, connection);
+      connections.remove(remoteId, connection);
      connection.shutdown();
      connection.cleanupConnection();
    }

BlockingRpcConnection.java

@@ -343,13 +343,13 @@ class BlockingRpcConnection extends RpcConnection implements Runnable {
  @Override
  public void run() {
    if (LOG.isTraceEnabled()) {
-      LOG.trace(threadName + ": starting, connections " + this.rpcClient.connections.size());
+      LOG.trace(threadName + ": starting");
    }
    while (waitForWork()) {
      readResponse();
    }
    if (LOG.isTraceEnabled()) {
-      LOG.trace(threadName + ": stopped, connections " + this.rpcClient.connections.size());
+      LOG.trace(threadName + ": stopped");
    }
  }

PoolMap.java

@@ -18,17 +18,14 @@
 */
package org.apache.hadoop.hbase.util;

+import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
-import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
-import java.util.Set;
+import java.util.Objects;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.atomic.AtomicInteger;

import org.apache.yetus.audience.InterfaceAudience;

@@ -45,177 +42,102 @@ import org.apache.yetus.audience.InterfaceAudience;
 * key. A size of {@link Integer#MAX_VALUE} is interpreted as an unbounded pool.
 * </p>
 *
+ * <p>
+ * PoolMap is thread-safe. It does not remove elements automatically. Unused resources
+ * must be closed and removed explicitly.
+ * </p>
+ *
 * @param <K>
 *          the type of the key to the resource
 * @param <V>
 *          the type of the resource being pooled
 */
@InterfaceAudience.Private
-public class PoolMap<K, V> implements Map<K, V> {
-  private PoolType poolType;
-
-  private int poolMaxSize;
-
-  private Map<K, Pool<V>> pools = new ConcurrentHashMap<>();
-
-  public PoolMap(PoolType poolType) {
-    this.poolType = poolType;
-  }
-
-  public PoolMap(PoolType poolType, int poolMaxSize) {
-    this.poolType = poolType;
-    this.poolMaxSize = poolMaxSize;
-  }
-
-  @Override
-  public V get(Object key) {
-    Pool<V> pool = pools.get(key);
-    return pool != null ? pool.get() : null;
-  }
-
-  @Override
-  public V put(K key, V value) {
-    Pool<V> pool = pools.get(key);
-    if (pool == null) {
-      pools.put(key, pool = createPool());
-    }
-    return pool != null ? pool.put(value) : null;
-  }
-
-  @SuppressWarnings("unchecked")
-  @Override
-  public V remove(Object key) {
-    Pool<V> pool = pools.remove(key);
-    if (pool != null) {
-      removeValue((K) key, pool.get());
-    }
-    return null;
-  }
-
-  public boolean removeValue(K key, V value) {
-    Pool<V> pool = pools.get(key);
-    boolean res = false;
-    if (pool != null) {
-      res = pool.remove(value);
-      if (res && pool.size() == 0) {
-        pools.remove(key);
-      }
-    }
-    return res;
-  }
-
-  @Override
-  public Collection<V> values() {
-    Collection<V> values = new ArrayList<>();
-    for (Pool<V> pool : pools.values()) {
-      Collection<V> poolValues = pool.values();
-      if (poolValues != null) {
-        values.addAll(poolValues);
-      }
-    }
-    return values;
-  }
-
-  public Collection<V> values(K key) {
-    Collection<V> values = new ArrayList<>();
-    Pool<V> pool = pools.get(key);
-    if (pool != null) {
-      Collection<V> poolValues = pool.values();
-      if (poolValues != null) {
-        values.addAll(poolValues);
-      }
-    }
-    return values;
-  }
-
-  @Override
-  public boolean isEmpty() {
-    return pools.isEmpty();
-  }
-
-  @Override
-  public int size() {
-    return pools.size();
-  }
-
-  public int size(K key) {
-    Pool<V> pool = pools.get(key);
-    return pool != null ? pool.size() : 0;
-  }
-
-  @Override
-  public boolean containsKey(Object key) {
-    return pools.containsKey(key);
-  }
-
-  @Override
-  public boolean containsValue(Object value) {
-    if (value == null) {
-      return false;
-    }
-    for (Pool<V> pool : pools.values()) {
-      if (value.equals(pool.get())) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  @Override
-  public void putAll(Map<? extends K, ? extends V> map) {
-    for (Map.Entry<? extends K, ? extends V> entry : map.entrySet()) {
-      put(entry.getKey(), entry.getValue());
-    }
-  }
-
-  @Override
-  public void clear() {
-    for (Pool<V> pool : pools.values()) {
-      pool.clear();
-    }
-    pools.clear();
-  }
-
-  @Override
-  public Set<K> keySet() {
-    return pools.keySet();
-  }
-
-  @Override
-  public Set<Map.Entry<K, V>> entrySet() {
-    Set<Map.Entry<K, V>> entries = new HashSet<>();
-    for (Map.Entry<K, Pool<V>> poolEntry : pools.entrySet()) {
-      final K poolKey = poolEntry.getKey();
-      final Pool<V> pool = poolEntry.getValue();
-      if (pool != null) {
-        for (final V poolValue : pool.values()) {
-          entries.add(new Map.Entry<K, V>() {
-            @Override
-            public K getKey() {
-              return poolKey;
-            }
-
-            @Override
-            public V getValue() {
-              return poolValue;
-            }
-
-            @Override
-            public V setValue(V value) {
-              return pool.put(value);
-            }
-          });
-        }
-      }
-    }
-    return entries;
-  }
+public class PoolMap<K, V> {
+  private final Map<K, Pool<V>> pools;
+  private final PoolType poolType;
+  private final int poolMaxSize;
+
+  public PoolMap(PoolType poolType, int poolMaxSize) {
+    pools = new HashMap<>();
+    this.poolType = poolType;
+    this.poolMaxSize = poolMaxSize;
+  }
+
+  public V getOrCreate(K key, PoolResourceSupplier<V> supplier) throws IOException {
+    synchronized (pools) {
+      Pool<V> pool = pools.get(key);
+
+      if (pool == null) {
+        pool = createPool();
+        pools.put(key, pool);
+      }
+
+      try {
+        return pool.getOrCreate(supplier);
+      } catch (IOException | RuntimeException | Error e) {
+        if (pool.size() == 0) {
+          pools.remove(key);
+        }
+
+        throw e;
+      }
+    }
+  }
+
+  public boolean remove(K key, V value) {
+    synchronized (pools) {
+      Pool<V> pool = pools.get(key);
+
+      if (pool == null) {
+        return false;
+      }
+
+      boolean removed = pool.remove(value);
+
+      if (removed && pool.size() == 0) {
+        pools.remove(key);
+      }
+
+      return removed;
+    }
+  }
+
+  public List<V> values() {
+    List<V> values = new ArrayList<>();
+
+    synchronized (pools) {
+      for (Pool<V> pool : pools.values()) {
+        Collection<V> poolValues = pool.values();
+        if (poolValues != null) {
+          values.addAll(poolValues);
+        }
+      }
+    }
+
+    return values;
+  }
+
+  public void clear() {
+    synchronized (pools) {
+      for (Pool<V> pool : pools.values()) {
+        pool.clear();
+      }
+
+      pools.clear();
+    }
+  }
+
+  public interface PoolResourceSupplier<R> {
+    R get() throws IOException;
+  }
+
+  protected static <V> V createResource(PoolResourceSupplier<V> supplier) throws IOException {
+    V resource = supplier.get();
+    return Objects.requireNonNull(resource, "resource cannot be null.");
+  }

  protected interface Pool<R> {
-    R get();
-
-    R put(R resource);
+    R getOrCreate(PoolResourceSupplier<R> supplier) throws IOException;

    boolean remove(R resource);

@@ -254,8 +176,9 @@ public class PoolMap<K, V> implements Map<K, V> {
        return new RoundRobinPool<>(poolMaxSize);
      case ThreadLocal:
        return new ThreadLocalPool<>();
+      default:
+        return new RoundRobinPool<>(poolMaxSize);
    }
-    return null;
  }

  /**

@@ -275,43 +198,66 @@ public class PoolMap<K, V> implements Map<K, V> {
   *
   */
  @SuppressWarnings("serial")
-  static class RoundRobinPool<R> extends CopyOnWriteArrayList<R> implements Pool<R> {
-    private int maxSize;
-    private int nextResource = 0;
+  static class RoundRobinPool<R> implements Pool<R> {
+    private final List<R> resources;
+    private final int maxSize;
+
+    private int nextIndex;

    public RoundRobinPool(int maxSize) {
+      if (maxSize <= 0) {
+        throw new IllegalArgumentException("maxSize must be positive");
+      }
+
+      resources = new ArrayList<>(maxSize);
      this.maxSize = maxSize;
    }

    @Override
-    public R put(R resource) {
-      if (super.size() < maxSize) {
-        add(resource);
-      }
-      return null;
-    }
-
-    @Override
-    public R get() {
-      if (super.size() < maxSize) {
-        return null;
-      }
-      nextResource %= super.size();
-      R resource = get(nextResource++);
+    public R getOrCreate(PoolResourceSupplier<R> supplier) throws IOException {
+      int size = resources.size();
+      R resource;
+
+      /* letting pool to grow */
+      if (size < maxSize) {
+        resource = createResource(supplier);
+        resources.add(resource);
+      } else {
+        resource = resources.get(nextIndex);
+
+        /* at this point size cannot be 0 */
+        nextIndex = (nextIndex + 1) % size;
+      }
+
      return resource;
    }

    @Override
-    public Collection<R> values() {
-      return this;
+    public boolean remove(R resource) {
+      return resources.remove(resource);
+    }
+
+    @Override
+    public void clear() {
+      resources.clear();
+    }
+
+    @Override
+    public Collection<R> values() {
+      return resources;
+    }
+
+    @Override
+    public int size() {
+      return resources.size();
    }
  }

  /**
   * The <code>ThreadLocalPool</code> represents a {@link PoolMap.Pool} that
-   * builds on the {@link ThreadLocal} class. It essentially binds the resource
-   * to the thread from which it is accessed.
+   * works similarly to {@link ThreadLocal} class. It essentially binds the resource
+   * to the thread from which it is accessed. It doesn't remove resources when a thread exits,
+   * those resources must be closed manually.
   *
   * <p>
   * Note that the size of the pool is essentially bounded by the number of threads

@@ -321,62 +267,45 @@ public class PoolMap<K, V> implements Map<K, V> {
   * @param <R>
   *          the type of the resource
   */
-  static class ThreadLocalPool<R> extends ThreadLocal<R> implements Pool<R> {
-    private static final Map<ThreadLocalPool<?>, AtomicInteger> poolSizes = new HashMap<>();
+  static class ThreadLocalPool<R> implements Pool<R> {
+    private final Map<Thread, R> resources;

    public ThreadLocalPool() {
+      resources = new HashMap<>();
    }

    @Override
-    public R put(R resource) {
-      R previousResource = get();
-      if (previousResource == null) {
-        AtomicInteger poolSize = poolSizes.get(this);
-        if (poolSize == null) {
-          poolSizes.put(this, poolSize = new AtomicInteger(0));
-        }
-        poolSize.incrementAndGet();
-      }
-      this.set(resource);
-      return previousResource;
-    }
-
-    @Override
-    public void remove() {
-      super.remove();
-      AtomicInteger poolSize = poolSizes.get(this);
-      if (poolSize != null) {
-        poolSize.decrementAndGet();
-      }
-    }
-
-    @Override
-    public int size() {
-      AtomicInteger poolSize = poolSizes.get(this);
-      return poolSize != null ? poolSize.get() : 0;
+    public R getOrCreate(PoolResourceSupplier<R> supplier) throws IOException {
+      Thread myself = Thread.currentThread();
+      R resource = resources.get(myself);
+
+      if (resource == null) {
+        resource = createResource(supplier);
+        resources.put(myself, resource);
+      }
+
+      return resource;
    }

    @Override
    public boolean remove(R resource) {
-      R previousResource = super.get();
-      if (resource != null && resource.equals(previousResource)) {
-        remove();
-        return true;
-      } else {
-        return false;
-      }
+      /* remove can be called from any thread */
+      return resources.values().remove(resource);
+    }
+
+    @Override
+    public int size() {
+      return resources.size();
    }

    @Override
    public void clear() {
-      super.remove();
+      resources.clear();
    }

    @Override
    public Collection<R> values() {
-      List<R> values = new ArrayList<>();
-      values.add(get());
-      return values;
+      return resources.values();
    }
  }
}

PoolMapTestBase.java

@@ -17,9 +17,13 @@
 */
package org.apache.hadoop.hbase.util;

+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

+import java.io.IOException;
+import java.util.Objects;
import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
import org.apache.hadoop.hbase.util.PoolMap.PoolType;
import org.junit.After;
import org.junit.Before;

@@ -28,6 +32,7 @@ public abstract class PoolMapTestBase {

  protected PoolMap<String, String> poolMap;

+  protected static final int KEY_COUNT = 5;
  protected static final int POOL_SIZE = 3;

  @Before

@@ -35,27 +40,5 @@
    this.poolMap = new PoolMap<>(getPoolType(), POOL_SIZE);
  }

-  @After
-  public void tearDown() throws Exception {
-    this.poolMap.clear();
-  }
-
  protected abstract PoolType getPoolType();
-
-  protected void runThread(final String randomKey, final String randomValue,
-      final String expectedValue) throws InterruptedException {
-    final AtomicBoolean matchFound = new AtomicBoolean(false);
-    Thread thread = new Thread(new Runnable() {
-      @Override
-      public void run() {
-        poolMap.put(randomKey, randomValue);
-        String actualValue = poolMap.get(randomKey);
-        matchFound
-          .set(expectedValue == null ? actualValue == null : expectedValue.equals(actualValue));
-      }
-    });
-    thread.start();
-    thread.join();
-    assertTrue(matchFound.get());
-  }
}

TestRoundRobinPoolMap.java

@@ -18,12 +18,19 @@
package org.apache.hadoop.hbase.util;

import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;

+import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
import java.util.List;
import java.util.Random;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;

@@ -45,58 +52,103 @@ public class TestRoundRobinPoolMap extends PoolMapTestBase {
  }

  @Test
-  public void testSingleThreadedClient() throws InterruptedException, ExecutionException {
-    Random rand = ThreadLocalRandom.current();
-    String randomKey = String.valueOf(rand.nextInt());
-    String randomValue = String.valueOf(rand.nextInt());
-    // As long as the pool is not full, we'll get null back.
-    // This forces the user to create new values that can be used to populate
-    // the pool.
-    runThread(randomKey, randomValue, null);
-    assertEquals(1, poolMap.size(randomKey));
+  public void testGetOrCreate() throws IOException {
+    String key = "key";
+    String value = "value";
+    String result = poolMap.getOrCreate(key, () -> value);
+
+    assertEquals(value, result);
+    assertEquals(1, poolMap.values().size());
  }

  @Test
-  public void testMultiThreadedClients() throws InterruptedException, ExecutionException {
-    Random rand = ThreadLocalRandom.current();
-    for (int i = 0; i < POOL_SIZE; i++) {
-      String randomKey = String.valueOf(rand.nextInt());
-      String randomValue = String.valueOf(rand.nextInt());
-      // As long as the pool is not full, we'll get null back
-      runThread(randomKey, randomValue, null);
-      // As long as we use distinct keys, each pool will have one value
-      assertEquals(1, poolMap.size(randomKey));
-    }
-    poolMap.clear();
-    String randomKey = String.valueOf(rand.nextInt());
-    for (int i = 0; i < POOL_SIZE - 1; i++) {
-      String randomValue = String.valueOf(rand.nextInt());
-      // As long as the pool is not full, we'll get null back
-      runThread(randomKey, randomValue, null);
-      // since we use the same key, the pool size should grow
-      assertEquals(i + 1, poolMap.size(randomKey));
-    }
-    // at the end of the day, there should be as many values as we put
-    assertEquals(POOL_SIZE - 1, poolMap.size(randomKey));
+  public void testMultipleKeys() throws IOException {
+    for (int i = 0; i < KEY_COUNT; i++) {
+      String key = Integer.toString(i);
+      String value = Integer.toString(2 * i);
+      String result = poolMap.getOrCreate(key, () -> value);
+
+      assertEquals(value, result);
+    }
+
+    assertEquals(KEY_COUNT, poolMap.values().size());
+  }
+
+  @Test
+  public void testMultipleValues() throws IOException {
+    String key = "key";
+
+    for (int i = 0; i < POOL_SIZE; i++) {
+      String value = Integer.toString(i);
+      String result = poolMap.getOrCreate(key, () -> value);
+
+      assertEquals(value, result);
+    }
+
+    assertEquals(POOL_SIZE, poolMap.values().size());
  }

  @Test
-  public void testPoolCap() throws InterruptedException, ExecutionException {
-    Random rand = ThreadLocalRandom.current();
-    String randomKey = String.valueOf(rand.nextInt());
-    List<String> randomValues = new ArrayList<>();
-    for (int i = 0; i < POOL_SIZE * 2; i++) {
-      String randomValue = String.valueOf(rand.nextInt());
-      randomValues.add(randomValue);
-      if (i < POOL_SIZE - 1) {
-        // As long as the pool is not full, we'll get null back
-        runThread(randomKey, randomValue, null);
-      } else {
-        // when the pool becomes full, we expect the value we get back to be
-        // what we put earlier, in round-robin order
-        runThread(randomKey, randomValue, randomValues.get((i - POOL_SIZE + 1) % POOL_SIZE));
-      }
-    }
-    assertEquals(POOL_SIZE, poolMap.size(randomKey));
+  public void testRoundRobin() throws IOException {
+    String key = "key";
+
+    for (int i = 0; i < POOL_SIZE; i++) {
+      String value = Integer.toString(i);
+      poolMap.getOrCreate(key, () -> value);
+    }
+
+    assertEquals(POOL_SIZE, poolMap.values().size());
+
+    /* pool is filled, get() should return elements round robin order */
+    for (int i = 0; i < 2 * POOL_SIZE; i++) {
+      String expected = Integer.toString(i % POOL_SIZE);
+      assertEquals(expected, poolMap.getOrCreate(key, () -> {
+        throw new IOException("must not call me");
+      }));
+    }
+
+    assertEquals(POOL_SIZE, poolMap.values().size());
+  }
+
+  @Test
+  public void testMultiThreadedRoundRobin() throws ExecutionException, InterruptedException {
+    String key = "key";
+    AtomicInteger id = new AtomicInteger();
+    List<String> results = Collections.synchronizedList(new ArrayList<>());
+
+    Runnable runnable = () -> {
+      try {
+        for (int i = 0; i < POOL_SIZE; i++) {
+          String value = Integer.toString(id.getAndIncrement());
+          String result = poolMap.getOrCreate(key, () -> value);
+          results.add(result);
+
+          Thread.yield();
+        }
+      } catch (IOException e) {
+        throw new CompletionException(e);
+      }
+    };
+
+    CompletableFuture<Void> future1 = CompletableFuture.runAsync(runnable);
+    CompletableFuture<Void> future2 = CompletableFuture.runAsync(runnable);
+
+    /* test for successful completion */
+    future1.get();
+    future2.get();
+
+    assertEquals(POOL_SIZE, poolMap.values().size());
+
+    /* check every elements occur twice */
+    Collections.sort(results);
+    Iterator<String> iterator = results.iterator();
+
+    for (int i = 0; i < POOL_SIZE; i++) {
+      String next1 = iterator.next();
+      String next2 = iterator.next();
+      assertEquals(next1, next2);
+    }
+
+    assertFalse(iterator.hasNext());
  }
}

TestThreadLocalPoolMap.java

@@ -19,9 +19,13 @@ package org.apache.hadoop.hbase.util;

import static org.junit.Assert.assertEquals;

+import java.io.IOException;
import java.util.Random;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;

@@ -43,42 +47,71 @@ public class TestThreadLocalPoolMap extends PoolMapTestBase {
  }

  @Test
-  public void testSingleThreadedClient() throws InterruptedException, ExecutionException {
-    Random rand = ThreadLocalRandom.current();
-    String randomKey = String.valueOf(rand.nextInt());
-    String randomValue = String.valueOf(rand.nextInt());
-    // As long as the pool is not full, we should get back what we put
-    runThread(randomKey, randomValue, randomValue);
-    assertEquals(1, poolMap.size(randomKey));
+  public void testGetOrCreate() throws IOException {
+    String key = "key";
+    String value = "value";
+    String result = poolMap.getOrCreate(key, () -> value);
+
+    assertEquals(value, result);
+    assertEquals(1, poolMap.values().size());
  }

  @Test
-  public void testMultiThreadedClients() throws InterruptedException, ExecutionException {
-    Random rand = ThreadLocalRandom.current();
-    // As long as the pool is not full, we should get back what we put
-    for (int i = 0; i < POOL_SIZE; i++) {
-      String randomKey = String.valueOf(rand.nextInt());
-      String randomValue = String.valueOf(rand.nextInt());
-      runThread(randomKey, randomValue, randomValue);
-      assertEquals(1, poolMap.size(randomKey));
-    }
-    String randomKey = String.valueOf(rand.nextInt());
-    for (int i = 0; i < POOL_SIZE; i++) {
-      String randomValue = String.valueOf(rand.nextInt());
-      runThread(randomKey, randomValue, randomValue);
-      assertEquals(i + 1, poolMap.size(randomKey));
-    }
+  public void testMultipleKeys() throws IOException {
+    for (int i = 0; i < KEY_COUNT; i++) {
+      String key = Integer.toString(i);
+      String value = Integer.toString(2 * i);
+      String result = poolMap.getOrCreate(key, () -> value);
+
+      assertEquals(value, result);
+    }
+
+    assertEquals(KEY_COUNT, poolMap.values().size());
  }

  @Test
-  public void testPoolCap() throws InterruptedException, ExecutionException {
-    Random rand = ThreadLocalRandom.current();
-    String randomKey = String.valueOf(rand.nextInt());
-    for (int i = 0; i < POOL_SIZE * 2; i++) {
-      String randomValue = String.valueOf(rand.nextInt());
-      // as of HBASE-4150, pool limit is no longer used with ThreadLocalPool
-      runThread(randomKey, randomValue, randomValue);
-    }
-    assertEquals(POOL_SIZE * 2, poolMap.size(randomKey));
+  public void testFull() throws IOException {
+    String key = "key";
+    String value = "value";
+
+    String result = poolMap.getOrCreate(key, () -> value);
+    assertEquals(value, result);
+
+    String result2 = poolMap.getOrCreate(key, () -> {
+      throw new IOException("must not call me");
+    });
+
+    assertEquals(value, result2);
+    assertEquals(1, poolMap.values().size());
+  }
+
+  @Test
+  public void testLocality() throws ExecutionException, InterruptedException {
+    String key = "key";
+    AtomicInteger id = new AtomicInteger();
+
+    Runnable runnable = () -> {
+      try {
+        String myId = Integer.toString(id.getAndIncrement());
+
+        for (int i = 0; i < 3; i++) {
+          String result = poolMap.getOrCreate(key, () -> myId);
+          assertEquals(myId, result);
+
+          Thread.yield();
+        }
+      } catch (IOException e) {
+        throw new CompletionException(e);
+      }
+    };
+
+    CompletableFuture<Void> future1 = CompletableFuture.runAsync(runnable);
+    CompletableFuture<Void> future2 = CompletableFuture.runAsync(runnable);
+
+    /* test for successful completion */
+    future1.get();
+    future2.get();
+
+    assertEquals(2, poolMap.values().size());
  }
}