HBASE-1655 Usability improvements to HTablePool

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@795201 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Jonathan Gray 2009-07-17 19:25:13 +00:00
parent cf4d1a8a7d
commit 6f242ea63f
10 changed files with 225 additions and 143 deletions

View File

@ -494,6 +494,7 @@ Release 0.20.0 - Unreleased
HBASE-1665 expose more load information to the client side
HBASE-1609 We wait on leases to expire before regionserver goes down.
Rather, just let client fail
HBASE-1655 Usability improvements to HTablePool (Ken Weiner via jgray)
OPTIMIZATIONS
HBASE-1412 Change values for delete column and column family in KeyValue

View File

@ -29,7 +29,6 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.util.Bytes;
import com.sun.jersey.server.impl.container.servlet.ServletAdaptor;
@ -45,6 +44,7 @@ public class RESTServlet extends ServletAdaptor {
private static RESTServlet instance;
private final HBaseConfiguration conf;
private final HTablePool pool;
protected Map<String,Integer> maxAgeMap =
Collections.synchronizedMap(new HashMap<String,Integer>());
@ -65,16 +65,17 @@ public class RESTServlet extends ServletAdaptor {
*/
public RESTServlet() throws IOException {
this.conf = new HBaseConfiguration();
this.pool = new HTablePool(conf, 10);
}
/**
* Get or create a table pool for the given table.
* Get a table pool for the given table.
* @param name the table name
* @return the table pool
*/
protected HTablePool getTablePool(String name) {
return HTablePool.getPool(conf, Bytes.toBytes(name));
protected HTablePool getTablePool() {
return pool;
}
/**
@ -95,11 +96,10 @@ public class RESTServlet extends ServletAdaptor {
if (i != null) {
return i.intValue();
}
HTablePool pool = this.getTablePool(tableName);
HTable table = pool.get();
if (table != null) {
HTable table = pool.getTable(tableName);
try {
int maxAge = DEFAULT_MAX_AGE;
for (HColumnDescriptor family:
for (HColumnDescriptor family :
table.getTableDescriptor().getFamilies()) {
int ttl = family.getTimeToLive();
if (ttl < 0) {
@ -111,8 +111,9 @@ public class RESTServlet extends ServletAdaptor {
}
maxAgeMap.put(tableName, maxAge);
return maxAge;
} finally {
pool.putTable(table);
}
return DEFAULT_MAX_AGE;
}
/**

View File

@ -23,14 +23,16 @@ package org.apache.hadoop.hbase.stargate;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Map;
import javax.ws.rs.GET;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.CacheControl;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.ResponseBuilder;
import javax.ws.rs.core.UriInfo;
import javax.ws.rs.core.Response.ResponseBuilder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HRegionInfo;
@ -56,12 +58,12 @@ public class RegionsResource implements Constants {
private Map<HRegionInfo,HServerAddress> getTableRegions()
throws IOException {
HTablePool pool = RESTServlet.getInstance().getTablePool(this.table);
HTable table = pool.get();
HTablePool pool = RESTServlet.getInstance().getTablePool();
HTable table = pool.getTable(this.table);
try {
return table.getRegionsInfo();
} finally {
pool.put(table);
pool.putTable(table);
}
}

View File

@ -23,23 +23,22 @@ package org.apache.hadoop.hbase.stargate;
import java.io.IOException;
import java.util.List;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Consumes;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.CacheControl;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.ResponseBuilder;
import javax.ws.rs.core.UriInfo;
import javax.ws.rs.core.Response.ResponseBuilder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTable;
@ -136,14 +135,14 @@ public class RowResource implements Constants {
private Response update(CellSetModel model, boolean replace) {
HTablePool pool;
try {
pool = RESTServlet.getInstance().getTablePool(this.table);
pool = RESTServlet.getInstance().getTablePool();
} catch (IOException e) {
throw new WebApplicationException(e,
Response.Status.INTERNAL_SERVER_ERROR);
}
HTable table = null;
try {
table = pool.get();
table = pool.getTable(this.table);
for (RowModel row: model.getRows()) {
Put put = new Put(row.getKey());
for (CellModel cell: row.getCells()) {
@ -167,7 +166,7 @@ public class RowResource implements Constants {
Response.Status.SERVICE_UNAVAILABLE);
} finally {
if (table != null) {
pool.put(table);
pool.putTable(table);
}
}
}
@ -176,7 +175,7 @@ public class RowResource implements Constants {
boolean replace) {
HTablePool pool;
try {
pool = RESTServlet.getInstance().getTablePool(this.table);
pool = RESTServlet.getInstance().getTablePool();
} catch (IOException e) {
throw new WebApplicationException(e,
Response.Status.INTERNAL_SERVER_ERROR);
@ -212,7 +211,7 @@ public class RowResource implements Constants {
} else {
put.add(parts[0], parts[1], message);
}
table = pool.get();
table = pool.getTable(this.table);
table.put(put);
if (LOG.isDebugEnabled()) {
LOG.debug("PUT " + put.toString());
@ -224,7 +223,7 @@ public class RowResource implements Constants {
Response.Status.SERVICE_UNAVAILABLE);
} finally {
if (table != null) {
pool.put(table);
pool.putTable(table);
}
}
}
@ -287,14 +286,14 @@ public class RowResource implements Constants {
}
HTablePool pool;
try {
pool = RESTServlet.getInstance().getTablePool(this.table);
pool = RESTServlet.getInstance().getTablePool();
} catch (IOException e) {
throw new WebApplicationException(e,
Response.Status.INTERNAL_SERVER_ERROR);
}
HTable table = null;
try {
table = pool.get();
table = pool.getTable(this.table);
table.delete(delete);
if (LOG.isDebugEnabled()) {
LOG.debug("DELETE " + delete.toString());
@ -305,7 +304,7 @@ public class RowResource implements Constants {
Response.Status.SERVICE_UNAVAILABLE);
} finally {
if (table != null) {
pool.put(table);
pool.putTable(table);
}
}
return Response.ok().build();

View File

@ -36,8 +36,8 @@ public class RowResultGenerator extends ResultGenerator {
public RowResultGenerator(String tableName, RowSpec rowspec)
throws IllegalArgumentException, IOException {
HTablePool pool = RESTServlet.getInstance().getTablePool(tableName);
HTable table = pool.get();
HTablePool pool = RESTServlet.getInstance().getTablePool();
HTable table = pool.getTable(tableName);
try {
Get get = new Get(rowspec.getRow());
if (rowspec.hasColumns()) {
@ -56,7 +56,7 @@ public class RowResultGenerator extends ResultGenerator {
valuesI = result.list().iterator();
}
} finally {
pool.put(table);
pool.putTable(table);
}
}

View File

@ -46,8 +46,8 @@ public class ScannerResultGenerator extends ResultGenerator {
public ScannerResultGenerator(String tableName, RowSpec rowspec)
throws IllegalArgumentException, IOException {
HTablePool pool = RESTServlet.getInstance().getTablePool(tableName);
HTable table = pool.get();
HTablePool pool = RESTServlet.getInstance().getTablePool();
HTable table = pool.getTable(tableName);
try {
Scan scan;
if (rowspec.hasEndRow()) {
@ -70,7 +70,7 @@ public class ScannerResultGenerator extends ResultGenerator {
id = Long.toString(System.currentTimeMillis()) +
Integer.toHexString(scanner.hashCode());
} finally {
pool.put(table);
pool.putTable(table);
}
}

View File

@ -23,18 +23,18 @@ package org.apache.hadoop.hbase.stargate;
import java.io.IOException;
import java.util.Map;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Consumes;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.CacheControl;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.ResponseBuilder;
import javax.ws.rs.core.UriInfo;
import javax.ws.rs.core.Response.ResponseBuilder;
import javax.xml.namespace.QName;
import org.apache.commons.logging.Log;
@ -66,12 +66,12 @@ public class SchemaResource implements Constants {
private HTableDescriptor getTableSchema() throws IOException,
TableNotFoundException {
HTablePool pool = RESTServlet.getInstance().getTablePool(this.table);
HTable table = pool.get();
HTablePool pool = RESTServlet.getInstance().getTablePool();
HTable table = pool.getTable(this.table);
try {
return table.getTableDescriptor();
} finally {
pool.put(table);
pool.putTable(table);
}
}

View File

@ -38,11 +38,6 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.UnknownScannerException;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.RowFilterInterface;
import org.apache.hadoop.hbase.filter.StopRowFilter;
import org.apache.hadoop.hbase.filter.WhileMatchRowFilter;
@ -108,7 +103,7 @@ public class HTable {
}
/**
* Creates an object to access a HBase table
* Creates an object to access a HBase table.
*
* @param conf configuration object
* @param tableName name of the table
@ -116,15 +111,19 @@ public class HTable {
*/
public HTable(HBaseConfiguration conf, final byte [] tableName)
throws IOException {
this.connection = HConnectionManager.getConnection(conf);
this.tableName = tableName;
if (conf == null) {
this.scannerTimeout = 0;
this.connection = null;
return;
}
this.connection = HConnectionManager.getConnection(conf);
this.scannerTimeout =
conf.getInt("hbase.regionserver.lease.period", 60 * 1000);
this.configuration = conf;
this.connection.locateRegion(tableName, HConstants.EMPTY_START_ROW);
this.writeBuffer = new ArrayList<Put>();
this.writeBufferSize =
this.configuration.getLong("hbase.client.write.buffer", 2097152);
this.writeBufferSize = conf.getLong("hbase.client.write.buffer", 2097152);
this.autoFlush = true;
this.currentWriteBufferSize = 0;
this.scannerCaching = conf.getInt("hbase.client.scanner.caching", 1);

View File

@ -17,136 +17,108 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import java.io.IOException;
import java.util.ArrayDeque;
/* using a stack instead of a FIFO might have some small positive performance
impact wrt. cache */
import java.util.Deque;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.Bytes;
/**
* A simple pool of HTable instances.
* <p>
* The default pool size is 10.
* A simple pool of HTable instances.<p>
*
* Each HTablePool acts as a pool for all tables. To use, instantiate an
* HTablePool and use {@link #getTable(String)} to get an HTable from the pool.
* Once you are done with it, return it to the pool with {@link #putTable(HTable)}.<p>
*
A pool can be created with a <i>maxSize</i>, which defines the maximum
number of HTable references that will ever be retained for each table.
Otherwise the default is {@link Integer#MAX_VALUE}.<p>
*/
public class HTablePool {
private static final Map<byte[], HTablePool> poolMap =
new TreeMap<byte[], HTablePool>(Bytes.BYTES_COMPARATOR);
private final Map<String, LinkedList<HTable>> tables =
Collections.synchronizedMap(new HashMap<String, LinkedList<HTable>>());
private final HBaseConfiguration config;
private final byte[] tableName;
private final Deque<HTable> pool;
private final int maxSize;
/**
* Get a shared table pool.
* @param config
* @param tableName the table name
* @return the table pool
* Default Constructor. Default HBaseConfiguration and no limit on pool size.
*/
public static HTablePool getPool(HBaseConfiguration config,
byte[] tableName) {
return getPool(config, tableName, 10);
public HTablePool() {
this(new HBaseConfiguration(), Integer.MAX_VALUE);
}
/**
* Get a shared table pool.
* @param tableName the table name
* @return the table pool
* Constructor to set maximum versions and use the specified configuration.
* @param config configuration
* @param maxSize maximum number of references to keep for each table
*/
public static HTablePool getPool(byte[] tableName) {
return getPool(new HBaseConfiguration(), tableName, 10);
}
/**
* Get a shared table pool.
* <p>
* NOTE: <i>maxSize</i> is advisory. If the pool does not yet exist, a new
* shared pool will be allocated with <i>maxSize</i> as the size limit.
* However, if the shared pool already exists, and was created with a
* different (or default) value for <i>maxSize</i>, it will not be changed.
* @param config HBase configuration
* @param tableName the table name
* @param maxSize the maximum size of the pool
* @return the table pool
*/
public static HTablePool getPool(HBaseConfiguration config, byte[] tableName,
int maxSize) {
synchronized (poolMap) {
HTablePool pool = poolMap.get(tableName);
if (pool == null) {
pool = new HTablePool(config, tableName, maxSize);
poolMap.put(tableName, pool);
}
return pool;
}
}
/**
* Constructor
* @param config HBase configuration
* @param tableName the table name
* @param maxSize maximum pool size
*/
public HTablePool(HBaseConfiguration config, byte[] tableName,
int maxSize) {
public HTablePool(HBaseConfiguration config, int maxSize) {
this.config = config;
this.tableName = tableName;
this.maxSize = maxSize;
this.pool = new ArrayDeque<HTable>(this.maxSize);
}
/**
* Constructor
* @param tableName the table name
* @param maxSize maximum pool size
* Get a reference to the specified table from the pool.<p>
*
* Create a new one if one is not available.
* @param tableName
* @return a reference to the specified table
* @throws RuntimeException if there is a problem instantiating the HTable
*/
public HTablePool(byte[] tableName, int maxSize) {
this(new HBaseConfiguration(), tableName, maxSize);
}
/**
* Constructor
* @param tableName the table name
*/
public HTablePool(byte[] tableName) {
this(new HBaseConfiguration(), tableName, 10);
}
/**
* Get a HTable instance, possibly from the pool, if one is available.
* @return HTable a HTable instance
* @throws IOException
*/
public HTable get() throws IOException {
synchronized (pool) {
// peek then pop inside a synchronized block avoids the overhead of a
// NoSuchElementException
HTable table = pool.peek();
if (table != null) {
return pool.pop();
}
public HTable getTable(String tableName) {
LinkedList<HTable> queue = tables.get(tableName);
if(queue == null) {
queue = new LinkedList<HTable>();
tables.put(tableName, queue);
return newHTable(tableName);
}
return new HTable(config, tableName);
HTable table;
synchronized(queue) {
table = queue.poll();
}
if(table == null) {
return newHTable(tableName);
}
return table;
}
/**
* Return a HTable instance to the pool.
* @param table a HTable instance
* Get a reference to the specified table from the pool.<p>
*
* Create a new one if one is not available.
* @param tableName
* @return a reference to the specified table
* @throws RuntimeException if there is a problem instantiating the HTable
*/
public void put(HTable table) {
synchronized (pool) {
if (pool.size() < maxSize) {
pool.push(table);
}
public HTable getTable(byte [] tableName) {
return getTable(Bytes.toString(tableName));
}
/**
* Puts the specified HTable back into the pool.<p>
*
* If the pool already contains <i>maxSize</i> references to the table,
* then nothing happens.
* @param table
*/
public void putTable(HTable table) {
LinkedList<HTable> queue = tables.get(Bytes.toString(table.getTableName()));
synchronized(queue) {
if(queue.size() >= maxSize) return;
queue.add(table);
}
}
private HTable newHTable(String tableName) {
try {
return new HTable(config, Bytes.toBytes(tableName));
} catch(IOException ioe) {
throw new RuntimeException(ioe);
}
}
}

View File

@ -0,0 +1,108 @@
/**
* Copyright 2009 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.util.Bytes;
/**
* Tests HTablePool
*/
public class TestHTablePool extends HBaseTestCase {

  /** A table checked out by String name is reissued after being returned. */
  public void testTableWithStringName() {
    HTablePool pool =
        new HTablePool((HBaseConfiguration) null, Integer.MAX_VALUE);
    String tableName = "testTable";

    // Request a table from an empty pool
    HTable table = pool.getTable(tableName);
    assertNotNull(table);

    // Return the table to the pool
    pool.putTable(table);

    // A request for the same name must reuse the pooled instance
    HTable sameTable = pool.getTable(tableName);
    assertSame(table, sameTable);
  }

  /** A table checked out by byte[] name is reissued after being returned. */
  public void testTableWithByteArrayName() {
    HTablePool pool =
        new HTablePool((HBaseConfiguration) null, Integer.MAX_VALUE);
    byte[] tableName = Bytes.toBytes("testTable");

    // Request a table from an empty pool
    HTable table = pool.getTable(tableName);
    assertNotNull(table);

    // Return the table to the pool
    pool.putTable(table);

    // A request for the same name must reuse the pooled instance
    HTable sameTable = pool.getTable(tableName);
    assertSame(table, sameTable);
  }

  /** The pool must not retain more than maxSize references per table. */
  public void testTableWithMaxSize() {
    HTablePool pool = new HTablePool((HBaseConfiguration) null, 2);
    String tableName = "testTable";

    // Request tables from an empty pool
    HTable table1 = pool.getTable(tableName);
    HTable table2 = pool.getTable(tableName);
    HTable table3 = pool.getTable(tableName);

    // Return the tables to the pool
    pool.putTable(table1);
    pool.putTable(table2);
    // The pool should reject this one since it is already full
    pool.putTable(table3);

    // Request tables of the same name
    HTable sameTable1 = pool.getTable(tableName);
    HTable sameTable2 = pool.getTable(tableName);
    HTable sameTable3 = pool.getTable(tableName);
    assertSame(table1, sameTable1);
    assertSame(table2, sameTable2);
    // table3 was dropped, so a freshly created instance comes back
    assertNotSame(table3, sameTable3);
  }

  /** Pools for differently named tables must not share instances. */
  public void testTablesWithDifferentNames() {
    HTablePool pool =
        new HTablePool((HBaseConfiguration) null, Integer.MAX_VALUE);
    String tableName1 = "testTable1";
    String tableName2 = "testTable2";

    // Request a table of each name from an empty pool
    HTable table1 = pool.getTable(tableName1);
    HTable table2 = pool.getTable(tableName2);
    // Original test only checked table2; check both for consistency
    assertNotNull(table1);
    assertNotNull(table2);

    // Return the tables to the pool
    pool.putTable(table1);
    pool.putTable(table2);

    // Requests for the same names must reuse the matching pooled instances
    HTable sameTable1 = pool.getTable(tableName1);
    HTable sameTable2 = pool.getTable(tableName2);
    assertSame(table1, sameTable1);
    assertSame(table2, sameTable2);
  }
}