HBASE-1758 Extract interface out of HTable
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@805392 13f79535-47bb-0310-9956-ffa450edef68
parent 4d6f9bef90
commit 1e8d2fe999
@@ -15,6 +15,8 @@ Release 0.21.0 - Unreleased
              (Mathias via jgray)
   HBASE-1771  PE sequentialWrite is 7x slower because of
               MemStoreFlusher#checkStoreFileCount
+  HBASE-1758  Extract interface out of HTable (Vaibhav Puranik via Andrew
+              Purtell)

  OPTIMIZATIONS

@@ -28,6 +28,7 @@ import java.util.Map;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.HTablePool;

import com.sun.jersey.server.impl.container.servlet.ServletAdaptor;
@@ -36,8 +37,8 @@ import com.sun.jersey.server.impl.container.servlet.ServletAdaptor;
 * Singleton class encapsulating global REST servlet state and functions.
 */
public class RESTServlet extends ServletAdaptor {

  private static final long serialVersionUID = 1L;

  public static final int DEFAULT_MAX_AGE = 60 * 60 * 4;  // 4 hours
  public static final String VERSION_STRING = "0.0.1";

@@ -45,7 +46,7 @@ public class RESTServlet extends ServletAdaptor {

  private final HBaseConfiguration conf;
  private final HTablePool pool;
  protected Map<String,Integer> maxAgeMap =
    Collections.synchronizedMap(new HashMap<String,Integer>());

  /**
@@ -70,7 +71,7 @@ public class RESTServlet extends ServletAdaptor {

  /**
   * Get a table pool for the given table.
   * @return the table pool
   */
  protected HTablePool getTablePool() {
@@ -87,7 +88,7 @@ public class RESTServlet extends ServletAdaptor {
  /**
   * @param tableName the table name
   * @return the maximum cache age suitable for use with this table, in
   * seconds
   * @throws IOException
   */
  public int getMaxAge(String tableName) throws IOException {
@@ -95,10 +96,10 @@ public class RESTServlet extends ServletAdaptor {
    if (i != null) {
      return i.intValue();
    }
-   HTable table = pool.getTable(tableName);
+   HTableInterface table = pool.getTable(tableName);
    try {
      int maxAge = DEFAULT_MAX_AGE;
      for (HColumnDescriptor family :
          table.getTableDescriptor().getFamilies()) {
        int ttl = family.getTimeToLive();
        if (ttl < 0) {
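The resource classes changed below all repeat the same borrow/use/return pattern against the pool, now typed to the new interface. A minimal sketch of that pattern, assuming only calls visible in this diff (the table name and the work inside the try block are placeholders, not part of this commit):

    // Borrow an HTableInterface from the shared pool, use it, always return it.
    HTablePool pool = RESTServlet.getInstance().getTablePool();
    HTableInterface table = pool.getTable("myTable");  // "myTable" is a placeholder
    try {
      // e.g. read schema metadata, as getMaxAge() does above
      HTableDescriptor htd = table.getTableDescriptor();
    } finally {
      pool.putTable(table);  // hand the instance back for reuse
    }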
@@ -1,99 +0,0 @@
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Map;

import javax.ws.rs.GET;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.CacheControl;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import javax.ws.rs.core.Response.ResponseBuilder;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.stargate.model.TableInfoModel;
import org.apache.hadoop.hbase.stargate.model.TableRegionModel;

public class RegionsResource implements Constants {
  private static final Log LOG = LogFactory.getLog(RegionsResource.class);

  private String table;
  private CacheControl cacheControl;

  public RegionsResource(String table) {
    this.table = table;
    cacheControl = new CacheControl();
    cacheControl.setNoCache(true);
    cacheControl.setNoTransform(false);
  }

  private Map<HRegionInfo,HServerAddress> getTableRegions()
      throws IOException {
    HTablePool pool = RESTServlet.getInstance().getTablePool();
    HTable table = pool.getTable(this.table);
    try {
      return table.getRegionsInfo();
    } finally {
      pool.putTable(table);
    }
  }

  @GET
  @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
    MIMETYPE_PROTOBUF})
  public Response get(@Context UriInfo uriInfo) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("GET " + uriInfo.getAbsolutePath());
    }
    try {
      TableInfoModel model = new TableInfoModel(table);
      Map<HRegionInfo,HServerAddress> regions = getTableRegions();
      for (Map.Entry<HRegionInfo,HServerAddress> e: regions.entrySet()) {
        HRegionInfo hri = e.getKey();
        HServerAddress addr = e.getValue();
        InetSocketAddress sa = addr.getInetSocketAddress();
        model.add(
          new TableRegionModel(table, hri.getRegionId(), hri.getStartKey(),
            hri.getEndKey(),
            sa.getHostName() + ":" + Integer.valueOf(sa.getPort())));
      }
      ResponseBuilder response = Response.ok(model);
      response.cacheControl(cacheControl);
      return response.build();
    } catch (TableNotFoundException e) {
      throw new WebApplicationException(Response.Status.NOT_FOUND);
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.SERVICE_UNAVAILABLE);
    }
  }
}
@@ -41,7 +41,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.stargate.model.CellModel;
@@ -56,7 +56,7 @@ public class RowResource implements Constants {
  private RowSpec rowspec;
  private CacheControl cacheControl;

  public RowResource(String table, String rowspec, String versions)
      throws IOException {
    this.table = table;
    this.rowspec = new RowSpec(rowspec);
@@ -137,10 +137,10 @@ public class RowResource implements Constants {
    try {
      pool = RESTServlet.getInstance().getTablePool();
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.INTERNAL_SERVER_ERROR);
    }
-   HTable table = null;
+   HTableInterface table = null;
    try {
      table = pool.getTable(this.table);
      for (RowModel row: model.getRows()) {
@@ -171,16 +171,16 @@ public class RowResource implements Constants {
    }
  }

  private Response updateBinary(byte[] message, HttpHeaders headers,
      boolean replace) {
    HTablePool pool;
    try {
      pool = RESTServlet.getInstance().getTablePool();
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.INTERNAL_SERVER_ERROR);
    }
-   HTable table = null;
+   HTableInterface table = null;
    try {
      byte[] row = rowspec.getRow();
      byte[][] columns = rowspec.getColumns();
@@ -240,7 +240,7 @@ public class RowResource implements Constants {

  @PUT
  @Consumes(MIMETYPE_BINARY)
  public Response putBinary(byte[] message, @Context UriInfo uriInfo,
      @Context HttpHeaders headers)
  {
    if (LOG.isDebugEnabled()) {
@@ -261,7 +261,7 @@ public class RowResource implements Constants {

  @POST
  @Consumes(MIMETYPE_BINARY)
  public Response postBinary(byte[] message, @Context UriInfo uriInfo,
      @Context HttpHeaders headers)
  {
    if (LOG.isDebugEnabled()) {
@@ -281,17 +281,17 @@ public class RowResource implements Constants {
      if (rowspec.hasTimestamp()) {
        delete.deleteColumns(split[0], split[1], rowspec.getTimestamp());
      } else {
        delete.deleteColumns(split[0], split[1]);
      }
    }
    HTablePool pool;
    try {
      pool = RESTServlet.getInstance().getTablePool();
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.INTERNAL_SERVER_ERROR);
    }
-   HTable table = null;
+   HTableInterface table = null;
    try {
      table = pool.getTable(this.table);
      table.delete(delete);
@@ -300,7 +300,7 @@ public class RowResource implements Constants {
      }
      table.flushCommits();
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.SERVICE_UNAVAILABLE);
    } finally {
      if (table != null) {
@@ -27,7 +27,7 @@ import java.util.NoSuchElementException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.client.Result;
@@ -36,15 +36,15 @@ public class RowResultGenerator extends ResultGenerator {

  public RowResultGenerator(String tableName, RowSpec rowspec)
      throws IllegalArgumentException, IOException {
    HTablePool pool = RESTServlet.getInstance().getTablePool();
-   HTable table = pool.getTable(tableName);
+   HTableInterface table = pool.getTable(tableName);
    try {
      Get get = new Get(rowspec.getRow());
      if (rowspec.hasColumns()) {
        get.addColumns(rowspec.getColumns());
      } else {
        // rowspec does not explicitly specify columns, return them all
        for (HColumnDescriptor family:
            table.getTableDescriptor().getFamilies()) {
          get.addFamily(family.getName());
        }
@@ -28,7 +28,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.UnknownScannerException;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
@@ -38,7 +38,7 @@ import org.apache.hadoop.util.StringUtils;
public class ScannerResultGenerator extends ResultGenerator {
  private static final Log LOG =
    LogFactory.getLog(ScannerResultGenerator.class);

  private String id;
  private Iterator<KeyValue> rowI;
  private ResultScanner scanner;
@@ -46,8 +46,8 @@ public class ScannerResultGenerator extends ResultGenerator {

  public ScannerResultGenerator(String tableName, RowSpec rowspec)
      throws IllegalArgumentException, IOException {
    HTablePool pool = RESTServlet.getInstance().getTablePool();
-   HTable table = pool.getTable(tableName);
+   HTableInterface table = pool.getTable(tableName);
    try {
      Scan scan;
      if (rowspec.hasEndRow()) {
@@ -58,12 +58,12 @@ public class ScannerResultGenerator extends ResultGenerator {
      if (rowspec.hasColumns()) {
        scan.addColumns(rowspec.getColumns());
      } else {
        for (HColumnDescriptor family:
            table.getTableDescriptor().getFamilies()) {
          scan.addFamily(family.getName());
        }
      }
      scan.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
      scan.setMaxVersions(rowspec.getMaxVersions());
      scanner = table.getScanner(scan);
      cached = null;
@@ -44,7 +44,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.stargate.model.ColumnSchemaModel;
@@ -67,7 +67,7 @@ public class SchemaResource implements Constants {
  private HTableDescriptor getTableSchema() throws IOException,
      TableNotFoundException {
    HTablePool pool = RESTServlet.getInstance().getTablePool();
-   HTable table = pool.getTable(this.table);
+   HTableInterface table = pool.getTable(this.table);
    try {
      return table.getTableDescriptor();
    } finally {
@@ -88,7 +88,7 @@ public class SchemaResource implements Constants {
    model.setName(htd.getNameAsString());
    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
        htd.getValues().entrySet()) {
      model.addAttribute(Bytes.toString(e.getKey().get()),
        Bytes.toString(e.getValue().get()));
    }
    for (HColumnDescriptor hcd: htd.getFamilies()) {
@@ -96,7 +96,7 @@ public class SchemaResource implements Constants {
      columnModel.setName(hcd.getNameAsString());
      for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
          hcd.getValues().entrySet()) {
        columnModel.addAttribute(Bytes.toString(e.getKey().get()),
          Bytes.toString(e.getValue().get()));
      }
      model.addColumnFamily(columnModel);
@@ -138,10 +138,10 @@ public class SchemaResource implements Constants {
          }
          return Response.created(uriInfo.getAbsolutePath()).build();
        } catch (IOException e) {
          throw new WebApplicationException(e,
            Response.Status.SERVICE_UNAVAILABLE);
        }
      }
    }
  }

  private Response update(byte[] tableName, TableSchemaModel model,
      UriInfo uriInfo, HBaseAdmin admin) {
@@ -157,11 +157,11 @@ public class SchemaResource implements Constants {
        if (htd.hasFamily(hcd.getName())) {
          admin.modifyColumn(tableName, hcd.getName(), hcd);
        } else {
          admin.addColumn(model.getName(), hcd);
        }
      }
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.INTERNAL_SERVER_ERROR);
    } finally {
      admin.enableTable(tableName);
@@ -186,7 +186,7 @@ public class SchemaResource implements Constants {
        return update(tableName, model, uriInfo, admin);
      }
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.SERVICE_UNAVAILABLE);
    }
  }
@@ -212,7 +212,7 @@ public class SchemaResource implements Constants {
  }

  @DELETE
  public Response delete(@Context UriInfo uriInfo) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("DELETE " + uriInfo.getAbsolutePath());
    }
@@ -225,7 +225,7 @@ public class SchemaResource implements Constants {
    } catch (TableNotFoundException e) {
      throw new WebApplicationException(Response.Status.NOT_FOUND);
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.SERVICE_UNAVAILABLE);
    }
  }
@@ -85,27 +85,21 @@ public class TableResource implements Constants {
      response.cacheControl(cacheControl);
      return response.build();
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.SERVICE_UNAVAILABLE);
    }
  }

  @Path("{table}/regions")
  public RegionsResource getRegionsResource(
      @PathParam("table") String table) {
    return new RegionsResource(table);
  }

  @Path("{table}/scanner")
  public ScannerResource getScannerResource(
      @PathParam("table") String table) {
    return new ScannerResource(table);
  }

  @Path("{table}/schema")
  public SchemaResource getSchemaResource(
      @PathParam("table") String table) {
    return new SchemaResource(table);
  }

  @Path("{table}/{rowspec: .+}")
@@ -116,7 +110,7 @@ public class TableResource implements Constants {
    try {
      return new RowResource(table, rowspec, versions);
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.INTERNAL_SERVER_ERROR);
    }
  }
@@ -22,23 +22,15 @@ package org.apache.hadoop.hbase.stargate;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Iterator;
import java.util.Map;

import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.stargate.client.Client;
import org.apache.hadoop.hbase.stargate.client.Cluster;
import org.apache.hadoop.hbase.stargate.client.Response;
@@ -46,16 +38,10 @@ import org.apache.hadoop.hbase.stargate.model.TableModel;
import org.apache.hadoop.hbase.stargate.model.TableInfoModel;
import org.apache.hadoop.hbase.stargate.model.TableListModel;
import org.apache.hadoop.hbase.stargate.model.TableRegionModel;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.util.StringUtils;

public class TestTableResource extends MiniClusterTestCase {
  private static final Log LOG =
    LogFactory.getLog(TestTableResource.class);

  private static String TABLE = "TestTableResource";
  private static String COLUMN = "test:";
  private static Map<HRegionInfo,HServerAddress> regionMap;

  private Client client;
  private JAXBContext context;
@@ -81,39 +67,7 @@ public class TestTableResource extends MiniClusterTestCase {
    HTableDescriptor htd = new HTableDescriptor(TABLE);
    htd.addFamily(new HColumnDescriptor(COLUMN));
    admin.createTable(htd);
-   HTable table = new HTable(conf, TABLE);
-   byte[] k = new byte[3];
-   byte [][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(COLUMN));
-   for (byte b1 = 'a'; b1 < 'z'; b1++) {
-     for (byte b2 = 'a'; b2 < 'z'; b2++) {
-       for (byte b3 = 'a'; b3 < 'z'; b3++) {
-         k[0] = b1;
-         k[1] = b2;
-         k[2] = b3;
-         Put put = new Put(k);
-         put.add(famAndQf[0], famAndQf[1], k);
-         table.put(put);
-       }
-     }
-   }
-   table.flushCommits();
-   // get the initial layout (should just be one region)
-   Map<HRegionInfo,HServerAddress> m = table.getRegionsInfo();
-   assertEquals(m.size(), 1);
-   // tell the master to split the table
-   admin.split(TABLE);
-   // give some time for the split to happen
-   try {
-     Thread.sleep(15 * 1000);
-   } catch (InterruptedException e) {
-     LOG.warn(StringUtils.stringifyException(e));
-   }
-   // check again
-   m = table.getRegionsInfo();
-   // should have two regions now
-   assertEquals(m.size(), 2);
-   regionMap = m;
-   LOG.info("regions: " + regionMap);
+   new HTable(conf, TABLE);
  }

  @Override
@@ -162,59 +116,4 @@ public class TestTableResource extends MiniClusterTestCase {
    model.getObjectFromMessage(response.getBody());
    checkTableList(model);
  }
-
-  public void checkTableInfo(TableInfoModel model) {
-    assertEquals(model.getName(), TABLE);
-    Iterator<TableRegionModel> regions = model.getRegions().iterator();
-    assertTrue(regions.hasNext());
-    while (regions.hasNext()) {
-      TableRegionModel region = regions.next();
-      boolean found = false;
-      for (Map.Entry<HRegionInfo,HServerAddress> e: regionMap.entrySet()) {
-        HRegionInfo hri = e.getKey();
-        if (hri.getRegionNameAsString().equals(region.getName())) {
-          found = true;
-          byte[] startKey = hri.getStartKey();
-          byte[] endKey = hri.getEndKey();
-          InetSocketAddress sa = e.getValue().getInetSocketAddress();
-          String location = sa.getHostName() + ":" +
-            Integer.valueOf(sa.getPort());
-          assertEquals(hri.getRegionId(), region.getId());
-          assertTrue(Bytes.equals(startKey, region.getStartKey()));
-          assertTrue(Bytes.equals(endKey, region.getEndKey()));
-          assertEquals(location, region.getLocation());
-          break;
-        }
-      }
-      assertTrue(found);
-    }
-  }
-
-  public void testTableInfoText() throws IOException {
-    Response response = client.get("/" + TABLE + "/regions", MIMETYPE_PLAIN);
-    assertEquals(response.getCode(), 200);
-  }
-
-  public void testTableInfoXML() throws IOException, JAXBException {
-    Response response = client.get("/" + TABLE + "/regions", MIMETYPE_XML);
-    assertEquals(response.getCode(), 200);
-    TableInfoModel model = (TableInfoModel)
-      context.createUnmarshaller()
-        .unmarshal(new ByteArrayInputStream(response.getBody()));
-    checkTableInfo(model);
-  }
-
-  public void testTableInfoJSON() throws IOException {
-    Response response = client.get("/" + TABLE + "/regions", MIMETYPE_JSON);
-    assertEquals(response.getCode(), 200);
-  }
-
-  public void testTableInfoPB() throws IOException, JAXBException {
-    Response response =
-      client.get("/" + TABLE + "/regions", MIMETYPE_PROTOBUF);
-    assertEquals(response.getCode(), 200);
-    TableInfoModel model = new TableInfoModel();
-    model.getObjectFromMessage(response.getBody());
-    checkTableInfo(model);
-  }
}
File diff suppressed because it is too large
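The suppressed diff is the client-package change where the interface itself is extracted from HTable. Judging only from the calls made through HTableInterface elsewhere in this commit, the extracted interface covers at least the operations below; this is a sketch inferred from usage, not the contents of the suppressed file:

    // Inferred sketch only: each method below is invoked on an HTableInterface
    // somewhere in the hunks above; the real interface may declare more.
    public interface HTableInterface {
      byte[] getTableName();
      HTableDescriptor getTableDescriptor() throws IOException;
      ResultScanner getScanner(Scan scan) throws IOException;
      void delete(Delete delete) throws IOException;
      void flushCommits() throws IOException;
    }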
@@ -19,7 +19,6 @@
 */
package org.apache.hadoop.hbase.client;

-import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
@@ -30,20 +29,21 @@ import org.apache.hadoop.hbase.util.Bytes;

/**
 * A simple pool of HTable instances.<p>
 *
 * Each HTablePool acts as a pool for all tables.  To use, instantiate an
 * HTablePool and use {@link #getTable(String)} to get an HTable from the pool.
 * Once you are done with it, return it to the pool with {@link #putTable(HTable)}.<p>
 *
 * A pool can be created with a <i>maxSize</i> which defines the most HTable
 * references that will ever be retained for each table.  Otherwise the default
 * is {@link Integer#MAX_VALUE}.<p>
 */
public class HTablePool {
-  private final Map<String, LinkedList<HTable>> tables =
-    Collections.synchronizedMap(new HashMap<String, LinkedList<HTable>>());
+  private final Map<String, LinkedList<HTableInterface>> tables =
+    Collections.synchronizedMap(new HashMap<String, LinkedList<HTableInterface>>());
  private final HBaseConfiguration config;
  private final int maxSize;
+  private HTableInterfaceFactory tableFactory = new HTableFactory();

  /**
   * Default Constructor.  Default HBaseConfiguration and no limit on pool size.
@@ -62,63 +62,65 @@ public class HTablePool {
    this.maxSize = maxSize;
  }

+  public HTablePool(HBaseConfiguration config, int maxSize, HTableInterfaceFactory tableFactory) {
+    this.config = config;
+    this.maxSize = maxSize;
+    this.tableFactory = tableFactory;
+  }
+
  /**
   * Get a reference to the specified table from the pool.<p>
   *
   * Create a new one if one is not available.
   * @param tableName
   * @return a reference to the specified table
   * @throws RuntimeException if there is a problem instantiating the HTable
   */
-  public HTable getTable(String tableName) {
-    LinkedList<HTable> queue = tables.get(tableName);
+  public HTableInterface getTable(String tableName) {
+    LinkedList<HTableInterface> queue = tables.get(tableName);
    if(queue == null) {
-      queue = new LinkedList<HTable>();
+      queue = new LinkedList<HTableInterface>();
      tables.put(tableName, queue);
-      return newHTable(tableName);
+      return createHTable(tableName);
    }
-    HTable table;
+    HTableInterface table;
    synchronized(queue) {
      table = queue.poll();
    }
    if(table == null) {
-      return newHTable(tableName);
+      return createHTable(tableName);
    }
    return table;
  }

  /**
   * Get a reference to the specified table from the pool.<p>
   *
   * Create a new one if one is not available.
   * @param tableName
   * @return a reference to the specified table
   * @throws RuntimeException if there is a problem instantiating the HTable
   */
-  public HTable getTable(byte [] tableName) {
+  public HTableInterface getTable(byte [] tableName) {
    return getTable(Bytes.toString(tableName));
  }

  /**
   * Puts the specified HTable back into the pool.<p>
   *
   * If the pool already contains <i>maxSize</i> references to the table,
   * then nothing happens.
   * @param table
   */
-  public void putTable(HTable table) {
-    LinkedList<HTable> queue = tables.get(Bytes.toString(table.getTableName()));
+  public void putTable(HTableInterface table) {
+    LinkedList<HTableInterface> queue = tables.get(Bytes.toString(table.getTableName()));
    synchronized(queue) {
      if(queue.size() >= maxSize) return;
      queue.add(table);
    }
  }

-  private HTable newHTable(String tableName) {
-    try {
-      return new HTable(config, Bytes.toBytes(tableName));
-    } catch(IOException ioe) {
-      throw new RuntimeException(ioe);
-    }
+  private HTableInterface createHTable(String tableName) {
+    return this.tableFactory.createHTableInterface(config, Bytes.toBytes(tableName));
  }
}
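With the pool now typed to HTableInterface and backed by a pluggable HTableInterfaceFactory, callers can substitute their own table implementations, for example in tests. A hedged sketch using only the signatures visible in this diff; the CountingTableFactory class and the table name are hypothetical illustrations, not part of this commit:

    // Hypothetical factory: counts creations, otherwise behaves like the default.
    class CountingTableFactory implements HTableInterfaceFactory {
      private final java.util.concurrent.atomic.AtomicInteger created =
        new java.util.concurrent.atomic.AtomicInteger();
      public HTableInterface createHTableInterface(HBaseConfiguration config,
          byte[] tableName) {
        created.incrementAndGet();
        try {
          return new HTable(config, tableName); // HTable implements HTableInterface
        } catch (IOException e) {
          throw new RuntimeException(e); // the factory method declares no checked exceptions
        }
      }
    }

    // Usage: third constructor argument plugs in the custom factory.
    HBaseConfiguration conf = new HBaseConfiguration();
    HTablePool pool = new HTablePool(conf, 10, new CountingTableFactory());
    HTableInterface table = pool.getTable("myTable"); // "myTable" is a placeholder
    try {
      table.flushCommits();
    } finally {
      pool.putTable(table);
    }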
@@ -33,14 +33,14 @@ public class TestHTablePool extends HBaseTestCase {
    String tableName = "testTable";

    // Request a table from an empty pool
-   HTable table = pool.getTable(tableName);
+   HTableInterface table = pool.getTable(tableName);
    assertNotNull(table);

    // Return the table to the pool
    pool.putTable(table);

    // Request a table of the same name
-   HTable sameTable = pool.getTable(tableName);
+   HTableInterface sameTable = pool.getTable(tableName);
    assertSame(table, sameTable);
  }
@@ -49,36 +49,36 @@ public class TestHTablePool extends HBaseTestCase {
    byte[] tableName = Bytes.toBytes("testTable");

    // Request a table from an empty pool
-   HTable table = pool.getTable(tableName);
+   HTableInterface table = pool.getTable(tableName);
    assertNotNull(table);

    // Return the table to the pool
    pool.putTable(table);

    // Request a table of the same name
-   HTable sameTable = pool.getTable(tableName);
+   HTableInterface sameTable = pool.getTable(tableName);
    assertSame(table, sameTable);
  }

  public void testTableWithMaxSize() {
    HTablePool pool = new HTablePool((HBaseConfiguration)null, 2);
    String tableName = "testTable";

    // Request tables from an empty pool
-   HTable table1 = pool.getTable(tableName);
-   HTable table2 = pool.getTable(tableName);
-   HTable table3 = pool.getTable(tableName);
+   HTableInterface table1 = pool.getTable(tableName);
+   HTableInterface table2 = pool.getTable(tableName);
+   HTableInterface table3 = pool.getTable(tableName);

    // Return the tables to the pool
    pool.putTable(table1);
    pool.putTable(table2);
    // The pool should reject this one since it is already full
    pool.putTable(table3);

    // Request tables of the same name
-   HTable sameTable1 = pool.getTable(tableName);
-   HTable sameTable2 = pool.getTable(tableName);
-   HTable sameTable3 = pool.getTable(tableName);
+   HTableInterface sameTable1 = pool.getTable(tableName);
+   HTableInterface sameTable2 = pool.getTable(tableName);
+   HTableInterface sameTable3 = pool.getTable(tableName);
    assertSame(table1, sameTable1);
    assertSame(table2, sameTable2);
    assertNotSame(table3, sameTable3);
@@ -90,17 +90,17 @@ public class TestHTablePool extends HBaseTestCase {
    String tableName2 = "testTable2";

    // Request a table from an empty pool
-   HTable table1 = pool.getTable(tableName1);
-   HTable table2 = pool.getTable(tableName2);
+   HTableInterface table1 = pool.getTable(tableName1);
+   HTableInterface table2 = pool.getTable(tableName2);
    assertNotNull(table2);

    // Return the tables to the pool
    pool.putTable(table1);
    pool.putTable(table2);

    // Request tables of the same names
-   HTable sameTable1 = pool.getTable(tableName1);
-   HTable sameTable2 = pool.getTable(tableName2);
+   HTableInterface sameTable1 = pool.getTable(tableName1);
+   HTableInterface sameTable2 = pool.getTable(tableName2);
    assertSame(table1, sameTable1);
    assertSame(table2, sameTable2);
  }