HBASE-2412 [stargate] PerformanceEvaluation; and related fixes
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@931038 13f79535-47bb-0310-9956-ffa450edef68
parent 6baf69a757
commit 4c5eec3968

@@ -484,6 +484,7 @@ Release 0.21.0 - Unreleased
   HBASE-2087  The wait on compaction because "Too many store files"
               holds up all flushing
   HBASE-2252  Mapping a very big table kills region servers
   HBASE-2412  [stargate] PerformanceEvaluation

NEW FEATURES
   HBASE-1961  HBase EC2 scripts

@@ -22,6 +22,28 @@
    <commons-httpclient.version>3.0.1</commons-httpclient.version>
  </properties>

  <build>
    <plugins>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-jar-plugin</artifactId>
        <executions>
          <execution>
            <goals>
              <goal>test-jar</goal>
            </goals>
          </execution>
        </executions>
        <configuration>
          <archive>
            <manifest>
              <mainClass>org.apache.hadoop.hbase.stargate.PerformanceEvaluation</mainClass>
            </manifest>
          </archive>
        </configuration>
      </plugin>
    </plugins>
  </build>

  <dependencies>
    <dependency>

@@ -0,0 +1,79 @@
/*
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate;

import java.io.IOException;

import javax.ws.rs.GET;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.CacheControl;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import javax.ws.rs.core.Response.ResponseBuilder;

import org.apache.hadoop.hbase.client.HBaseAdmin;

public class ExistsResource implements Constants {

  User user;
  String tableName;
  String actualTableName;
  CacheControl cacheControl;
  RESTServlet servlet;

  public ExistsResource(User user, String table) throws IOException {
    if (user != null) {
      this.user = user;
      this.actualTableName =
        !user.isAdmin() ? (user.getName() + "." + table) : table;
    } else {
      this.actualTableName = table;
    }
    this.tableName = table;
    servlet = RESTServlet.getInstance();
    cacheControl = new CacheControl();
    cacheControl.setNoCache(true);
    cacheControl.setNoTransform(false);
  }

  @GET
  @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
    MIMETYPE_BINARY})
  public Response get(final @Context UriInfo uriInfo) throws IOException {
    if (!servlet.userRequestLimit(user, 1)) {
      return Response.status(509).build();
    }
    try {
      HBaseAdmin admin = new HBaseAdmin(servlet.getConfiguration());
      if (!admin.tableExists(actualTableName)) {
        throw new WebApplicationException(Response.Status.NOT_FOUND);
      }
    } catch (IOException e) {
      throw new WebApplicationException(Response.Status.SERVICE_UNAVAILABLE);
    }
    ResponseBuilder response = Response.ok();
    response.cacheControl(cacheControl);
    return response.build();
  }

}

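Note: the new ExistsResource backs a GET "/<table>/exists" check (wired up in TableResource below). A minimal client-side probe, assuming a Stargate instance on localhost:8080 and the stargate client classes touched later in this commit (Cluster.add taking a 'host:port' string is an assumption based on its javadoc):

  Cluster cluster = new Cluster();
  cluster.add("localhost:8080");            // assumed service location
  Client client = new Client(cluster);
  Response response = client.get("/mytable/exists");
  // 200 -> table exists, 404 -> no such table, 509 -> rate limited, retry
  boolean exists = (response.getCode() == 200);
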
@@ -20,11 +20,12 @@

package org.apache.hadoop.hbase.stargate;

import java.net.InetAddress;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.PosixParser;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
import org.mortbay.jetty.servlet.ServletHolder;

@@ -64,6 +65,18 @@ public class Main implements Constants {
    sh.setInitParameter("com.sun.jersey.config.property.packages",
      "jetty");

    // configure the Stargate singleton

    RESTServlet servlet = RESTServlet.getInstance();
    port = servlet.getConfiguration().getInt("stargate.port", port);
    if (!servlet.isMultiUser()) {
      servlet.setMultiUser(cmd.hasOption("m"));
    }
    servlet.addConnectorAddress(
      servlet.getConfiguration().get("stargate.hostname",
        InetAddress.getLocalHost().getCanonicalHostName()),
      port);

    // set up Jetty and run the embedded server

    Server server = new Server(port);

@@ -74,14 +87,6 @@ public class Main implements Constants {
    Context context = new Context(server, "/", Context.SESSIONS);
    context.addServlet(sh, "/*");

    // configure the Stargate singleton

    RESTServlet servlet = RESTServlet.getInstance();
    servlet.setMultiUser(cmd.hasOption("m"));
    for (Connector conn: server.getConnectors()) {
      servlet.addConnectorAddress(conn.getHost(), conn.getLocalPort());
    }

    server.start();
    server.join();
  }

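Note: the singleton is now configured before the Jetty Server is constructed, so the advertised connector address comes from configuration rather than from live connectors, and the configured port can override the command line. The keys read above, in one sketch (values illustrative):

  Configuration conf = servlet.getConfiguration();
  int port = conf.getInt("stargate.port", 8080);
  String host = conf.get("stargate.hostname",
    InetAddress.getLocalHost().getCanonicalHostName());
  boolean multiuser = conf.getBoolean("stargate.multiuser", false);
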
@@ -193,6 +193,10 @@ public class RESTServlet extends ServletAdaptor
    this.statusReporter = new StatusReporter(
      conf.getInt(STATUS_REPORT_PERIOD_KEY, 1000 * 60), stopping);
    this.multiuser = conf.getBoolean("stargate.multiuser", false);
    if (this.multiuser) {
      LOG.info("multiuser mode enabled");
      getAuthenticator();
    }
  }

  @Override

@@ -321,6 +325,7 @@ public class RESTServlet extends ServletAdaptor
      if (authenticator == null) {
        authenticator = new HBCAuthenticator(conf);
      }
      LOG.info("using authenticator " + authenticator);
    }
    return authenticator;
  }

@@ -339,18 +344,20 @@ public class RESTServlet extends ServletAdaptor
   * @param want the number of tokens desired
   * @throws IOException
   */
  public boolean userRequestLimit(final User user, int want)
      throws IOException {
    UserData ud = SoftUserData.get(user);
    HTableTokenBucket tb = (HTableTokenBucket) ud.get(UserData.TOKENBUCKET);
    if (tb == null) {
      tb = new HTableTokenBucket(conf, Bytes.toBytes(user.getToken()));
      ud.put(UserData.TOKENBUCKET, tb);
    if (multiuser) {
      UserData ud = SoftUserData.get(user);
      HTableTokenBucket tb = (HTableTokenBucket) ud.get(UserData.TOKENBUCKET);
      if (tb == null) {
        tb = new HTableTokenBucket(conf, Bytes.toBytes(user.getToken()));
        ud.put(UserData.TOKENBUCKET, tb);
      }
      if (tb.available() < want) {
        return false;
      }
      tb.remove(want);
    }
    if (tb.available() < want) {
      return false;
    }
    tb.remove(want);
    return true;
  }

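Note: with the new multiuser guard, userRequestLimit short-circuits to true in single-user deployments instead of consulting a per-user token bucket. Every resource method in this commit charges the limiter with the same preamble before doing any work:

  if (!servlet.userRequestLimit(user, 1)) {
    return Response.status(509).build();   // 509: bandwidth limit exceeded
  }
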
@@ -35,10 +35,12 @@ import javax.ws.rs.core.Response.ResponseBuilder;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.stargate.User;
import org.apache.hadoop.hbase.stargate.model.TableInfoModel;

@@ -48,18 +50,20 @@ public class RegionsResource implements Constants {
  private static final Log LOG = LogFactory.getLog(RegionsResource.class);

  User user;
  String table;
  String tableName;
  String actualTableName;
  CacheControl cacheControl;
  RESTServlet servlet;

  public RegionsResource(User user, String table) throws IOException {
    if (user != null) {
      if (!user.isAdmin()) {
        throw new WebApplicationException(Response.Status.FORBIDDEN);
      }
      this.user = user;
      this.actualTableName =
        !user.isAdmin() ? (user.getName() + "." + table) : table;
    } else {
      this.actualTableName = table;
    }
    this.table = table;
    this.tableName = table;
    cacheControl = new CacheControl();
    cacheControl.setNoCache(true);
    cacheControl.setNoTransform(false);

@@ -69,9 +73,9 @@ public class RegionsResource implements Constants {
  private Map<HRegionInfo,HServerAddress> getTableRegions()
      throws IOException {
    HTablePool pool = servlet.getTablePool();
    HTable table = (HTable) pool.getTable(this.table);
    HTableInterface table = pool.getTable(actualTableName);
    try {
      return table.getRegionsInfo();
      return ((HTable)table).getRegionsInfo();
    } finally {
      pool.putTable(table);
    }

@@ -79,22 +83,32 @@ public class RegionsResource implements Constants {

  @GET
  @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
  public Response get(final @Context UriInfo uriInfo) {
  public Response get(final @Context UriInfo uriInfo) throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("GET " + uriInfo.getAbsolutePath());
    }
    if (!servlet.userRequestLimit(user, 1)) {
      return Response.status(509).build();
    }
    servlet.getMetrics().incrementRequests(1);
    try {
      TableInfoModel model = new TableInfoModel(table);
      String name = user.isAdmin() ? actualTableName : tableName;
      TableInfoModel model = new TableInfoModel(name);
      Map<HRegionInfo,HServerAddress> regions = getTableRegions();
      for (Map.Entry<HRegionInfo,HServerAddress> e: regions.entrySet()) {
        HRegionInfo hri = e.getKey();
        HServerAddress addr = e.getValue();
        InetSocketAddress sa = addr.getInetSocketAddress();
        model.add(
          new TableRegionModel(table, hri.getRegionId(), hri.getStartKey(),
            hri.getEndKey(),
            sa.getHostName() + ":" + Integer.valueOf(sa.getPort())));
        if (user.isAdmin()) {
          HServerAddress addr = e.getValue();
          InetSocketAddress sa = addr.getInetSocketAddress();
          model.add(
            new TableRegionModel(name, hri.getRegionId(), hri.getStartKey(),
              hri.getEndKey(),
              sa.getHostName() + ":" + Integer.valueOf(sa.getPort())));
        } else {
          model.add(
            new TableRegionModel(name, hri.getRegionId(), hri.getStartKey(),
              hri.getEndKey()));
        }
      }
      ResponseBuilder response = Response.ok(model);
      response.cacheControl(cacheControl);

@@ -121,7 +121,7 @@ public class RootResource implements Constants {
    if (servlet.isMultiUser()) {
      throw new WebApplicationException(Response.Status.BAD_REQUEST);
    }
    return new StorageClusterStatusResource();
    return new StorageClusterStatusResource(User.DEFAULT_USER);
  }

  @Path("version")

@@ -135,7 +135,7 @@ public class RootResource implements Constants {
    if (servlet.isMultiUser()) {
      User user = auth(token);
      if (!servlet.userRequestLimit(user, 1)) {
        throw new WebApplicationException(Response.status(509).build());
        return Response.status(509).build();
      }
      try {
        ResponseBuilder response = Response.ok(getTableListForUser(user));

@@ -154,11 +154,8 @@ public class RootResource implements Constants {
      final @PathParam("token") String token) throws IOException {
    if (servlet.isMultiUser()) {
      User user = auth(token);
      if (user.isAdmin()) {
        if (!servlet.userRequestLimit(user, 1)) {
          throw new WebApplicationException(Response.status(509).build());
        }
        return new StorageClusterStatusResource();
      if (user != null && user.isAdmin()) {
        return new StorageClusterStatusResource(user);
      }
      throw new WebApplicationException(Response.Status.FORBIDDEN);
    }

@@ -185,7 +182,7 @@ public class RootResource implements Constants {
    if (servlet.isMultiUser()) {
      throw new WebApplicationException(Response.Status.BAD_REQUEST);
    }
    return new TableResource(null, table);
    return new TableResource(User.DEFAULT_USER, table);
  }

}

@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.stargate.User;
import org.apache.hadoop.hbase.stargate.model.CellModel;

@@ -79,16 +80,19 @@ public class RowResource implements Constants {
    }
    this.servlet = RESTServlet.getInstance();
    cacheControl = new CacheControl();
    cacheControl.setMaxAge(servlet.getMaxAge(table));
    cacheControl.setMaxAge(servlet.getMaxAge(actualTableName));
    cacheControl.setNoTransform(false);
  }

  @GET
  @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
  public Response get(final @Context UriInfo uriInfo) {
  public Response get(final @Context UriInfo uriInfo) throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("GET " + uriInfo.getAbsolutePath());
    }
    if (!servlet.userRequestLimit(user, 1)) {
      return Response.status(509).build();
    }
    servlet.getMetrics().incrementRequests(1);
    try {
      ResultGenerator generator =

@@ -127,10 +131,14 @@ public class RowResource implements Constants {

  @GET
  @Produces(MIMETYPE_BINARY)
  public Response getBinary(final @Context UriInfo uriInfo) {
  public Response getBinary(final @Context UriInfo uriInfo)
      throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("GET " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY);
    }
    if (!servlet.userRequestLimit(user, 1)) {
      return Response.status(509).build();
    }
    servlet.getMetrics().incrementRequests(1);
    // doesn't make sense to use a non specific coordinate as this can only
    // return a single cell

@@ -166,6 +174,7 @@ public class RowResource implements Constants {
        throw new WebApplicationException(Response.status(509).build());
      }
      table = pool.getTable(actualTableName);
      ((HTable)table).setAutoFlush(false);
      for (RowModel row: rows) {
        byte[] key = row.getKey();
        Put put = new Put(key);

@@ -182,6 +191,7 @@ public class RowResource implements Constants {
          LOG.debug("PUT " + put.toString());
        }
      }
      ((HTable)table).setAutoFlush(true);
      table.flushCommits();
      ResponseBuilder response = Response.ok();
      return response.build();

@@ -236,7 +246,6 @@ public class RowResource implements Constants {
      if (LOG.isDebugEnabled()) {
        LOG.debug("PUT " + put.toString());
      }
      table.flushCommits();
      return Response.ok().build();
    } catch (IOException e) {
      throw new WebApplicationException(e,

@@ -251,10 +260,13 @@ public class RowResource implements Constants {
  @PUT
  @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
  public Response put(final CellSetModel model,
      final @Context UriInfo uriInfo) {
      final @Context UriInfo uriInfo) throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("PUT " + uriInfo.getAbsolutePath());
    }
    if (!servlet.userRequestLimit(user, 1)) {
      return Response.status(509).build();
    }
    return update(model, true);
  }

@@ -262,38 +274,52 @@ public class RowResource implements Constants {
  @Consumes(MIMETYPE_BINARY)
  public Response putBinary(final byte[] message,
      final @Context UriInfo uriInfo, final @Context HttpHeaders headers)
      throws IOException
  {
    if (LOG.isDebugEnabled()) {
      LOG.debug("PUT " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY);
    }
    if (!servlet.userRequestLimit(user, 1)) {
      return Response.status(509).build();
    }
    return updateBinary(message, headers, true);
  }

  @POST
  @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
  public Response post(final CellSetModel model,
      final @Context UriInfo uriInfo) {
      final @Context UriInfo uriInfo) throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("POST " + uriInfo.getAbsolutePath());
    }
    if (!servlet.userRequestLimit(user, 1)) {
      return Response.status(509).build();
    }
    return update(model, false);
  }

  @POST
  @Consumes(MIMETYPE_BINARY)
  public Response postBinary(final byte[] message,
      final @Context UriInfo uriInfo, final @Context HttpHeaders headers) {
      final @Context UriInfo uriInfo, final @Context HttpHeaders headers)
      throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("POST " + uriInfo.getAbsolutePath() + " as "+MIMETYPE_BINARY);
    }
    if (!servlet.userRequestLimit(user, 1)) {
      return Response.status(509).build();
    }
    return updateBinary(message, headers, false);
  }

  @DELETE
  public Response delete(final @Context UriInfo uriInfo) {
  public Response delete(final @Context UriInfo uriInfo) throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("DELETE " + uriInfo.getAbsolutePath());
    }
    if (!servlet.userRequestLimit(user, 1)) {
      return Response.status(509).build();
    }
    servlet.getMetrics().incrementRequests(1);
    Delete delete = null;
    if (rowspec.hasTimestamp())

@@ -325,7 +351,6 @@ public class RowResource implements Constants {
      if (LOG.isDebugEnabled()) {
        LOG.debug("DELETE " + delete.toString());
      }
      table.flushCommits();
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.SERVICE_UNAVAILABLE);

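Note: the multi-row update path now batches on the client side: auto-flush is turned off while the Puts are queued, and one flushCommits() ships the batch. Condensed, with pool, rows, and actualTableName as in the resource above:

  HTableInterface table = pool.getTable(actualTableName);
  ((HTable)table).setAutoFlush(false);     // queue puts locally
  for (RowModel row: rows) {
    Put put = new Put(row.getKey());
    // ... populate cells from the row model ...
    table.put(put);
  }
  ((HTable)table).setAutoFlush(true);
  table.flushCommits();                    // single round trip for the batch
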
@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.stargate;

import java.io.IOException;
import java.net.URI;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

@@ -48,8 +49,8 @@ public class ScannerResource implements Constants {

  private static final Log LOG = LogFactory.getLog(ScannerResource.class);

  static final Map<String,ScannerInstanceResource> scanners =
    new HashMap<String,ScannerInstanceResource>();
  static final Map<String,ScannerInstanceResource> scanners =
    Collections.synchronizedMap(new HashMap<String,ScannerInstanceResource>());

  User user;
  String tableName;

@@ -69,16 +70,17 @@ public class ScannerResource implements Constants {
  }

  static void delete(final String id) {
    synchronized (scanners) {
      ScannerInstanceResource instance = scanners.remove(id);
      if (instance != null) {
        instance.generator.close();
      }
    ScannerInstanceResource instance = scanners.remove(id);
    if (instance != null) {
      instance.generator.close();
    }
  }

  Response update(final ScannerModel model, final boolean replace,
      final UriInfo uriInfo) {
      final UriInfo uriInfo) throws IOException {
    if (!servlet.userRequestLimit(user, 1)) {
      return Response.status(509).build();
    }
    servlet.getMetrics().incrementRequests(1);
    byte[] endRow = model.hasEndRow() ? model.getEndRow() : null;
    RowSpec spec = new RowSpec(model.getStartRow(), endRow,

@@ -91,9 +93,7 @@ public class ScannerResource implements Constants {
    ScannerInstanceResource instance =
      new ScannerInstanceResource(user, actualTableName, id, gen,
        model.getBatch());
    synchronized (scanners) {
      scanners.put(id, instance);
    }
    scanners.put(id, instance);
    if (LOG.isDebugEnabled()) {
      LOG.debug("new scanner: " + id);
    }

@@ -111,7 +111,7 @@ public class ScannerResource implements Constants {
  @PUT
  @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
  public Response put(final ScannerModel model,
      final @Context UriInfo uriInfo) {
      final @Context UriInfo uriInfo) throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("PUT " + uriInfo.getAbsolutePath());
    }

@@ -121,7 +121,7 @@ public class ScannerResource implements Constants {
  @POST
  @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
  public Response post(final ScannerModel model,
      final @Context UriInfo uriInfo) {
      final @Context UriInfo uriInfo) throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("POST " + uriInfo.getAbsolutePath());
    }

@@ -131,13 +131,11 @@ public class ScannerResource implements Constants {
  @Path("{scanner: .+}")
  public ScannerInstanceResource getScannerInstanceResource(
      final @PathParam("scanner") String id) {
    synchronized (scanners) {
      ScannerInstanceResource instance = scanners.get(id);
      if (instance == null) {
        throw new WebApplicationException(Response.Status.NOT_FOUND);
      }
      return instance;
    ScannerInstanceResource instance = scanners.get(id);
    if (instance == null) {
      throw new WebApplicationException(Response.Status.NOT_FOUND);
    }
    return instance;
  }

}

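Note: wrapping scanners in Collections.synchronizedMap makes each individual put/get/remove atomic, which is all the dropped synchronized (scanners) blocks were guarding. Eviction stays race-free because remove() hands back exclusive ownership:

  ScannerInstanceResource instance = scanners.remove(id);
  if (instance != null) {
    instance.generator.close();   // only one caller can reach here per id
  }
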
@@ -35,10 +35,12 @@ import javax.ws.rs.core.Context;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import javax.ws.rs.core.Response.ResponseBuilder;

import javax.xml.namespace.QName;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableExistsException;

@@ -46,7 +48,6 @@ import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.stargate.User;
import org.apache.hadoop.hbase.stargate.model.ColumnSchemaModel;
import org.apache.hadoop.hbase.stargate.model.TableSchemaModel;

@@ -89,31 +90,17 @@ public class SchemaResource implements Constants {

  @GET
  @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
  public Response get(final @Context UriInfo uriInfo) {
  public Response get(final @Context UriInfo uriInfo) throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("GET " + uriInfo.getAbsolutePath());
    }
    if (!servlet.userRequestLimit(user, 1)) {
      return Response.status(509).build();
    }
    servlet.getMetrics().incrementRequests(1);
    try {
      HTableDescriptor htd = getTableSchema();
      TableSchemaModel model = new TableSchemaModel();
      model.setName(tableName);
      for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
          htd.getValues().entrySet()) {
        model.addAttribute(Bytes.toString(e.getKey().get()),
          Bytes.toString(e.getValue().get()));
      }
      for (HColumnDescriptor hcd: htd.getFamilies()) {
        ColumnSchemaModel columnModel = new ColumnSchemaModel();
        columnModel.setName(hcd.getNameAsString());
        for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
            hcd.getValues().entrySet()) {
          columnModel.addAttribute(Bytes.toString(e.getKey().get()),
            Bytes.toString(e.getValue().get()));
        }
        model.addColumnFamily(columnModel);
      }
      ResponseBuilder response = Response.ok(model);
      ResponseBuilder response =
        Response.ok(new TableSchemaModel(getTableSchema()));
      response.cacheControl(cacheControl);
      return response.build();
    } catch (TableNotFoundException e) {

@@ -206,46 +193,52 @@ public class SchemaResource implements Constants {
  @PUT
  @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
  public Response put(final TableSchemaModel model,
      final @Context UriInfo uriInfo) {
      final @Context UriInfo uriInfo) throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("PUT " + uriInfo.getAbsolutePath());
    }
    servlet.getMetrics().incrementRequests(1);
    // use the name given in the path, but warn if the name on the path and
    // the name in the schema are different
    if (model.getName() != tableName) {
      LOG.warn("table name mismatch: path='" + tableName + "', schema='" +
        model.getName() + "'");
    if (!servlet.userRequestLimit(user, 1)) {
      return Response.status(509).build();
    }
    servlet.getMetrics().incrementRequests(1);
    return update(model, true, uriInfo);
  }

  @POST
  @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
  public Response post(final TableSchemaModel model,
      final @Context UriInfo uriInfo) {
      final @Context UriInfo uriInfo) throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("POST " + uriInfo.getAbsolutePath());
    }
    servlet.getMetrics().incrementRequests(1);
    // use the name given in the path, but warn if the name on the path and
    // the name in the schema are different
    if (model.getName() != tableName) {
      LOG.warn("table name mismatch: path='" + tableName + "', schema='" +
        model.getName() + "'");
    if (!servlet.userRequestLimit(user, 1)) {
      return Response.status(509).build();
    }
    servlet.getMetrics().incrementRequests(1);
    return update(model, false, uriInfo);
  }

  @DELETE
  public Response delete(final @Context UriInfo uriInfo) {
  public Response delete(final @Context UriInfo uriInfo) throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("DELETE " + uriInfo.getAbsolutePath());
    }
    if (!servlet.userRequestLimit(user, 1)) {
      return Response.status(509).build();
    }
    servlet.getMetrics().incrementRequests(1);
    try {
      HBaseAdmin admin = new HBaseAdmin(servlet.getConfiguration());
      admin.disableTable(actualTableName);
      boolean success = false;
      for (int i = 0; i < 10; i++) try {
        admin.disableTable(actualTableName);
        success = true;
        break;
      } catch (IOException e) {
      }
      if (!success) {
        throw new IOException("could not disable table");
      }
      admin.deleteTable(actualTableName);
      return Response.ok().build();
    } catch (TableNotFoundException e) {

@@ -44,22 +44,27 @@ public class StorageClusterStatusResource implements Constants {
  private static final Log LOG =
    LogFactory.getLog(StorageClusterStatusResource.class);

  private User user;
  private CacheControl cacheControl;
  private RESTServlet servlet;

  public StorageClusterStatusResource() throws IOException {
    servlet = RESTServlet.getInstance();
    cacheControl = new CacheControl();
    cacheControl.setNoCache(true);
    cacheControl.setNoTransform(false);
  public StorageClusterStatusResource(User user) throws IOException {
    this.user = user;
    this.servlet = RESTServlet.getInstance();
    this.cacheControl = new CacheControl();
    this.cacheControl.setNoCache(true);
    this.cacheControl.setNoTransform(false);
  }

  @GET
  @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
  public Response get(final @Context UriInfo uriInfo) {
  public Response get(final @Context UriInfo uriInfo) throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("GET " + uriInfo.getAbsolutePath());
    }
    if (!servlet.userRequestLimit(user, 1)) {
      return Response.status(509).build();
    }
    servlet.getMetrics().incrementRequests(1);
    try {
      HBaseAdmin admin = new HBaseAdmin(servlet.getConfiguration());

@@ -40,6 +40,11 @@ public class TableResource implements Constants {
    this.table = table;
  }

  @Path("exists")
  public ExistsResource getExistsResource() throws IOException {
    return new ExistsResource(user, table);
  }

  @Path("regions")
  public RegionsResource getRegionsResource() throws IOException {
    return new RegionsResource(user, table);

@@ -66,4 +71,5 @@ public class TableResource implements Constants {
        Response.Status.INTERNAL_SERVER_ERROR);
    }
  }

}

@@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.util.Bytes;
public class User implements Constants {

  public static final User DEFAULT_USER = new User("default",
    "00000000000000000000000000000000", false, true);
    "00000000000000000000000000000000", true, true);

  private String name;
  private String token;

@@ -64,10 +64,13 @@ public class Client {
   */
  public Client(Cluster cluster) {
    this.cluster = cluster;
    httpClient = new HttpClient(new MultiThreadedHttpConnectionManager());
    HttpConnectionManagerParams managerParams =
      httpClient.getHttpConnectionManager().getParams();
    MultiThreadedHttpConnectionManager manager =
      new MultiThreadedHttpConnectionManager();
    HttpConnectionManagerParams managerParams = manager.getParams();
    managerParams.setConnectionTimeout(2000); // 2 s
    managerParams.setDefaultMaxConnectionsPerHost(10);
    managerParams.setMaxTotalConnections(100);
    this.httpClient = new HttpClient(manager);
    HttpClientParams clientParams = httpClient.getParams();
    clientParams.setVersion(HttpVersion.HTTP_1_1);
  }

@@ -200,10 +203,13 @@ public class Client {
  public Response head(Cluster cluster, String path, Header[] headers)
      throws IOException {
    HeadMethod method = new HeadMethod();
    int code = execute(cluster, method, null, path);
    headers = method.getResponseHeaders();
    method.releaseConnection();
    return new Response(code, headers, null);
    try {
      int code = execute(cluster, method, null, path);
      headers = method.getResponseHeaders();
      return new Response(code, headers, null);
    } finally {
      method.releaseConnection();
    }
  }

  /**

@@ -276,11 +282,14 @@ public class Client {
  public Response get(Cluster c, String path, Header[] headers)
      throws IOException {
    GetMethod method = new GetMethod();
    int code = execute(c, method, headers, path);
    headers = method.getResponseHeaders();
    byte[] body = method.getResponseBody();
    method.releaseConnection();
    return new Response(code, headers, body);
    try {
      int code = execute(c, method, headers, path);
      headers = method.getResponseHeaders();
      byte[] body = method.getResponseBody();
      return new Response(code, headers, body);
    } finally {
      method.releaseConnection();
    }
  }

  /**

@@ -339,12 +348,15 @@ public class Client {
  public Response put(Cluster cluster, String path, Header[] headers,
      byte[] content) throws IOException {
    PutMethod method = new PutMethod();
    method.setRequestEntity(new ByteArrayRequestEntity(content));
    int code = execute(cluster, method, headers, path);
    headers = method.getResponseHeaders();
    content = method.getResponseBody();
    method.releaseConnection();
    return new Response(code, headers, content);
    try {
      method.setRequestEntity(new ByteArrayRequestEntity(content));
      int code = execute(cluster, method, headers, path);
      headers = method.getResponseHeaders();
      content = method.getResponseBody();
      return new Response(code, headers, content);
    } finally {
      method.releaseConnection();
    }
  }

  /**

@@ -403,12 +415,15 @@ public class Client {
  public Response post(Cluster cluster, String path, Header[] headers,
      byte[] content) throws IOException {
    PostMethod method = new PostMethod();
    method.setRequestEntity(new ByteArrayRequestEntity(content));
    int code = execute(cluster, method, headers, path);
    headers = method.getResponseHeaders();
    content = method.getResponseBody();
    method.releaseConnection();
    return new Response(code, headers, content);
    try {
      method.setRequestEntity(new ByteArrayRequestEntity(content));
      int code = execute(cluster, method, headers, path);
      headers = method.getResponseHeaders();
      content = method.getResponseBody();
      return new Response(code, headers, content);
    } finally {
      method.releaseConnection();
    }
  }

  /**

@@ -430,9 +445,14 @@ public class Client {
   */
  public Response delete(Cluster cluster, String path) throws IOException {
    DeleteMethod method = new DeleteMethod();
    int code = execute(cluster, method, null, path);
    Header[] headers = method.getResponseHeaders();
    method.releaseConnection();
    return new Response(code, headers);
    try {
      int code = execute(cluster, method, null, path);
      Header[] headers = method.getResponseHeaders();
      byte[] content = method.getResponseBody();
      return new Response(code, headers, content);
    } finally {
      method.releaseConnection();
    }
  }

}

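Note: two independent fixes to Client land here. The constructor now tunes a bounded connection pool, and every verb releases its connection in a finally block, so an IOException from execute() no longer leaks a pooled connection. The shared shape, condensed:

  // constructor: bounded, tuned pool
  MultiThreadedHttpConnectionManager manager =
    new MultiThreadedHttpConnectionManager();
  manager.getParams().setConnectionTimeout(2000);           // 2 s
  manager.getParams().setDefaultMaxConnectionsPerHost(10);
  manager.getParams().setMaxTotalConnections(100);

  // every request method: always return the connection to the pool
  GetMethod method = new GetMethod();
  try {
    int code = execute(cluster, method, null, path);
    return new Response(code, method.getResponseHeaders(),
      method.getResponseBody());
  } finally {
    method.releaseConnection();
  }
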
@@ -46,6 +46,13 @@ public class Cluster {
    this.nodes.addAll(nodes);
  }

  /**
   * @return true if no locations have been added, false otherwise
   */
  public boolean isEmpty() {
    return nodes.isEmpty();
  }

  /**
   * Add a node to the cluster
   * @param node the service location in 'host:port' format

@@ -0,0 +1,188 @@
/*
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate.client;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.stargate.Constants;
import org.apache.hadoop.hbase.stargate.model.TableSchemaModel;
import org.apache.hadoop.hbase.util.Bytes;

public class RemoteAdmin {

  final Client client;
  final Configuration conf;
  final String accessToken;
  final int maxRetries;
  final long sleepTime;

  /**
   * Constructor
   * @param client
   * @param conf
   */
  public RemoteAdmin(Client client, Configuration conf) {
    this(client, conf, null);
  }

  /**
   * Constructor
   * @param client
   * @param conf
   * @param accessToken
   */
  public RemoteAdmin(Client client, Configuration conf, String accessToken) {
    this.client = client;
    this.conf = conf;
    this.accessToken = accessToken;
    this.maxRetries = conf.getInt("stargate.client.max.retries", 10);
    this.sleepTime = conf.getLong("stargate.client.sleep", 1000);
  }

  /**
   * @param tableName name of table to check
   * @return true if all regions of the table are available
   * @throws IOException if a remote or network exception occurs
   */
  public boolean isTableAvailable(String tableName) throws IOException {
    return isTableAvailable(Bytes.toBytes(tableName));
  }

  /**
   * @param tableName name of table to check
   * @return true if all regions of the table are available
   * @throws IOException if a remote or network exception occurs
   */
  public boolean isTableAvailable(byte[] tableName) throws IOException {
    StringBuilder sb = new StringBuilder();
    sb.append('/');
    if (accessToken != null) {
      sb.append(accessToken);
      sb.append('/');
    }
    sb.append(Bytes.toStringBinary(tableName));
    sb.append('/');
    sb.append("exists");
    int code = 0;
    for (int i = 0; i < maxRetries; i++) {
      Response response = client.get(sb.toString());
      code = response.getCode();
      switch (code) {
      case 200:
        return true;
      case 404:
        return false;
      case 509:
        try {
          Thread.sleep(sleepTime);
        } catch (InterruptedException e) { }
        break;
      default:
        throw new IOException("exists request returned " + code);
      }
    }
    throw new IOException("exists request timed out");
  }

  /**
   * Creates a new table.
   * @param desc table descriptor for table
   * @throws IOException if a remote or network exception occurs
   */
  public void createTable(HTableDescriptor desc)
      throws IOException {
    TableSchemaModel model = new TableSchemaModel(desc);
    StringBuilder sb = new StringBuilder();
    sb.append('/');
    if (accessToken != null) {
      sb.append(accessToken);
      sb.append('/');
    }
    sb.append(Bytes.toStringBinary(desc.getName()));
    sb.append('/');
    sb.append("schema");
    int code = 0;
    for (int i = 0; i < maxRetries; i++) {
      Response response = client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF,
        model.createProtobufOutput());
      code = response.getCode();
      switch (code) {
      case 201:
        return;
      case 509:
        try {
          Thread.sleep(sleepTime);
        } catch (InterruptedException e) { }
        break;
      default:
        throw new IOException("create request returned " + code);
      }
    }
    throw new IOException("create request timed out");
  }

  /**
   * Deletes a table.
   * @param tableName name of table to delete
   * @throws IOException if a remote or network exception occurs
   */
  public void deleteTable(final String tableName) throws IOException {
    deleteTable(Bytes.toBytes(tableName));
  }

  /**
   * Deletes a table.
   * @param tableName name of table to delete
   * @throws IOException if a remote or network exception occurs
   */
  public void deleteTable(final byte [] tableName) throws IOException {
    StringBuilder sb = new StringBuilder();
    sb.append('/');
    if (accessToken != null) {
      sb.append(accessToken);
      sb.append('/');
    }
    sb.append(Bytes.toStringBinary(tableName));
    sb.append('/');
    sb.append("schema");
    int code = 0;
    for (int i = 0; i < maxRetries; i++) {
      Response response = client.delete(sb.toString());
      code = response.getCode();
      switch (code) {
      case 200:
        return;
      case 509:
        try {
          Thread.sleep(sleepTime);
        } catch (InterruptedException e) { }
        break;
      default:
        throw new IOException("delete request returned " + code);
      }
    }
    throw new IOException("delete request timed out");
  }

}

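Note: putting the new client pieces together, remote table lifecycle management might look like the following sketch (host:port and names are illustrative; Cluster.add taking a 'host:port' string is assumed from its javadoc):

  Configuration conf = HBaseConfiguration.create();
  Cluster cluster = new Cluster();
  cluster.add("localhost:8080");           // assumed service location
  Client client = new Client(cluster);
  RemoteAdmin admin = new RemoteAdmin(client, conf);
  HTableDescriptor desc = new HTableDescriptor("example");
  desc.addFamily(new HColumnDescriptor("f"));
  if (!admin.isTableAvailable("example")) {
    admin.createTable(desc);               // retries internally on 509
  }
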
@@ -29,16 +29,13 @@ import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

import javax.xml.namespace.QName;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.StringUtils;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;

@@ -54,7 +51,6 @@ import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.stargate.Constants;
import org.apache.hadoop.hbase.stargate.model.CellModel;
import org.apache.hadoop.hbase.stargate.model.CellSetModel;
import org.apache.hadoop.hbase.stargate.model.ColumnSchemaModel;
import org.apache.hadoop.hbase.stargate.model.RowModel;
import org.apache.hadoop.hbase.stargate.model.ScannerModel;
import org.apache.hadoop.hbase.stargate.model.TableSchemaModel;

@@ -67,11 +63,13 @@ public class RemoteHTable implements HTableInterface {

  private static final Log LOG = LogFactory.getLog(RemoteHTable.class);

  Client client;
  Configuration conf;
  byte[] name;
  String accessToken;

  final Client client;
  final Configuration conf;
  final byte[] name;
  final String accessToken;
  final int maxRetries;
  final long sleepTime;

  @SuppressWarnings("unchecked")
  protected String buildRowSpec(final byte[] row, final Map familyMap,
      final long startTime, final long endTime, final int maxVersions) {

@@ -210,19 +208,18 @@ public class RemoteHTable implements HTableInterface {
    this.conf = conf;
    this.name = name;
    this.accessToken = accessToken;
    this.maxRetries = conf.getInt("stargate.client.max.retries", 10);
    this.sleepTime = conf.getLong("stargate.client.sleep", 1000);
  }

  @Override
  public byte[] getTableName() {
    return name.clone();
  }

  @Override
  public Configuration getConfiguration() {
    return conf;
  }

  @Override
  public HTableDescriptor getTableDescriptor() throws IOException {
    StringBuilder sb = new StringBuilder();
    sb.append('/');

@@ -233,32 +230,30 @@ public class RemoteHTable implements HTableInterface {
    sb.append(Bytes.toStringBinary(name));
    sb.append('/');
    sb.append("schema");
    Response response = client.get(sb.toString(), Constants.MIMETYPE_PROTOBUF);
    if (response.getCode() != 200) {
      throw new IOException("schema request returned " + response.getCode());
    }
    TableSchemaModel schema = new TableSchemaModel();
    schema.getObjectFromMessage(response.getBody());
    HTableDescriptor htd = new HTableDescriptor(schema.getName());
    for (Map.Entry<QName, Object> e: schema.getAny().entrySet()) {
      htd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
    }
    for (ColumnSchemaModel column: schema.getColumns()) {
      HColumnDescriptor hcd = new HColumnDescriptor(column.getName());
      for (Map.Entry<QName, Object> e: column.getAny().entrySet()) {
        hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
    for (int i = 0; i < maxRetries; i++) {
      Response response = client.get(sb.toString(), Constants.MIMETYPE_PROTOBUF);
      int code = response.getCode();
      switch (code) {
      case 200:
        TableSchemaModel schema = new TableSchemaModel();
        schema.getObjectFromMessage(response.getBody());
        return schema.getTableDescriptor();
      case 509:
        try {
          Thread.sleep(sleepTime);
        } catch (InterruptedException e) { }
        break;
      default:
        throw new IOException("schema request returned " + code);
      }
      htd.addFamily(hcd);
    }
    return htd;
    throw new IOException("schema request timed out");
  }

  @Override
  public void close() throws IOException {
    client.shutdown();
  }

  @Override
  public Result get(Get get) throws IOException {
    TimeRange range = get.getTimeRange();
    String spec = buildRowSpec(get.getRow(), get.getFamilyMap(),

@@ -266,34 +261,41 @@ public class RemoteHTable implements HTableInterface {
    if (get.getFilter() != null) {
      LOG.warn("filters not supported on gets");
    }
    Response response = client.get(spec, Constants.MIMETYPE_PROTOBUF);
    int code = response.getCode();
    if (code == 404) {
      return new Result();
    }
    if (code != 200) {
      throw new IOException("get request returned " + code);
    }
    CellSetModel model = new CellSetModel();
    model.getObjectFromMessage(response.getBody());
    Result[] results = buildResultFromModel(model);
    if (results.length > 0) {
      if (results.length > 1) {
        LOG.warn("too many results for get (" + results.length + ")");
    for (int i = 0; i < maxRetries; i++) {
      Response response = client.get(spec, Constants.MIMETYPE_PROTOBUF);
      int code = response.getCode();
      switch (code) {
      case 200:
        CellSetModel model = new CellSetModel();
        model.getObjectFromMessage(response.getBody());
        Result[] results = buildResultFromModel(model);
        if (results.length > 0) {
          if (results.length > 1) {
            LOG.warn("too many results for get (" + results.length + ")");
          }
          return results[0];
        }
        // fall through
      case 404:
        return new Result();
      case 509:
        try {
          Thread.sleep(sleepTime);
        } catch (InterruptedException e) { }
        break;
      default:
        throw new IOException("get request returned " + code);
      }
      return results[0];
    }
    return new Result();
    throw new IOException("get request timed out");
  }

  @Override
  public boolean exists(Get get) throws IOException {
    LOG.warn("exists() is really get(), just use get()");
    Result result = get(get);
    return (result != null && !(result.isEmpty()));
  }

  @Override
  public void put(Put put) throws IOException {
    CellSetModel model = buildModelFromPut(put);
    StringBuilder sb = new StringBuilder();

@@ -305,14 +307,25 @@ public class RemoteHTable implements HTableInterface {
    sb.append(Bytes.toStringBinary(name));
    sb.append('/');
    sb.append(Bytes.toStringBinary(put.getRow()));
    Response response = client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF,
      model.createProtobufOutput());
    if (response.getCode() != 200) {
      throw new IOException("put failed with " + response.getCode());
    for (int i = 0; i < maxRetries; i++) {
      Response response = client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF,
        model.createProtobufOutput());
      int code = response.getCode();
      switch (code) {
      case 200:
        return;
      case 509:
        try {
          Thread.sleep(sleepTime);
        } catch (InterruptedException e) { }
        break;
      default:
        throw new IOException("put request failed with " + code);
      }
    }
    throw new IOException("put request timed out");
  }

  @Override
  public void put(List<Put> puts) throws IOException {
    // this is a trick: Stargate accepts multiple rows in a cell set and
    // ignores the row specification in the URI

@@ -351,31 +364,52 @@ public class RemoteHTable implements HTableInterface {
    }
    sb.append(Bytes.toStringBinary(name));
    sb.append("/$multiput"); // can be any nonexistent row
    Response response = client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF,
      model.createProtobufOutput());
    if (response.getCode() != 200) {
      throw new IOException("multiput failed with " + response.getCode());
    for (int i = 0; i < maxRetries; i++) {
      Response response = client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF,
        model.createProtobufOutput());
      int code = response.getCode();
      switch (code) {
      case 200:
        return;
      case 509:
        try {
          Thread.sleep(sleepTime);
        } catch (InterruptedException e) { }
        break;
      default:
        throw new IOException("multiput request failed with " + code);
      }
    }
    throw new IOException("multiput request timed out");
  }

  @Override
  public void delete(Delete delete) throws IOException {
    String spec = buildRowSpec(delete.getRow(), delete.getFamilyMap(),
      delete.getTimeStamp(), delete.getTimeStamp(), 1);
    Response response = client.delete(spec);
    if (response.getCode() != 200) {
      throw new IOException("delete() returned " + response.getCode());
    for (int i = 0; i < maxRetries; i++) {
      Response response = client.delete(spec);
      int code = response.getCode();
      switch (code) {
      case 200:
        return;
      case 509:
        try {
          Thread.sleep(sleepTime);
        } catch (InterruptedException e) { }
        break;
      default:
        throw new IOException("delete request failed with " + code);
      }
    }
    throw new IOException("delete request timed out");
  }

  @Override
  public void delete(List<Delete> deletes) throws IOException {
    for (Delete delete: deletes) {
      delete(delete);
    }
  }

  @Override
  public void flushCommits() throws IOException {
    // no-op
  }

@@ -385,6 +419,12 @@ public class RemoteHTable implements HTableInterface {
    String uri;

    public Scanner(Scan scan) throws IOException {
      ScannerModel model;
      try {
        model = ScannerModel.fromScan(scan);
      } catch (Exception e) {
        throw new IOException(e);
      }
      StringBuffer sb = new StringBuffer();
      sb.append('/');
      if (accessToken != null) {

@@ -394,18 +434,24 @@ public class RemoteHTable implements HTableInterface {
      sb.append(Bytes.toStringBinary(name));
      sb.append('/');
      sb.append("scanner");
      try {
        ScannerModel model = ScannerModel.fromScan(scan);
      for (int i = 0; i < maxRetries; i++) {
        Response response = client.post(sb.toString(),
          Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
        if (response.getCode() != 201) {
          throw new IOException("scan request failed with " +
            response.getCode());
        int code = response.getCode();
        switch (code) {
        case 201:
          uri = response.getLocation();
          return;
        case 509:
          try {
            Thread.sleep(sleepTime);
          } catch (InterruptedException e) { }
          break;
        default:
          throw new IOException("scan request failed with " + code);
        }
        uri = response.getLocation();
      } catch (Exception e) {
        throw new IOException(e);
      }
      throw new IOException("scan request timed out");
    }

    @Override

@@ -413,18 +459,28 @@ public class RemoteHTable implements HTableInterface {
      StringBuilder sb = new StringBuilder(uri);
      sb.append("?n=");
      sb.append(nbRows);
      Response response = client.get(sb.toString(),
        Constants.MIMETYPE_PROTOBUF);
      if (response.getCode() == 206) {
        return null;
      for (int i = 0; i < maxRetries; i++) {
        Response response = client.get(sb.toString(),
          Constants.MIMETYPE_PROTOBUF);
        int code = response.getCode();
        switch (code) {
        case 200:
          CellSetModel model = new CellSetModel();
          model.getObjectFromMessage(response.getBody());
          return buildResultFromModel(model);
        case 204:
        case 206:
          return null;
        case 509:
          try {
            Thread.sleep(sleepTime);
          } catch (InterruptedException e) { }
          break;
        default:
          throw new IOException("scanner.next request failed with " + code);
        }
      }
      if (response.getCode() != 200) {
        LOG.error("scanner.next failed with " + response.getCode());
        return null;
      }
      CellSetModel model = new CellSetModel();
      model.getObjectFromMessage(response.getBody());
      return buildResultFromModel(model);
      throw new IOException("scanner.next request timed out");
    }

    @Override

@@ -487,20 +543,17 @@ public class RemoteHTable implements HTableInterface {
    }

  }

  @Override
  public ResultScanner getScanner(Scan scan) throws IOException {
    return new Scanner(scan);
  }

  @Override
  public ResultScanner getScanner(byte[] family) throws IOException {
    Scan scan = new Scan();
    scan.addFamily(family);
    return new Scanner(scan);
  }

  @Override
  public ResultScanner getScanner(byte[] family, byte[] qualifier)
      throws IOException {
    Scan scan = new Scan();

@@ -508,39 +561,32 @@ public class RemoteHTable implements HTableInterface {
    return new Scanner(scan);
  }

  @Override
  public boolean isAutoFlush() {
    return true;
  }

  @Override
  public Result getRowOrBefore(byte[] row, byte[] family) throws IOException {
    throw new IOException("getRowOrBefore not supported");
  }

  @Override
  public RowLock lockRow(byte[] row) throws IOException {
    throw new IOException("lockRow not implemented");
  }

  @Override
  public void unlockRow(RowLock rl) throws IOException {
    throw new IOException("unlockRow not implemented");
  }

  @Override
  public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
      byte[] value, Put put) throws IOException {
    throw new IOException("checkAndPut not supported");
  }

  @Override
  public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
      long amount) throws IOException {
    throw new IOException("incrementColumnValue not supported");
  }

  @Override
  public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
      long amount, boolean writeToWAL) throws IOException {
    throw new IOException("incrementColumnValue not supported");

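Note: every RemoteHTable operation above now follows one retry discipline: up to maxRetries attempts (stargate.client.max.retries, default 10), sleeping sleepTime ms (stargate.client.sleep, default 1000) on 509, and failing fast on any other unexpected code. Distilled into a single illustrative helper:

  private byte[] getWithRetries(String path) throws IOException {
    for (int i = 0; i < maxRetries; i++) {
      Response response = client.get(path, Constants.MIMETYPE_PROTOBUF);
      int code = response.getCode();
      switch (code) {
      case 200:
        return response.getBody();         // success
      case 509:                            // rate limited: back off and retry
        try {
          Thread.sleep(sleepTime);
        } catch (InterruptedException e) { }
        break;
      default:
        throw new IOException("request returned " + code);
      }
    }
    throw new IOException("request timed out");
  }
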
@@ -59,6 +59,18 @@ public class TableRegionModel implements Serializable {
    */
   public TableRegionModel() {}
 
+  /**
+   * Constructor
+   * @param table the table name
+   * @param id the encoded id of the region
+   * @param startKey the start key of the region
+   * @param endKey the end key of the region
+   */
+  public TableRegionModel(String table, long id, byte[] startKey,
+      byte[] endKey) {
+    this(table, id, startKey, endKey, null);
+  }
+
   /**
    * Constructor
    * @param table the table name
@@ -173,8 +185,10 @@ public class TableRegionModel implements Serializable {
     sb.append(Bytes.toString(startKey));
     sb.append("'\n  endKey='");
     sb.append(Bytes.toString(endKey));
-    sb.append("'\n  location='");
-    sb.append(location);
+    if (location != null) {
+      sb.append("'\n  location='");
+      sb.append(location);
+    }
     sb.append("'\n]\n");
     return sb.toString();
   }
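A short sketch of how the new convenience constructor and the null-location guard behave together; the table name and region id are made-up values:

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.stargate.model.TableRegionModel;
import org.apache.hadoop.hbase.util.Bytes;

public class TableRegionModelExample {
  public static void main(String[] args) {
    // The four-argument constructor delegates with a null location.
    TableRegionModel region = new TableRegionModel("content", 1,
      HConstants.EMPTY_START_ROW, Bytes.toBytes("abc"));
    // With the guard in toString(), the output now omits the location
    // attribute entirely instead of printing location='null'.
    System.out.println(region);
  }
}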
@@ -38,9 +38,11 @@ import javax.xml.namespace.QName;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
 import org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema;
 import org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * A representation of HBase table descriptors.
@@ -77,6 +79,29 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler {
    */
   public TableSchemaModel() {}
 
+  /**
+   * Constructor
+   * @param htd the table descriptor
+   */
+  public TableSchemaModel(HTableDescriptor htd) {
+    setName(htd.getNameAsString());
+    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
+        htd.getValues().entrySet()) {
+      addAttribute(Bytes.toString(e.getKey().get()),
+        Bytes.toString(e.getValue().get()));
+    }
+    for (HColumnDescriptor hcd: htd.getFamilies()) {
+      ColumnSchemaModel columnModel = new ColumnSchemaModel();
+      columnModel.setName(hcd.getNameAsString());
+      for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
+          hcd.getValues().entrySet()) {
+        columnModel.addAttribute(Bytes.toString(e.getKey().get()),
+          Bytes.toString(e.getValue().get()));
+      }
+      addColumnFamily(columnModel);
+    }
+  }
+
   /**
    * Add an attribute to the table descriptor
    * @param name attribute name
@@ -308,4 +333,23 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler {
     }
     return this;
   }
+
+  /**
+   * @return a table descriptor
+   */
+  public HTableDescriptor getTableDescriptor() {
+    HTableDescriptor htd = new HTableDescriptor(getName());
+    for (Map.Entry<QName, Object> e: getAny().entrySet()) {
+      htd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
+    }
+    for (ColumnSchemaModel column: getColumns()) {
+      HColumnDescriptor hcd = new HColumnDescriptor(column.getName());
+      for (Map.Entry<QName, Object> e: column.getAny().entrySet()) {
+        hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
+      }
+      htd.addFamily(hcd);
+    }
+    return htd;
+  }
+
 }
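Taken together with the new constructor above, getTableDescriptor() gives a schema round trip. A minimal sketch — table "t1" and family "f" are assumptions:

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.stargate.model.TableSchemaModel;

public class SchemaRoundTrip {
  public static void main(String[] args) {
    HTableDescriptor htd = new HTableDescriptor("t1");
    htd.addFamily(new HColumnDescriptor("f"));
    // Descriptor -> model via the new constructor ...
    TableSchemaModel model = new TableSchemaModel(htd);
    // ... and model -> descriptor via the new getTableDescriptor().
    HTableDescriptor roundTripped = model.getTableDescriptor();
    System.out.println(roundTripped);
  }
}

Attributes survive the round trip as strings, so values that are not natively strings (booleans, numbers) come back in their toString() form.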
@@ -70,7 +70,7 @@ public class HTableTokenBucket implements Constants {
   HTable table;
   byte[] row;
   int tokens;
-  double rate = 10.0; // default, 10 ops added per second
+  double rate = 20.0; // default, 20 ops added per second
   int size = 100; // burst
   long lastUpdated = System.currentTimeMillis();
   long configUpdateInterval;
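The fields above follow the usual token-bucket scheme: the bucket refills at `rate` tokens per second and caps at `size`. A generic, standalone sketch of the refill arithmetic (not the HTableTokenBucket implementation itself):

/** A minimal token bucket: refills at "rate" tokens/sec, capped at "size". */
public class TokenBucket {
  private final double rate;  // tokens added per second (20.0 above)
  private final int size;     // maximum burst
  private double tokens;
  private long lastUpdated = System.currentTimeMillis();

  public TokenBucket(double rate, int size) {
    this.rate = rate;
    this.size = size;
    this.tokens = size;
  }

  /** @return true if an op may proceed, false if it should be throttled */
  public synchronized boolean tryAcquire() {
    long now = System.currentTimeMillis();
    // Credit tokens for elapsed time, never exceeding the burst size.
    tokens = Math.min(size, tokens + (now - lastUpdated) / 1000.0 * rate);
    lastUpdated = now;
    if (tokens >= 1.0) {
      tokens -= 1.0;
      return true;
    }
    return false;
  }
}

At the new default of 20 ops/second, a fully drained burst of 100 tokens takes about 5 seconds to refill; the 509 retry path added to RemoteHTable is the client-side complement of this throttle.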
@@ -20,46 +20,32 @@
 
 package org.apache.hadoop.hbase.stargate.util;
 
-import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
 
 /**
  * Generic storage for per user information.
  */
 public class UserData {
 
-  public static final int TOKENBUCKET = 0;
+  public static final int TOKENBUCKET = 1;
 
-  ArrayList<Object> data = new ArrayList<Object>();
+  Map<Integer,Object> data = new HashMap<Integer,Object>(1);
 
   public synchronized boolean has(final int sel) {
-    try {
-      return data.get(sel) != null;
-    } catch (IndexOutOfBoundsException e) {
-      return false;
-    }
+    return data.get(sel) != null;
   }
 
   public synchronized Object get(final int sel) {
-    try {
-      return data.get(sel);
-    } catch (IndexOutOfBoundsException e) {
-      return null;
-    }
+    return data.get(sel);
   }
 
   public synchronized Object put(final int sel, final Object o) {
-    Object old = null;
-    try {
-      old = data.get(sel);
-    } catch (IndexOutOfBoundsException e) {
-      // do nothing
-    }
-    data.set(sel, o);
-    return old;
+    return data.put(sel, o);
   }
 
   public synchronized Object remove(int sel) {
-    return put(sel, null);
+    return data.remove(sel);
   }
 
 }
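A usage sketch for the simplified map-backed store; the caller and the stand-in value are hypothetical (the real value parked at the TOKENBUCKET slot is an HTableTokenBucket):

import org.apache.hadoop.hbase.stargate.util.UserData;

public class UserDataExample {
  public static void main(String[] args) {
    UserData ud = new UserData();
    if (!ud.has(UserData.TOKENBUCKET)) {
      // Any per-user object can be stored under a well-known slot id;
      // a String stands in for the real HTableTokenBucket here.
      ud.put(UserData.TOKENBUCKET, "bucket-for-this-user");
    }
    System.out.println(ud.get(UserData.TOKENBUCKET));
    ud.remove(UserData.TOKENBUCKET);
  }
}

The switch from ArrayList to HashMap makes sparse slot ids safe: get() on an unused slot returns null instead of throwing IndexOutOfBoundsException, which is why the try/catch blocks could be dropped.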
File diff suppressed because it is too large
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.client;
+
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.stargate.MiniClusterTestBase;
+import org.apache.hadoop.hbase.stargate.client.Client;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class TestRemoteAdmin extends MiniClusterTestBase {
+
+  static final String TABLE_1 = "TestRemoteAdmin_Table_1";
+  static final String TABLE_2 = "TestRemoteAdmin_Table_2";
+  static final byte[] COLUMN_1 = Bytes.toBytes("a");
+
+  static final HTableDescriptor DESC_1;
+  static {
+    DESC_1 = new HTableDescriptor(TABLE_1);
+    DESC_1.addFamily(new HColumnDescriptor(COLUMN_1));
+  }
+  static final HTableDescriptor DESC_2;
+  static {
+    DESC_2 = new HTableDescriptor(TABLE_2);
+    DESC_2.addFamily(new HColumnDescriptor(COLUMN_1));
+  }
+
+  Client client;
+  HBaseAdmin localAdmin;
+  RemoteAdmin remoteAdmin;
+
+  @Override
+  protected void setUp() throws Exception {
+    super.setUp();
+    localAdmin = new HBaseAdmin(conf);
+    remoteAdmin = new RemoteAdmin(new Client(
+      new Cluster().add("localhost", testServletPort)),
+      conf);
+    if (localAdmin.tableExists(TABLE_1)) {
+      localAdmin.disableTable(TABLE_1);
+      localAdmin.deleteTable(TABLE_1);
+    }
+    if (!localAdmin.tableExists(TABLE_2)) {
+      localAdmin.createTable(DESC_2);
+    }
+  }
+
+  @Override
+  protected void tearDown() throws Exception {
+    super.tearDown();
+  }
+
+  public void testCreateTable() throws Exception {
+    assertFalse(remoteAdmin.isTableAvailable(TABLE_1));
+    remoteAdmin.createTable(DESC_1);
+    assertTrue(remoteAdmin.isTableAvailable(TABLE_1));
+  }
+
+  public void testDeleteTable() throws Exception {
+    assertTrue(remoteAdmin.isTableAvailable(TABLE_2));
+    remoteAdmin.deleteTable(TABLE_2);
+    assertFalse(remoteAdmin.isTableAvailable(TABLE_2));
+  }
+
+}
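Outside the mini-cluster test harness, the same RemoteAdmin calls work against any running Stargate instance. A hedged sketch — the port, table "t1", family "f", and the plain HBaseConfiguration construction are assumptions:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.stargate.client.Client;
import org.apache.hadoop.hbase.stargate.client.Cluster;
import org.apache.hadoop.hbase.stargate.client.RemoteAdmin;

public class RemoteAdminExample {
  public static void main(String[] args) throws Exception {
    RemoteAdmin admin = new RemoteAdmin(
      new Client(new Cluster().add("localhost", 8080)),
      new HBaseConfiguration());
    HTableDescriptor htd = new HTableDescriptor("t1");
    htd.addFamily(new HColumnDescriptor("f"));
    // The same calls the tests exercise: probe, then create.
    if (!admin.isTableAvailable("t1")) {
      admin.createTable(htd);
    }
  }
}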