CRLF cleanup

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@928031 13f79535-47bb-0310-9956-ffa450edef68
Andrew Kyle Purtell 2010-03-26 19:33:27 +00:00
parent 57a3c0395e
commit 4dced17a0e
27 changed files with 4273 additions and 4273 deletions

View File

@@ -1,88 +1,88 @@
/*
* Copyright 2009 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.stargate;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.PosixParser;

import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
import org.mortbay.jetty.servlet.ServletHolder;

import com.sun.jersey.spi.container.servlet.ServletContainer;

/**
 * Main class for launching Stargate as a servlet hosted by an embedded Jetty
 * servlet container.
 * <p>
 * The following options are supported:
 * <ul>
 * <li>-p: service port</li>
 * <li>-m: enable multiuser mode</li>
 * </ul>
 */
public class Main implements Constants {

  public static void main(String[] args) throws Exception {
    // process command line
    Options options = new Options();
    options.addOption("p", "port", true, "service port");
    options.addOption("m", "multiuser", false, "enable multiuser mode");
    CommandLineParser parser = new PosixParser();
    CommandLine cmd = parser.parse(options, args);
    int port = 8080;
    if (cmd.hasOption("p")) {
      port = Integer.valueOf(cmd.getOptionValue("p"));
    }

    // set up the Jersey servlet container for Jetty
    ServletHolder sh = new ServletHolder(ServletContainer.class);
    sh.setInitParameter(
      "com.sun.jersey.config.property.resourceConfigClass",
      ResourceConfig.class.getCanonicalName());
    sh.setInitParameter("com.sun.jersey.config.property.packages",
      "jetty");

    // set up Jetty and run the embedded server
    Server server = new Server(port);
    server.setSendServerVersion(false);
    server.setSendDateHeader(false);
    server.setStopAtShutdown(true);
    // set up context
    Context context = new Context(server, "/", Context.SESSIONS);
    context.addServlet(sh, "/*");
    // configure the Stargate singleton
    RESTServlet servlet = RESTServlet.getInstance();
    servlet.setMultiUser(cmd.hasOption("m"));
    for (Connector conn: server.getConnectors()) {
      servlet.addConnectorAddress(conn.getHost(), conn.getLocalPort());
    }

    server.start();
    server.join();
  }
}
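
For context, a minimal, hypothetical launch sketch (not part of this commit): it starts the class above in-process, assuming the Stargate classes and an HBase configuration are on the classpath. The port value and the multiuser flag are illustrative.

    // Hypothetical launcher; "-p 9090" and "-m" are illustrative values.
    public class LaunchStargate {
      public static void main(String[] args) throws Exception {
        org.apache.hadoop.hbase.stargate.Main.main(
            new String[] { "-p", "9090", "-m" });
      }
    }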

View File

@@ -1,110 +1,110 @@
/*
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.stargate;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Map;

import javax.ws.rs.GET;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.CacheControl;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import javax.ws.rs.core.Response.ResponseBuilder;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.stargate.User;
import org.apache.hadoop.hbase.stargate.model.TableInfoModel;
import org.apache.hadoop.hbase.stargate.model.TableRegionModel;

public class RegionsResource implements Constants {
  private static final Log LOG = LogFactory.getLog(RegionsResource.class);

  User user;
  String table;
  CacheControl cacheControl;
  RESTServlet servlet;

  public RegionsResource(User user, String table) throws IOException {
    if (user != null) {
      if (!user.isAdmin()) {
        throw new WebApplicationException(Response.Status.FORBIDDEN);
      }
      this.user = user;
    }
    this.table = table;
    cacheControl = new CacheControl();
    cacheControl.setNoCache(true);
    cacheControl.setNoTransform(false);
    servlet = RESTServlet.getInstance();
  }

  private Map<HRegionInfo,HServerAddress> getTableRegions()
      throws IOException {
    HTablePool pool = servlet.getTablePool();
    HTable table = (HTable) pool.getTable(this.table);
    try {
      return table.getRegionsInfo();
    } finally {
      pool.putTable(table);
    }
  }

  @GET
  @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
  public Response get(final @Context UriInfo uriInfo) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("GET " + uriInfo.getAbsolutePath());
    }
    servlet.getMetrics().incrementRequests(1);
    try {
      TableInfoModel model = new TableInfoModel(table);
      Map<HRegionInfo,HServerAddress> regions = getTableRegions();
      for (Map.Entry<HRegionInfo,HServerAddress> e: regions.entrySet()) {
        HRegionInfo hri = e.getKey();
        HServerAddress addr = e.getValue();
        InetSocketAddress sa = addr.getInetSocketAddress();
        model.add(
          new TableRegionModel(table, hri.getRegionId(), hri.getStartKey(),
            hri.getEndKey(),
            sa.getHostName() + ":" + Integer.valueOf(sa.getPort())));
      }
      ResponseBuilder response = Response.ok(model);
      response.cacheControl(cacheControl);
      return response.build();
    } catch (TableNotFoundException e) {
      throw new WebApplicationException(Response.Status.NOT_FOUND);
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.SERVICE_UNAVAILABLE);
    }
  }
}
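
A hedged client-side sketch (not part of this commit) of consuming this resource: it assumes the resource is routed at /&lt;table&gt;/regions on the default port, which this diff does not show; the host, table name, and Accept type are illustrative.

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class RegionsClient {
      public static void main(String[] args) throws Exception {
        // Host, port, table, and path are assumptions for illustration.
        URL url = new URL("http://localhost:8080/mytable/regions");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty("Accept", "text/xml");
        BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream()));
        String line;
        while ((line = in.readLine()) != null) {
          System.out.println(line);
        }
        in.close();
      }
    }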

View File

@@ -1,29 +1,29 @@
/*
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.stargate;

import com.sun.jersey.api.core.PackagesResourceConfig;

public class ResourceConfig extends PackagesResourceConfig {
  public ResourceConfig() {
    super("org.apache.hadoop.hbase.stargate");
  }
}
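
Note (hedged): Jersey's PackagesResourceConfig scans the given package for JAX-RS annotated classes, so resources such as RootResource and RegionsResource in this commit are discovered without being registered individually.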

View File

@@ -1,51 +1,51 @@
/*
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.stargate;

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.stargate.model.ScannerModel;

import org.json.JSONObject;

public abstract class ResultGenerator implements Iterator<KeyValue> {

  public static ResultGenerator fromRowSpec(final String table,
      final RowSpec rowspec, final Filter filter) throws IOException {
    if (rowspec.isSingleRow()) {
      return new RowResultGenerator(table, rowspec, filter);
    } else {
      return new ScannerResultGenerator(table, rowspec, filter);
    }
  }

  public static Filter buildFilter(final String filter) throws Exception {
    return ScannerModel.buildFilter(new JSONObject(filter));
  }

  public abstract void putBack(KeyValue kv);

  public abstract void close();
}
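
A hedged usage sketch (not part of this commit) of the factory above; the table name and row path are illustrative, and a null filter simply means no server-side filtering.

    import org.apache.hadoop.hbase.KeyValue;

    public class ResultGeneratorExample {
      public static void main(String[] args) throws Exception {
        // A single-row spec selects a RowResultGenerator per fromRowSpec.
        RowSpec rowspec = new RowSpec("/myrow");
        ResultGenerator gen =
            ResultGenerator.fromRowSpec("mytable", rowspec, null);
        try {
          while (gen.hasNext()) {
            KeyValue kv = gen.next();
            System.out.println(kv);
          }
        } finally {
          gen.close();
        }
      }
    }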

View File

@@ -1,191 +1,191 @@
/*
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.stargate;

import java.io.IOException;

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.CacheControl;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import javax.ws.rs.core.Response.ResponseBuilder;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.stargate.User;
import org.apache.hadoop.hbase.stargate.model.TableListModel;
import org.apache.hadoop.hbase.stargate.model.TableModel;

@Path("/")
public class RootResource implements Constants {
  private static final Log LOG = LogFactory.getLog(RootResource.class);

  RESTServlet servlet;
  CacheControl cacheControl;

  public RootResource() throws IOException {
    servlet = RESTServlet.getInstance();
    cacheControl = new CacheControl();
    cacheControl.setNoCache(true);
    cacheControl.setNoTransform(false);
  }

  private final User auth(final String token) throws IOException {
    User user = servlet.getAuthenticator().getUserForToken(token);
    if (user == null || user.isDisabled()) {
      throw new WebApplicationException(Response.Status.FORBIDDEN);
    }
    return user;
  }

  private final TableListModel getTableList() throws IOException {
    TableListModel tableList = new TableListModel();
    HBaseAdmin admin = new HBaseAdmin(servlet.getConfiguration());
    HTableDescriptor[] list = admin.listTables();
    for (HTableDescriptor htd: list) {
      tableList.add(new TableModel(htd.getNameAsString()));
    }
    return tableList;
  }

  private final TableListModel getTableListForUser(final User user)
      throws IOException {
    TableListModel tableList;
    if (user.isAdmin()) {
      tableList = getTableList();
    } else {
      tableList = new TableListModel();
      HBaseAdmin admin = new HBaseAdmin(servlet.getConfiguration());
      HTableDescriptor[] list = admin.listTables();
      String prefix = user.getName() + ".";
      for (HTableDescriptor htd: list) {
        String name = htd.getNameAsString();
        if (!name.startsWith(prefix)) {
          continue;
        }
        tableList.add(new TableModel(name.substring(prefix.length())));
      }
    }
    return tableList;
  }

  @GET
  @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
  public Response get(final @Context UriInfo uriInfo) throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("GET " + uriInfo.getAbsolutePath());
    }
    servlet.getMetrics().incrementRequests(1);
    if (servlet.isMultiUser()) {
      throw new WebApplicationException(Response.Status.BAD_REQUEST);
    }
    try {
      ResponseBuilder response = Response.ok(getTableList());
      response.cacheControl(cacheControl);
      return response.build();
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.SERVICE_UNAVAILABLE);
    }
  }

  @Path("status/cluster")
  public StorageClusterStatusResource getClusterStatusResource()
      throws IOException {
    if (servlet.isMultiUser()) {
      throw new WebApplicationException(Response.Status.BAD_REQUEST);
    }
    return new StorageClusterStatusResource();
  }

  @Path("version")
  public VersionResource getVersionResource() throws IOException {
    return new VersionResource();
  }

  @Path("{token: [0-9a-fA-F]{32} }") // 128 bit md5 sums
  public Response getTableRootResource(
      final @PathParam("token") String token) throws IOException {
    if (servlet.isMultiUser()) {
      User user = auth(token);
      if (!servlet.userRequestLimit(user, 1)) {
        throw new WebApplicationException(Response.status(509).build());
      }
      try {
        ResponseBuilder response = Response.ok(getTableListForUser(user));
        response.cacheControl(cacheControl);
        return response.build();
      } catch (IOException e) {
        throw new WebApplicationException(e,
          Response.Status.SERVICE_UNAVAILABLE);
      }
    }
    throw new WebApplicationException(Response.Status.BAD_REQUEST);
  }

  @Path("{token: [0-9a-fA-F]{32} }/status/cluster") // 128 bit md5 sums
  public StorageClusterStatusResource getClusterStatusResourceAuthorized(
      final @PathParam("token") String token) throws IOException {
    if (servlet.isMultiUser()) {
      User user = auth(token);
      if (user.isAdmin()) {
        if (!servlet.userRequestLimit(user, 1)) {
          throw new WebApplicationException(Response.status(509).build());
        }
        return new StorageClusterStatusResource();
      }
      throw new WebApplicationException(Response.Status.FORBIDDEN);
    }
    throw new WebApplicationException(Response.Status.BAD_REQUEST);
  }

  @Path("{token: [0-9a-fA-F]{32} }/{table}")
  public TableResource getTableResource(
      final @PathParam("token") String token,
      final @PathParam("table") String table) throws IOException {
    if (servlet.isMultiUser()) {
      User user = auth(token);
      if (!servlet.userRequestLimit(user, 1)) {
        throw new WebApplicationException(Response.status(509).build());
      }
      return new TableResource(user, table);
    }
    throw new WebApplicationException(Response.Status.BAD_REQUEST);
  }

  @Path("{table}")
  public TableResource getTableResource(
      final @PathParam("table") String table) throws IOException {
    if (servlet.isMultiUser()) {
      throw new WebApplicationException(Response.Status.BAD_REQUEST);
    }
    return new TableResource(null, table);
  }
}
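
A hedged aside (not part of this commit): the path templates above match 32 hexadecimal characters, which the inline comments identify as 128-bit MD5 sums. A value of that shape can be produced as below; the input string is invented, and the real token scheme belongs to the Authenticator, which this diff does not show.

    import java.security.MessageDigest;

    public class TokenShape {
      public static void main(String[] args) throws Exception {
        // Invented credential string, for illustration only.
        byte[] digest = MessageDigest.getInstance("MD5")
            .digest("illustrative-credential".getBytes("UTF-8"));
        StringBuilder hex = new StringBuilder();
        for (byte b: digest) {
          hex.append(String.format("%02x", b));
        }
        // 32 hex characters, matching {token: [0-9a-fA-F]{32} }
        System.out.println(hex);
      }
    }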

View File

@@ -1,112 +1,112 @@
/*
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.stargate;

import java.io.IOException;
import java.util.Iterator;
import java.util.NoSuchElementException;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.filter.Filter;

public class RowResultGenerator extends ResultGenerator {
  private Iterator<KeyValue> valuesI;
  private KeyValue cache;

  public RowResultGenerator(final String tableName, final RowSpec rowspec,
      final Filter filter) throws IllegalArgumentException, IOException {
    HTablePool pool = RESTServlet.getInstance().getTablePool();
    HTableInterface table = pool.getTable(tableName);
    try {
      Get get = new Get(rowspec.getRow());
      if (rowspec.hasColumns()) {
        for (byte[] col: rowspec.getColumns()) {
          byte[][] split = KeyValue.parseColumn(col);
          if (split.length == 2 && split[1].length != 0) {
            get.addColumn(split[0], split[1]);
          } else {
            get.addFamily(split[0]);
          }
        }
      } else {
        // rowspec does not explicitly specify columns, return them all
        for (HColumnDescriptor family:
            table.getTableDescriptor().getFamilies()) {
          get.addFamily(family.getName());
        }
      }
      get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
      get.setMaxVersions(rowspec.getMaxVersions());
      if (filter != null) {
        get.setFilter(filter);
      }
      Result result = table.get(get);
      if (result != null && !result.isEmpty()) {
        valuesI = result.list().iterator();
      }
    } finally {
      pool.putTable(table);
    }
  }

  public void close() {
  }

  public boolean hasNext() {
    if (cache != null) {
      return true;
    }
    if (valuesI == null) {
      return false;
    }
    return valuesI.hasNext();
  }

  public KeyValue next() {
    if (cache != null) {
      KeyValue kv = cache;
      cache = null;
      return kv;
    }
    if (valuesI == null) {
      return null;
    }
    try {
      return valuesI.next();
    } catch (NoSuchElementException e) {
      return null;
    }
  }

  public void putBack(KeyValue kv) {
    this.cache = kv;
  }

  public void remove() {
    throw new UnsupportedOperationException("remove not supported");
  }
}

View File

@@ -1,317 +1,317 @@
/*
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.stargate;

import java.util.Collection;
import java.util.TreeSet;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Parses a path based row/column/timestamp specification into its component
 * elements.
 */
public class RowSpec {
  public static final long DEFAULT_START_TIMESTAMP = 0;
  public static final long DEFAULT_END_TIMESTAMP = Long.MAX_VALUE;

  private byte[] row = HConstants.EMPTY_START_ROW;
  private byte[] endRow = null;
  private TreeSet<byte[]> columns =
    new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
  private long startTime = DEFAULT_START_TIMESTAMP;
  private long endTime = DEFAULT_END_TIMESTAMP;
  private int maxVersions = HColumnDescriptor.DEFAULT_VERSIONS;

  public RowSpec(String path) throws IllegalArgumentException {
    int i = 0;
    while (path.charAt(i) == '/') {
      i++;
    }
    i = parseRowKeys(path, i);
    i = parseColumns(path, i);
    i = parseTimestamp(path, i);
  }

  private int parseRowKeys(final String path, int i)
      throws IllegalArgumentException {
    StringBuilder startRow = new StringBuilder();
    StringBuilder endRow = null;
    try {
      char c;
      boolean doEndRow = false;
      while (i < path.length() && (c = path.charAt(i)) != '/') {
        if (c == ',') {
          doEndRow = true;
          i++;
          break;
        }
        startRow.append(c);
        i++;
      }
      i++;
      this.row = Bytes.toBytes(startRow.toString());
      if (doEndRow) {
        endRow = new StringBuilder();
        while ((c = path.charAt(i)) != '/') {
          endRow.append(c);
          i++;
        }
        i++;
      }
    } catch (IndexOutOfBoundsException e) {
      throw new IllegalArgumentException(e);
    }
    // HBase does not support wildcards on row keys so we will emulate a
    // suffix glob by synthesizing appropriate start and end row keys for
    // table scanning
    if (startRow.charAt(startRow.length() - 1) == '*') {
      if (endRow != null)
        throw new IllegalArgumentException("invalid path: start row "+
          "specified with wildcard");
      this.row = Bytes.toBytes(startRow.substring(0,
        startRow.lastIndexOf("*")));
      this.endRow = new byte[this.row.length + 1];
      System.arraycopy(this.row, 0, this.endRow, 0, this.row.length);
      this.endRow[this.row.length] = (byte)255;
    } else {
      this.row = Bytes.toBytes(startRow.toString());
      if (endRow != null) {
        this.endRow = Bytes.toBytes(endRow.toString());
      }
    }
    return i;
  }

  private int parseColumns(final String path, int i)
      throws IllegalArgumentException {
    if (i >= path.length()) {
      return i;
    }
    try {
      char c;
      StringBuilder column = new StringBuilder();
      boolean hasColon = false;
      while (i < path.length() && (c = path.charAt(i)) != '/') {
        if (c == ',') {
          if (column.length() < 1) {
            throw new IllegalArgumentException("invalid path");
          }
          if (!hasColon) {
            column.append(':');
          }
          this.columns.add(Bytes.toBytes(column.toString()));
          column = new StringBuilder();
          hasColon = false;
          i++;
          continue;
        }
        if (c == ':') {
          hasColon = true;
        }
        column.append(c);
        i++;
      }
      i++;
      // trailing list entry
      if (column.length() > 1) {
        if (!hasColon) {
          column.append(':');
        }
        this.columns.add(Bytes.toBytes(column.toString()));
      }
    } catch (IndexOutOfBoundsException e) {
      throw new IllegalArgumentException(e);
    }
    return i;
  }

  private int parseTimestamp(final String path, int i)
      throws IllegalArgumentException {
    if (i >= path.length()) {
      return i;
    }
    long time0 = 0, time1 = 0;
    try {
      char c = 0;
      StringBuilder stamp = new StringBuilder();
      while (i < path.length()) {
        c = path.charAt(i);
        if (c == '/' || c == ',') {
          break;
        }
        stamp.append(c);
        i++;
      }
      try {
        time0 = Long.valueOf(stamp.toString());
      } catch (NumberFormatException e) {
        throw new IllegalArgumentException(e);
      }
      if (c == ',') {
        stamp = new StringBuilder();
        i++;
        while (i < path.length() && ((c = path.charAt(i)) != '/')) {
          stamp.append(c);
          i++;
        }
        try {
          time1 = Long.valueOf(stamp.toString());
        } catch (NumberFormatException e) {
          throw new IllegalArgumentException(e);
        }
      }
      if (c == '/') {
        i++;
      }
    } catch (IndexOutOfBoundsException e) {
      throw new IllegalArgumentException(e);
    }
    if (time1 != 0) {
      startTime = time0;
      endTime = time1;
    } else {
      endTime = time0;
    }
    return i;
  }

  public RowSpec(byte[] startRow, byte[] endRow, byte[][] columns,
      long startTime, long endTime, int maxVersions) {
    this.row = startRow;
    this.endRow = endRow;
    if (columns != null) {
      for (byte[] col: columns) {
        this.columns.add(col);
      }
    }
    this.startTime = startTime;
    this.endTime = endTime;
    this.maxVersions = maxVersions;
  }

  public RowSpec(byte[] startRow, byte[] endRow, Collection<byte[]> columns,
      long startTime, long endTime, int maxVersions) {
    this.row = startRow;
    this.endRow = endRow;
    if (columns != null) {
      this.columns.addAll(columns);
    }
    this.startTime = startTime;
    this.endTime = endTime;
    this.maxVersions = maxVersions;
  }

  public boolean isSingleRow() {
    return endRow == null;
  }

  public int getMaxVersions() {
    return maxVersions;
  }

  public void setMaxVersions(final int maxVersions) {
    this.maxVersions = maxVersions;
  }

  public boolean hasColumns() {
    return !columns.isEmpty();
  }

  public byte[] getRow() {
    return row;
  }

  public byte[] getStartRow() {
    return row;
  }

  public boolean hasEndRow() {
    return endRow != null;
  }

  public byte[] getEndRow() {
    return endRow;
  }

  public void addColumn(final byte[] column) {
    columns.add(column);
  }

  public byte[][] getColumns() {
    return columns.toArray(new byte[columns.size()][]);
  }

  public boolean hasTimestamp() {
    return (startTime == 0) && (endTime != Long.MAX_VALUE);
  }

  public long getTimestamp() {
    return endTime;
  }

  public long getStartTime() {
    return startTime;
  }

  public void setStartTime(final long startTime) {
    this.startTime = startTime;
  }

  public long getEndTime() {
    return endTime;
  }

  public void setEndTime(long endTime) {
    this.endTime = endTime;
  }

  public String toString() {
    StringBuilder result = new StringBuilder();
    result.append("{startRow => '");
    if (row != null) {
      result.append(Bytes.toString(row));
    }
    result.append("', endRow => '");
    if (endRow != null) {
      result.append(Bytes.toString(endRow));
    }
    result.append("', columns => [");
    for (byte[] col: columns) {
      result.append(" '");
      result.append(Bytes.toString(col));
      result.append("'");
    }
    result.append(" ], startTime => ");
    result.append(Long.toString(startTime));
    result.append(", endTime => ");
    result.append(Long.toString(endTime));
    result.append(", maxVersions => ");
    result.append(Integer.toString(maxVersions));
    result.append("}");
    return result.toString();
  }
}
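
An illustrative parse (not part of this commit) of the path grammar implemented above: a start row, an optional end row after a comma, a column list, then one or two timestamps. The values are invented, and maxVersions falls back to HColumnDescriptor.DEFAULT_VERSIONS (assumed to be 3 here).

    public class RowSpecExample {
      public static void main(String[] args) {
        RowSpec spec = new RowSpec("/row1,row5/cf:a,cf:b/100,200");
        // Expected shape per toString() above:
        // {startRow => 'row1', endRow => 'row5', columns => [ 'cf:a' 'cf:b' ],
        //  startTime => 100, endTime => 200, maxVersions => 3}
        System.out.println(spec);
      }
    }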

View File

@@ -1,159 +1,159 @@
/*
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.stargate;

import java.io.IOException;

import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.CacheControl;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.ResponseBuilder;
import javax.ws.rs.core.UriInfo;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.stargate.model.CellModel;
import org.apache.hadoop.hbase.stargate.model.CellSetModel;
import org.apache.hadoop.hbase.stargate.model.RowModel;
import org.apache.hadoop.hbase.util.Bytes;

import com.sun.jersey.core.util.Base64;

public class ScannerInstanceResource implements Constants {
  private static final Log LOG =
    LogFactory.getLog(ScannerInstanceResource.class);

  User user;
  ResultGenerator generator;
  String id;
  int batch;
  RESTServlet servlet;
  CacheControl cacheControl;

  public ScannerInstanceResource(User user, String table, String id,
      ResultGenerator generator, int batch) throws IOException {
    this.user = user;
    this.id = id;
    this.generator = generator;
    this.batch = batch;
    servlet = RESTServlet.getInstance();
    cacheControl = new CacheControl();
    cacheControl.setNoCache(true);
    cacheControl.setNoTransform(false);
  }

  @GET
  @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
  public Response get(final @Context UriInfo uriInfo) throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("GET " + uriInfo.getAbsolutePath());
    }
    servlet.getMetrics().incrementRequests(1);
    CellSetModel model = new CellSetModel();
    RowModel rowModel = null;
    byte[] rowKey = null;
    int count = batch;
    do {
      KeyValue value = null;
      try {
        value = generator.next();
      } catch (IllegalStateException e) {
        ScannerResource.delete(id);
        throw new WebApplicationException(Response.Status.GONE);
      }
      if (value == null) {
        LOG.info("generator exhausted");
        // respond with 204 (No Content) if an empty cell set would be
        // returned
        if (count == batch) {
          return Response.noContent().build();
        }
        break;
      }
      if (rowKey == null) {
        rowKey = value.getRow();
        rowModel = new RowModel(rowKey);
      }
      if (!Bytes.equals(value.getRow(), rowKey)) {
        // the user request limit is a transaction limit, so we need to
        // account for scanner.next()
        if (user != null && !servlet.userRequestLimit(user, 1)) {
          generator.putBack(value);
          break;
        }
        model.addRow(rowModel);
        rowKey = value.getRow();
        rowModel = new RowModel(rowKey);
      }
      rowModel.addCell(
        new CellModel(value.getFamily(), value.getQualifier(),
          value.getTimestamp(), value.getValue()));
    } while (--count > 0);
    model.addRow(rowModel);
    ResponseBuilder response = Response.ok(model);
    response.cacheControl(cacheControl);
    return response.build();
  }

  @GET
  @Produces(MIMETYPE_BINARY)
  public Response getBinary(final @Context UriInfo uriInfo) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("GET " + uriInfo.getAbsolutePath() + " as " +
        MIMETYPE_BINARY);
    }
    servlet.getMetrics().incrementRequests(1);
    try {
      KeyValue value = generator.next();
      if (value == null) {
        LOG.info("generator exhausted");
        return Response.noContent().build();
      }
      ResponseBuilder response = Response.ok(value.getValue());
      response.cacheControl(cacheControl);
      response.header("X-Row", Base64.encode(value.getRow()));
      response.header("X-Column",
        Base64.encode(
          KeyValue.makeColumn(value.getFamily(), value.getQualifier())));
      response.header("X-Timestamp", value.getTimestamp());
      return response.build();
    } catch (IllegalStateException e) {
      ScannerResource.delete(id);
      throw new WebApplicationException(Response.Status.GONE);
    }
  }

  @DELETE
  public Response delete(final @Context UriInfo uriInfo) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("DELETE " + uriInfo.getAbsolutePath());
    }
    servlet.getMetrics().incrementRequests(1);
    ScannerResource.delete(id);
    return Response.ok().build();
  }
}
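
A hedged protocol sketch (not part of this commit) of the status codes this resource produces: 200 with a cell set, 204 once the generator is exhausted, 410 (Gone) after the scanner is invalidated, and DELETE to dispose of it. The scanner URI and Accept type below are invented placeholders.

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class ScannerClient {
      public static void main(String[] args) throws Exception {
        // Invented URI; a real one is issued when the scanner is created.
        URL url = new URL("http://localhost:8080/mytable/scanner/0123abcd");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty("Accept", "application/json");
        int code = conn.getResponseCode();
        if (code == 204) {
          System.out.println("scanner exhausted");
        } else if (code == 410) {
          System.out.println("scanner expired or deleted");
        } else {
          System.out.println("HTTP " + code + ": cells available");
        }
      }
    }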

View File

@@ -1,143 +1,143 @@
/*
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.stargate;
import java.io.IOException;
import java.net.URI;
import java.util.HashMap;
import java.util.Map;
import javax.ws.rs.Consumes;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriBuilder;
import javax.ws.rs.core.UriInfo;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.stargate.User;
import org.apache.hadoop.hbase.stargate.model.ScannerModel;
public class ScannerResource implements Constants {
private static final Log LOG = LogFactory.getLog(ScannerResource.class);
static final Map<String,ScannerInstanceResource> scanners =
new HashMap<String,ScannerInstanceResource>();
User user;
String tableName;
String actualTableName;
RESTServlet servlet;
public ScannerResource(User user, String table) throws IOException {
if (user != null) {
this.user = user;
this.actualTableName =
!user.isAdmin() ? user.getName() + "." + table : table;
} else {
this.actualTableName = table;
}
this.tableName = table;
servlet = RESTServlet.getInstance();
}
static void delete(final String id) {
synchronized (scanners) {
ScannerInstanceResource instance = scanners.remove(id);
if (instance != null) {
instance.generator.close();
}
}
}
Response update(final ScannerModel model, final boolean replace,
final UriInfo uriInfo) {
servlet.getMetrics().incrementRequests(1);
byte[] endRow = model.hasEndRow() ? model.getEndRow() : null;
RowSpec spec = new RowSpec(model.getStartRow(), endRow,
model.getColumns(), model.getStartTime(), model.getEndTime(), 1);
try {
Filter filter = ScannerResultGenerator.buildFilterFromModel(model);
ScannerResultGenerator gen =
new ScannerResultGenerator(actualTableName, spec, filter);
String id = gen.getID();
ScannerInstanceResource instance =
new ScannerInstanceResource(user, actualTableName, id, gen,
model.getBatch());
synchronized (scanners) {
scanners.put(id, instance);
}
if (LOG.isDebugEnabled()) {
LOG.debug("new scanner: " + id);
}
UriBuilder builder = uriInfo.getAbsolutePathBuilder();
URI uri = builder.path(id).build();
return Response.created(uri).build();
} catch (IOException e) {
throw new WebApplicationException(e,
Response.Status.SERVICE_UNAVAILABLE);
} catch (Exception e) {
throw new WebApplicationException(e, Response.Status.BAD_REQUEST);
}
}
@PUT
@Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
public Response put(final ScannerModel model,
final @Context UriInfo uriInfo) {
if (LOG.isDebugEnabled()) {
LOG.debug("PUT " + uriInfo.getAbsolutePath());
}
return update(model, true, uriInfo);
}
@POST
@Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
public Response post(final ScannerModel model,
final @Context UriInfo uriInfo) {
if (LOG.isDebugEnabled()) {
LOG.debug("POST " + uriInfo.getAbsolutePath());
}
return update(model, false, uriInfo);
}
@Path("{scanner: .+}")
public ScannerInstanceResource getScannerInstanceResource(
final @PathParam("scanner") String id) {
synchronized (scanners) {
ScannerInstanceResource instance = scanners.get(id);
if (instance == null) {
throw new WebApplicationException(Response.Status.NOT_FOUND);
}
return instance;
}
}
}
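
Together with ScannerInstanceResource above, this class gives scanners a small REST lifecycle: POST a scanner definition to /<table>/scanner, receive the new instance URI in the Location header, GET that URI repeatedly for batches of cells (204 once drained), then DELETE it. Note that update() ignores its replace flag, so PUT and POST currently behave identically. A hedged client sketch using only the JDK follows; the host, table name, MIME types, and the <Scanner batch="..."/> payload shape are assumptions for illustration, not verified against a live Stargate.

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class ScannerClient {
  public static void main(String[] args) throws IOException {
    String base = "http://localhost:8080/mytable/scanner";   // assumed host and table

    // 1. create a scanner; the XML shape mirrors ScannerModel (assumed)
    HttpURLConnection post = (HttpURLConnection) new URL(base).openConnection();
    post.setRequestMethod("POST");
    post.setDoOutput(true);
    post.setRequestProperty("Content-Type", "text/xml");
    post.getOutputStream().write("<Scanner batch=\"16\"/>".getBytes("UTF-8"));
    String location = post.getHeaderField("Location");       // 201 Created + instance URI
    post.disconnect();

    // 2. drain it: each GET returns one batch; 204 means exhausted
    while (true) {
      HttpURLConnection get = (HttpURLConnection) new URL(location).openConnection();
      get.setRequestProperty("Accept", "text/xml");
      if (get.getResponseCode() == 204) { get.disconnect(); break; }
      BufferedReader in = new BufferedReader(
          new InputStreamReader(get.getInputStream(), "UTF-8"));
      for (String line; (line = in.readLine()) != null; ) System.out.println(line);
      in.close();
    }

    // 3. release the server-side scanner
    HttpURLConnection del = (HttpURLConnection) new URL(location).openConnection();
    del.setRequestMethod("DELETE");
    del.getResponseCode();                                    // force the request
    del.disconnect();
  }
}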
@@ -1,179 +1,179 @@
/*
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.stargate;
import java.io.IOException;
import java.util.Iterator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.UnknownScannerException;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.stargate.model.ScannerModel;
import org.apache.hadoop.util.StringUtils;
public class ScannerResultGenerator extends ResultGenerator {
private static final Log LOG =
LogFactory.getLog(ScannerResultGenerator.class);
public static Filter buildFilterFromModel(final ScannerModel model)
throws Exception {
String filter = model.getFilter();
if (filter == null || filter.length() == 0) {
return null;
}
return buildFilter(filter);
}
private String id;
private Iterator<KeyValue> rowI;
private KeyValue cache;
private ResultScanner scanner;
private Result cached;
public ScannerResultGenerator(final String tableName, final RowSpec rowspec,
final Filter filter) throws IllegalArgumentException, IOException {
HTablePool pool = RESTServlet.getInstance().getTablePool();
HTableInterface table = pool.getTable(tableName);
try {
Scan scan;
if (rowspec.hasEndRow()) {
scan = new Scan(rowspec.getStartRow(), rowspec.getEndRow());
} else {
scan = new Scan(rowspec.getStartRow());
}
if (rowspec.hasColumns()) {
byte[][] columns = rowspec.getColumns();
for (byte[] column: columns) {
byte[][] split = KeyValue.parseColumn(column);
if (split.length > 1 && (split[1] != null && split[1].length != 0)) {
scan.addColumn(split[0], split[1]);
} else {
scan.addFamily(split[0]);
}
}
} else {
for (HColumnDescriptor family:
table.getTableDescriptor().getFamilies()) {
scan.addFamily(family.getName());
}
}
scan.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
scan.setMaxVersions(rowspec.getMaxVersions());
if (filter != null) {
scan.setFilter(filter);
}
// always disable block caching on the cluster when scanning
scan.setCacheBlocks(false);
scanner = table.getScanner(scan);
cached = null;
id = Long.toString(System.currentTimeMillis()) +
Integer.toHexString(scanner.hashCode());
} finally {
pool.putTable(table);
}
}
public String getID() {
return id;
}
  public void close() {
    // release the server-side scanner and its resources
    scanner.close();
  }
public boolean hasNext() {
if (cache != null) {
return true;
}
if (rowI != null && rowI.hasNext()) {
return true;
}
if (cached != null) {
return true;
}
try {
Result result = scanner.next();
if (result != null && !result.isEmpty()) {
cached = result;
}
} catch (UnknownScannerException e) {
throw new IllegalArgumentException(e);
} catch (IOException e) {
LOG.error(StringUtils.stringifyException(e));
}
return cached != null;
}
public KeyValue next() {
if (cache != null) {
KeyValue kv = cache;
cache = null;
return kv;
}
boolean loop;
do {
loop = false;
if (rowI != null) {
if (rowI.hasNext()) {
return rowI.next();
} else {
rowI = null;
}
}
if (cached != null) {
rowI = cached.list().iterator();
loop = true;
cached = null;
} else {
Result result = null;
try {
result = scanner.next();
} catch (UnknownScannerException e) {
throw new IllegalArgumentException(e);
} catch (IOException e) {
LOG.error(StringUtils.stringifyException(e));
}
if (result != null && !result.isEmpty()) {
rowI = result.list().iterator();
loop = true;
}
}
} while (loop);
return null;
}
public void putBack(KeyValue kv) {
this.cache = kv;
}
public void remove() {
throw new UnsupportedOperationException("remove not supported");
}
}
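
hasNext() above works by reading ahead: it pulls the next Result from the scanner and parks it in the cached field, while putBack() gives callers a one-cell undo slot (the cache field). Stripped of the HBase types, the contract is a one-element lookahead iterator; a generic sketch, illustrative rather than Stargate code:

import java.util.Iterator;
import java.util.NoSuchElementException;

/** One-element lookahead over a source whose fetch() returns null when exhausted. */
public abstract class LookaheadIterator<T> implements Iterator<T> {
  private T pushback;   // slot filled by putBack(), drained first
  private T lookahead;  // slot filled by hasNext(), consumed by next()

  /** Fetch the next item from the underlying source, or null at end of stream. */
  protected abstract T fetch();

  public void putBack(T item) { pushback = item; }

  public boolean hasNext() {
    if (pushback != null || lookahead != null) return true;
    lookahead = fetch();               // read ahead, like caching scanner.next()
    return lookahead != null;
  }

  public T next() {
    if (pushback != null) { T t = pushback; pushback = null; return t; }
    if (lookahead == null && !hasNext()) throw new NoSuchElementException();
    T t = lookahead; lookahead = null; return t;
  }

  public void remove() { throw new UnsupportedOperationException(); }

  public static void main(String[] args) {
    final String[] data = { "a", "b" };
    LookaheadIterator<String> it = new LookaheadIterator<String>() {
      private int i = 0;
      protected String fetch() { return i < data.length ? data[i++] : null; }
    };
    String first = it.next();   // "a"
    it.putBack(first);          // undo, as the quota check in get() does
    while (it.hasNext()) System.out.print(it.next());  // prints "ab"
  }
}

The real generator layers two such slots, one for a whole cached Result and one for a single pushed-back KeyValue.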
@@ -1,259 +1,259 @@
/*
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.stargate;
import java.io.IOException;
import java.util.Map;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.CacheControl;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import javax.ws.rs.core.Response.ResponseBuilder;
import javax.xml.namespace.QName;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.stargate.User;
import org.apache.hadoop.hbase.stargate.model.ColumnSchemaModel;
import org.apache.hadoop.hbase.stargate.model.TableSchemaModel;
import org.apache.hadoop.hbase.util.Bytes;
public class SchemaResource implements Constants {
private static final Log LOG = LogFactory.getLog(SchemaResource.class);
User user;
String tableName;
String actualTableName;
CacheControl cacheControl;
RESTServlet servlet;
public SchemaResource(User user, String table) throws IOException {
if (user != null) {
this.user = user;
this.actualTableName =
!user.isAdmin() ? (user.getName() + "." + table) : table;
} else {
this.actualTableName = table;
}
this.tableName = table;
servlet = RESTServlet.getInstance();
cacheControl = new CacheControl();
cacheControl.setNoCache(true);
cacheControl.setNoTransform(false);
}
private HTableDescriptor getTableSchema() throws IOException,
TableNotFoundException {
HTablePool pool = servlet.getTablePool();
HTableInterface table = pool.getTable(actualTableName);
try {
return table.getTableDescriptor();
} finally {
pool.putTable(table);
}
}
@GET
@Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
public Response get(final @Context UriInfo uriInfo) {
if (LOG.isDebugEnabled()) {
LOG.debug("GET " + uriInfo.getAbsolutePath());
}
servlet.getMetrics().incrementRequests(1);
try {
HTableDescriptor htd = getTableSchema();
TableSchemaModel model = new TableSchemaModel();
model.setName(tableName);
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
htd.getValues().entrySet()) {
model.addAttribute(Bytes.toString(e.getKey().get()),
Bytes.toString(e.getValue().get()));
}
for (HColumnDescriptor hcd: htd.getFamilies()) {
ColumnSchemaModel columnModel = new ColumnSchemaModel();
columnModel.setName(hcd.getNameAsString());
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
hcd.getValues().entrySet()) {
columnModel.addAttribute(Bytes.toString(e.getKey().get()),
Bytes.toString(e.getValue().get()));
}
model.addColumnFamily(columnModel);
}
ResponseBuilder response = Response.ok(model);
response.cacheControl(cacheControl);
return response.build();
} catch (TableNotFoundException e) {
throw new WebApplicationException(Response.Status.NOT_FOUND);
} catch (IOException e) {
throw new WebApplicationException(e,
Response.Status.SERVICE_UNAVAILABLE);
}
}
private Response replace(final byte[] tableName,
final TableSchemaModel model, final UriInfo uriInfo,
final HBaseAdmin admin) {
try {
HTableDescriptor htd = new HTableDescriptor(tableName);
for (Map.Entry<QName,Object> e: model.getAny().entrySet()) {
htd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
}
for (ColumnSchemaModel family: model.getColumns()) {
HColumnDescriptor hcd = new HColumnDescriptor(family.getName());
for (Map.Entry<QName,Object> e: family.getAny().entrySet()) {
hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
}
htd.addFamily(hcd);
}
if (admin.tableExists(tableName)) {
admin.disableTable(tableName);
admin.modifyTable(tableName, htd);
admin.enableTable(tableName);
} else try {
admin.createTable(htd);
} catch (TableExistsException e) {
// race, someone else created a table with the same name
throw new WebApplicationException(e, Response.Status.NOT_MODIFIED);
}
return Response.created(uriInfo.getAbsolutePath()).build();
} catch (IOException e) {
throw new WebApplicationException(e,
Response.Status.SERVICE_UNAVAILABLE);
}
}
  private Response update(final byte[] tableName, final TableSchemaModel model,
final UriInfo uriInfo, final HBaseAdmin admin) {
try {
HTableDescriptor htd = admin.getTableDescriptor(tableName);
admin.disableTable(tableName);
try {
for (ColumnSchemaModel family: model.getColumns()) {
HColumnDescriptor hcd = new HColumnDescriptor(family.getName());
for (Map.Entry<QName,Object> e: family.getAny().entrySet()) {
hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
}
if (htd.hasFamily(hcd.getName())) {
admin.modifyColumn(tableName, hcd.getName(), hcd);
} else {
          // add to the actual table, consistent with modifyColumn above
          admin.addColumn(tableName, hcd);
}
}
} catch (IOException e) {
throw new WebApplicationException(e,
Response.Status.INTERNAL_SERVER_ERROR);
} finally {
admin.enableTable(tableName);
}
return Response.ok().build();
} catch (IOException e) {
throw new WebApplicationException(e,
Response.Status.SERVICE_UNAVAILABLE);
}
}
private Response update(final TableSchemaModel model, final boolean replace,
final UriInfo uriInfo) {
try {
servlet.invalidateMaxAge(tableName);
byte[] tableName = Bytes.toBytes(actualTableName);
HBaseAdmin admin = new HBaseAdmin(servlet.getConfiguration());
if (replace || !admin.tableExists(tableName)) {
return replace(tableName, model, uriInfo, admin);
} else {
return update(tableName, model, uriInfo, admin);
}
} catch (IOException e) {
throw new WebApplicationException(e,
Response.Status.SERVICE_UNAVAILABLE);
}
}
@PUT
@Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
public Response put(final TableSchemaModel model,
final @Context UriInfo uriInfo) {
if (LOG.isDebugEnabled()) {
LOG.debug("PUT " + uriInfo.getAbsolutePath());
}
servlet.getMetrics().incrementRequests(1);
// use the name given in the path, but warn if the name on the path and
// the name in the schema are different
    if (!tableName.equals(model.getName())) {
LOG.warn("table name mismatch: path='" + tableName + "', schema='" +
model.getName() + "'");
}
return update(model, true, uriInfo);
}
@POST
@Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
public Response post(final TableSchemaModel model,
final @Context UriInfo uriInfo) {
if (LOG.isDebugEnabled()) {
LOG.debug("PUT " + uriInfo.getAbsolutePath());
}
servlet.getMetrics().incrementRequests(1);
// use the name given in the path, but warn if the name on the path and
// the name in the schema are different
    if (!tableName.equals(model.getName())) {
LOG.warn("table name mismatch: path='" + tableName + "', schema='" +
model.getName() + "'");
}
return update(model, false, uriInfo);
}
@DELETE
public Response delete(final @Context UriInfo uriInfo) {
if (LOG.isDebugEnabled()) {
LOG.debug("DELETE " + uriInfo.getAbsolutePath());
}
servlet.getMetrics().incrementRequests(1);
try {
HBaseAdmin admin = new HBaseAdmin(servlet.getConfiguration());
admin.disableTable(actualTableName);
admin.deleteTable(actualTableName);
return Response.ok().build();
} catch (TableNotFoundException e) {
throw new WebApplicationException(Response.Status.NOT_FOUND);
} catch (IOException e) {
throw new WebApplicationException(e,
Response.Status.SERVICE_UNAVAILABLE);
}
}
}
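
The two write paths above differ in intent: PUT replaces the table schema wholesale (disable, modify, enable), while POST merges column families into an existing table. A hedged sketch of driving the replace path over HTTP; the host, table name, and the <TableSchema>/<ColumnSchema> element names (inferred from the model classes imported above) are assumptions:

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class SchemaClient {
  public static void main(String[] args) throws Exception {
    // assumed endpoint; PUT = replace schema, POST = merge column families
    URL url = new URL("http://localhost:8080/mytable/schema");
    String body = "<TableSchema name=\"mytable\">"
                + "<ColumnSchema name=\"cf\" maxVersions=\"3\"/>"
                + "</TableSchema>";              // element names assumed from the models
    HttpURLConnection c = (HttpURLConnection) url.openConnection();
    c.setRequestMethod("PUT");
    c.setDoOutput(true);
    c.setRequestProperty("Content-Type", "text/xml");
    OutputStream out = c.getOutputStream();
    out.write(body.getBytes("UTF-8"));
    out.close();
    System.out.println("HTTP " + c.getResponseCode()); // 201 Created on success
    c.disconnect();
  }
}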
@@ -1,69 +1,69 @@
/*
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.stargate;
import java.io.IOException;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.QueryParam;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Response;
import org.apache.hadoop.hbase.stargate.User;
public class TableResource implements Constants {
User user;
String table;
public TableResource(User user, String table) {
this.user = user;
this.table = table;
}
@Path("regions")
public RegionsResource getRegionsResource() throws IOException {
return new RegionsResource(user, table);
}
@Path("scanner")
public ScannerResource getScannerResource() throws IOException {
return new ScannerResource(user, table);
}
@Path("schema")
public SchemaResource getSchemaResource() throws IOException {
return new SchemaResource(user, table);
}
@Path("{rowspec: .+}")
public RowResource getRowResource(
final @PathParam("rowspec") String rowspec,
final @QueryParam("v") String versions) {
try {
return new RowResource(user, table, rowspec, versions);
} catch (IOException e) {
throw new WebApplicationException(e,
Response.Status.INTERNAL_SERVER_ERROR);
}
}
}
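
TableResource declares no HTTP verb handlers of its own; each @Path method is a JAX-RS sub-resource locator, so a request such as GET /mytable/schema is resolved by calling getSchemaResource() and then dispatching inside SchemaResource. A toy pair demonstrating the mechanism, with illustrative names:

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;

@Path("parent")
public class ParentResource {
  // no verb annotation: a locator, not an endpoint
  @Path("child")
  public ChildResource getChild() {
    return new ChildResource();   // GET /parent/child lands in ChildResource.get()
  }
}

class ChildResource {
  @GET
  @Produces("text/plain")
  public String get() {
    return "served by the sub-resource";
  }
}

Because the locator constructs the returned object itself, TableResource can hand SchemaResource, ScannerResource, and the others a user and table that are already bound.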
@@ -1,94 +1,94 @@
/*
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.stargate;
import java.io.IOException;
import javax.servlet.ServletContext;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.CacheControl;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import javax.ws.rs.core.Response.ResponseBuilder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.stargate.model.VersionModel;
/**
* Implements Stargate software version reporting via
* <p>
* <tt>/version/stargate</tt>
* <p>
* <tt>/version</tt> (alias for <tt>/version/stargate</tt>)
*/
public class VersionResource implements Constants {
private static final Log LOG = LogFactory.getLog(VersionResource.class);
private CacheControl cacheControl;
private RESTServlet servlet;
public VersionResource() throws IOException {
servlet = RESTServlet.getInstance();
cacheControl = new CacheControl();
cacheControl.setNoCache(true);
cacheControl.setNoTransform(false);
}
/**
* Build a response for a version request.
* @param context servlet context
* @param uriInfo (JAX-RS context variable) request URL
* @return a response for a version request
*/
@GET
@Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
public Response get(final @Context ServletContext context,
final @Context UriInfo uriInfo) {
if (LOG.isDebugEnabled()) {
LOG.debug("GET " + uriInfo.getAbsolutePath());
}
servlet.getMetrics().incrementRequests(1);
ResponseBuilder response = Response.ok(new VersionModel(context));
response.cacheControl(cacheControl);
return response.build();
}
/**
* Dispatch to StorageClusterVersionResource
*/
@Path("cluster")
public StorageClusterVersionResource getClusterVersionResource()
throws IOException {
return new StorageClusterVersionResource();
}
/**
* Dispatch <tt>/version/stargate</tt> to self.
*/
@Path("stargate")
public VersionResource getVersionResource() {
return this;
}
}
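
As the class comment notes, /version and /version/stargate are aliases, and /version/cluster reports the storage cluster version. A minimal probe of the plain-text variant; host and port are assumptions:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class VersionProbe {
  public static void main(String[] args) throws Exception {
    // plain-text variant; XML, JSON, and protobuf are also produced
    HttpURLConnection c =
        (HttpURLConnection) new URL("http://localhost:8080/version").openConnection();
    c.setRequestProperty("Accept", "text/plain");
    BufferedReader in = new BufferedReader(
        new InputStreamReader(c.getInputStream(), "UTF-8"));
    for (String line; (line = in.readLine()) != null; ) System.out.println(line);
    in.close();
  }
}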
@@ -1,26 +1,26 @@
// Copyright 2010 The Apache Software Foundation
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.apache.hadoop.hbase.stargate.protobuf.generated;
message Cell {
optional bytes row = 1; // unused if Cell is in a CellSet
optional bytes column = 2;
optional int64 timestamp = 3;
optional bytes data = 4;
}
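
Assuming protoc's default naming, this file generates an outer class CellMessage in the package declared above, with Cell as a nested message using the standard protobuf 2 builder API. A hedged sketch of producing and parsing the MIMETYPE_PROTOBUF wire form of one cell; the same builder pattern applies to the CellSet, Scanner, and other messages that follow:

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage;

public class CellExample {
  public static void main(String[] args) throws Exception {
    // outer class name assumed from the .proto file name (CellMessage.proto)
    CellMessage.Cell cell = CellMessage.Cell.newBuilder()
        .setRow(ByteString.copyFromUtf8("row1"))       // unused when nested in a CellSet
        .setColumn(ByteString.copyFromUtf8("cf:qual"))
        .setTimestamp(System.currentTimeMillis())
        .setData(ByteString.copyFromUtf8("value"))
        .build();
    byte[] wire = cell.toByteArray();                   // MIMETYPE_PROTOBUF payload
    CellMessage.Cell parsed = CellMessage.Cell.parseFrom(wire);
    System.out.println(parsed.getTimestamp());
  }
}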
@@ -1,29 +1,29 @@
// Copyright 2010 The Apache Software Foundation
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import "CellMessage.proto";
package org.apache.hadoop.hbase.stargate.protobuf.generated;
message CellSet {
message Row {
required bytes key = 1;
repeated Cell values = 2;
}
repeated Row rows = 1;
}
@@ -1,32 +1,32 @@
// Copyright 2010 The Apache Software Foundation
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.apache.hadoop.hbase.stargate.protobuf.generated;
message ColumnSchema {
optional string name = 1;
message Attribute {
required string name = 1;
required string value = 2;
}
repeated Attribute attrs = 2;
// optional helpful encodings of commonly used attributes
optional int32 ttl = 3;
optional int32 maxVersions = 4;
optional string compression = 5;
}
@@ -1,30 +1,30 @@
// Copyright 2010 The Apache Software Foundation
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.apache.hadoop.hbase.stargate.protobuf.generated;
message Scanner {
optional bytes startRow = 1;
optional bytes endRow = 2;
repeated bytes columns = 3;
optional int32 batch = 4;
optional int64 startTime = 5;
optional int64 endTime = 6;
optional int32 maxVersions = 7;
optional string filter = 8;
}
@@ -1,45 +1,45 @@
// Copyright 2010 The Apache Software Foundation
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.apache.hadoop.hbase.stargate.protobuf.generated;
message StorageClusterStatus {
message Region {
required bytes name = 1;
optional int32 stores = 2;
optional int32 storefiles = 3;
optional int32 storefileSizeMB = 4;
optional int32 memstoreSizeMB = 5;
optional int32 storefileIndexSizeMB = 6;
}
message Node {
required string name = 1; // name:port
optional int64 startCode = 2;
optional int32 requests = 3;
optional int32 heapSizeMB = 4;
optional int32 maxHeapSizeMB = 5;
repeated Region regions = 6;
}
// node status
repeated Node liveNodes = 1;
repeated string deadNodes = 2;
// summary statistics
optional int32 regions = 3;
optional int32 requests = 4;
optional double averageLoad = 5;
}
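Clients typically consume rather than build this status message. The sketch below walks a parsed StorageClusterStatus, again under the assumption that the generated outer class is named StorageClusterStatusMessage:
import org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus;
public class ClusterStatusExample {
// Summarize a status message, e.g. one fetched from the REST interface.
static void summarize(StorageClusterStatus status) {
System.out.println("regions=" + status.getRegions()
+ " requests=" + status.getRequests()
+ " avgLoad=" + status.getAverageLoad());
for (StorageClusterStatus.Node node : status.getLiveNodesList()) {
System.out.println(node.getName() + ": "
+ node.getRegionsCount() + " regions, heap "
+ node.getHeapSizeMB() + "/" + node.getMaxHeapSizeMB() + " MB");
}
for (String dead : status.getDeadNodesList()) {
System.out.println("dead: " + dead);
}
}
}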

View File

@ -1,31 +1,31 @@
// Copyright 2010 The Apache Software Foundation
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.apache.hadoop.hbase.stargate.protobuf.generated;
message TableInfo {
required string name = 1;
message Region {
required string name = 1;
optional bytes startKey = 2;
optional bytes endKey = 3;
optional int64 id = 4;
optional string location = 5;
}
repeated Region regions = 2;
}

View File

@ -1,34 +1,34 @@
// Copyright 2010 The Apache Software Foundation
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import "ColumnSchemaMessage.proto";
package org.apache.hadoop.hbase.stargate.protobuf.generated;
message TableSchema {
optional string name = 1;
message Attribute {
required string name = 1;
required string value = 2;
}
repeated Attribute attrs = 2;
repeated ColumnSchema columns = 3;
// optional helpful encodings of commonly used attributes
optional bool inMemory = 4;
optional bool readOnly = 5;
}
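TableSchema composes the ColumnSchema message from the imported file, so a complete table description nests one builder inside another. A hedged sketch under the same outer-class naming assumption:
import org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema;
import org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema;
public class TableSchemaExample {
public static void main(String[] args) {
// One table with one column family; the nested builder mirrors the
// import relationship between the two .proto files.
TableSchema table = TableSchema.newBuilder()
.setName("usertable")
.setInMemory(false)
.setReadOnly(false)
.addColumns(ColumnSchema.newBuilder()
.setName("info")
.setMaxVersions(3))
.build();
System.out.println(table.getColumnsCount() + " column family defined");
}
}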

View File

@ -1,27 +1,27 @@
// Copyright 2010 The Apache Software Foundation
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.apache.hadoop.hbase.stargate.protobuf.generated;
message Version {
optional string stargateVersion = 1;
optional string jvmVersion = 2;
optional string osVersion = 3;
optional string serverVersion = 4;
optional string jerseyVersion = 5;
}

View File

@ -1,138 +1,138 @@
/**
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;
/**
* Class that provides the static method needed when putting deletes into the memstore
*/
public class DeleteCompare {
/**
* Return codes from deleteCompare.
*/
enum DeleteCode {
/**
* Do nothing. Move to next KV in memstore
*/
SKIP,
/**
* Add to the list of deletes.
*/
DELETE,
/**
* Stop looking at KVs in memstore. Finalize.
*/
DONE
}
/**
* Method used when putting deletes into memstore to remove all the previous
* entries that are affected by this Delete
* @param mem
* @param deleteBuffer
* @param deleteRowOffset
* @param deleteRowLength
* @param deleteQualifierOffset
* @param deleteQualifierLength
* @param deleteTimeOffset
* @param deleteType
* @param comparator
* @return SKIP if the current KeyValue should not be deleted, DELETE if the
* current KeyValue should be deleted, and DONE when the current KeyValue is
* out of the Delete's range
*/
public static DeleteCode deleteCompare(KeyValue mem, byte [] deleteBuffer,
int deleteRowOffset, short deleteRowLength, int deleteQualifierOffset,
int deleteQualifierLength, int deleteTimeOffset, byte deleteType,
KeyValue.KeyComparator comparator) {
//Parsing new KeyValue
byte [] memBuffer = mem.getBuffer();
int memOffset = mem.getOffset();
//Getting key lengths
int memKeyLen = Bytes.toInt(memBuffer, memOffset);
memOffset += Bytes.SIZEOF_INT;
//Skipping value lengths
memOffset += Bytes.SIZEOF_INT;
//Getting row lengths
short memRowLen = Bytes.toShort(memBuffer, memOffset);
memOffset += Bytes.SIZEOF_SHORT;
int res = comparator.compareRows(memBuffer, memOffset, memRowLen,
deleteBuffer, deleteRowOffset, deleteRowLength);
if(res > 0) {
return DeleteCode.DONE;
} else if(res < 0){
return DeleteCode.SKIP;
}
memOffset += memRowLen;
//Getting family lengths
byte memFamLen = memBuffer[memOffset];
memOffset += Bytes.SIZEOF_BYTE + memFamLen;
//Get column lengths
int memQualifierLen = memKeyLen - memRowLen - memFamLen -
Bytes.SIZEOF_SHORT - Bytes.SIZEOF_BYTE - Bytes.SIZEOF_LONG -
Bytes.SIZEOF_BYTE;
//Compare timestamp
int tsOffset = memOffset + memQualifierLen;
int timeRes = Bytes.compareTo(memBuffer, tsOffset, Bytes.SIZEOF_LONG,
deleteBuffer, deleteTimeOffset, Bytes.SIZEOF_LONG);
if (deleteType == KeyValue.Type.DeleteFamily.getCode()) {
if (timeRes <= 0) {
return DeleteCode.DELETE;
}
return DeleteCode.SKIP;
}
//Compare columns
res = Bytes.compareTo(memBuffer, memOffset, memQualifierLen,
deleteBuffer, deleteQualifierOffset, deleteQualifierLength);
if (res < 0) {
return DeleteCode.SKIP;
} else if(res > 0) {
return DeleteCode.DONE;
}
// same column, compare the time.
if (timeRes == 0) {
return DeleteCode.DELETE;
} else if (timeRes < 0) {
if (deleteType == KeyValue.Type.DeleteColumn.getCode()) {
return DeleteCode.DELETE;
}
return DeleteCode.DONE;
} else {
return DeleteCode.SKIP;
}
}
}
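The qualifier-length subtraction in deleteCompare() falls out of the KeyValue key layout: a 2-byte row length, the row bytes, a 1-byte family length, the family bytes, the qualifier bytes, an 8-byte timestamp, and a 1-byte type. A small worked check of that arithmetic, using illustrative values that match the test data later in this change:
public class KeyLayoutCheck {
public static void main(String[] args) {
// Key layout: [short rowLen][row][byte famLen][family][qualifier][long ts][byte type]
int rowLen = "row11".length();   // 5
int famLen = "fam".length();     // 3
int qualLen = "col1".length();   // 4
int keyLen = 2 + rowLen + 1 + famLen + qualLen + 8 + 1;   // 24
// The same subtraction deleteCompare() performs to recover the qualifier:
int recovered = keyLen - rowLen - famLen - 2 - 1 - 8 - 1;
System.out.println(recovered == qualLen);                 // prints true
}
}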

View File

@ -1,97 +1,97 @@
/**
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
/**
* This interface is used for the tracking and enforcement of Deletes
* during the course of a Get or Scan operation.
* <p>
* This class is utilized through three methods:
* <ul><li>{@link #add} when encountering a Delete
* <li>{@link #isDeleted} when checking if a Put KeyValue has been deleted
* <li>{@link #update} when reaching the end of a StoreFile
*/
public interface DeleteTracker {
/**
* Add the specified KeyValue to the list of deletes to check against for
* this row operation.
* <p>
* This is called when a Delete is encountered in a StoreFile.
* @param buffer KeyValue buffer
* @param qualifierOffset column qualifier offset
* @param qualifierLength column qualifier length
* @param timestamp timestamp
* @param type delete type as byte
*/
public void add(byte [] buffer, int qualifierOffset, int qualifierLength,
long timestamp, byte type);
/**
* Check if the specified KeyValue buffer has been deleted by a previously
* seen delete.
* @param buffer KeyValue buffer
* @param qualifierOffset column qualifier offset
* @param qualifierLength column qualifier length
* @param timestamp timestamp
* @return true if the specified KeyValue is deleted, false if not
*/
public boolean isDeleted(byte [] buffer, int qualifierOffset,
int qualifierLength, long timestamp);
/**
* @return true if there are no current deletes, false otherwise
*/
public boolean isEmpty();
/**
* Called at the end of every StoreFile.
* <p>
* Many optimized implementations of Trackers will require an update
* when the end of each StoreFile is reached.
*/
public void update();
/**
* Called between rows.
* <p>
* This clears everything as if a new DeleteTracker was instantiated.
*/
public void reset();
/**
* Return codes for comparison of two Deletes.
* <p>
* The codes tell the merging function what to do.
* <p>
* INCLUDE means add the specified Delete to the merged list.
* NEXT means move to the next element in the specified list(s).
*/
enum DeleteCompare {
INCLUDE_OLD_NEXT_OLD,
INCLUDE_OLD_NEXT_BOTH,
INCLUDE_NEW_NEXT_NEW,
INCLUDE_NEW_NEXT_BOTH,
NEXT_OLD,
NEXT_NEW
}
}

View File

@ -1,111 +1,111 @@
/*
* Copyright 2009 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;
/**
* This class is responsible for the tracking and enforcement of Deletes
* during the course of a Get operation.
* <p>
* This class is utilized through three methods:
* <ul><li>{@link #add} when encountering a Delete
* <li>{@link #isDeleted} when checking if a Put KeyValue has been deleted
* <li>{@link #update} when reaching the end of a StoreFile
* <p>
* This class is NOT thread-safe as queries are never multi-threaded
*/
public class GetDeleteTracker implements DeleteTracker {
private static long UNSET = -1L;
private long familyStamp = UNSET;
protected List<Delete> deletes = null;
private List<Delete> newDeletes = new ArrayList<Delete>();
private Iterator<Delete> iterator;
private Delete delete = null;
/**
* Constructor
*/
public GetDeleteTracker() {}
/**
* Add the specified KeyValue to the list of deletes to check against for
* this row operation.
* <p>
* This is called when a Delete is encountered in a StoreFile.
* @param buffer
* @param qualifierOffset
* @param qualifierLength
* @param timestamp
* @param type
*/
@Override
public void add(byte [] buffer, int qualifierOffset, int qualifierLength,
long timestamp, byte type) {
if (type == KeyValue.Type.DeleteFamily.getCode()) {
if(timestamp > familyStamp) {
familyStamp = timestamp;
}
return;
}
if(timestamp > familyStamp) {
this.newDeletes.add(new Delete(buffer, qualifierOffset, qualifierLength,
type, timestamp));
}
}
/**
* Check if the specified KeyValue buffer has been deleted by a previously
* seen delete.
* @param buffer KeyValue buffer
* @param qualifierOffset column qualifier offset
* @param qualifierLength column qualifier length
* @param timestamp timestamp
* @return true if the specified KeyValue is deleted, false if not
*/
@Override
public boolean isDeleted(byte [] buffer, int qualifierOffset,
int qualifierLength, long timestamp) {
// Check against DeleteFamily
if (timestamp <= familyStamp) {
return true;
}
// Check if there are other deletes
if (this.delete == null) {
return false;
}
// Check column
int ret = Bytes.compareTo(buffer, qualifierOffset, qualifierLength,
this.delete.buffer, this.delete.qualifierOffset,
this.delete.qualifierLength);
while (ret != 0) {
if (ret <= -1) {
// Have not reached the next delete yet
return false;
} else if (ret >= 1) {
// Deletes an earlier column, need to move down deletes
if (this.iterator.hasNext()) {
@ -118,287 +118,287 @@ public class GetDeleteTracker implements DeleteTracker {
this.delete.buffer, this.delete.qualifierOffset,
this.delete.qualifierLength);
}
}
}
}
// Check Timestamp
if(timestamp > this.delete.timestamp) {
return false;
}
// Check Type
switch(KeyValue.Type.codeToType(this.delete.type)) {
case Delete:
boolean equal = timestamp == this.delete.timestamp;
if(this.iterator.hasNext()) {
this.delete = this.iterator.next();
} else {
this.delete = null;
}
if(equal){
return true;
}
// timestamp < this.delete.timestamp
// Delete of an explicit column newer than current
return isDeleted(buffer, qualifierOffset, qualifierLength, timestamp);
case DeleteColumn:
return true;
}
// should never reach this
return false;
}
@Override
public boolean isEmpty() {
return this.familyStamp == UNSET && this.delete == null &&
this.newDeletes.isEmpty();
}
@Override
public void reset() {
this.deletes = null;
this.delete = null;
this.newDeletes = new ArrayList<Delete>();
this.familyStamp = UNSET;
this.iterator = null;
}
/**
* Called at the end of every StoreFile.
* <p>
* Many optimized implementations of Trackers will require an update
* when the end of each StoreFile is reached.
*/
@Override
public void update() {
// If no previous deletes, use new deletes and return
if (this.deletes == null || this.deletes.size() == 0) {
finalize(this.newDeletes);
return;
}
// If no new delete, retain previous deletes and return
if(this.newDeletes.size() == 0) {
return;
}
// Merge previous deletes with new deletes
List<Delete> mergeDeletes =
new ArrayList<Delete>(this.newDeletes.size());
int oldIndex = 0;
int newIndex = 0;
Delete newDelete = newDeletes.get(newIndex);
Delete oldDelete = deletes.get(oldIndex);
while(true) {
switch(compareDeletes(oldDelete,newDelete)) {
case NEXT_NEW: {
if(++newIndex == newDeletes.size()) {
// Done with new, add the rest of old to merged and return
mergeDown(mergeDeletes, deletes, oldIndex);
finalize(mergeDeletes);
return;
}
newDelete = this.newDeletes.get(newIndex);
break;
}
case INCLUDE_NEW_NEXT_NEW: {
mergeDeletes.add(newDelete);
if(++newIndex == newDeletes.size()) {
// Done with new, add the rest of old to merged and return
mergeDown(mergeDeletes, deletes, oldIndex);
finalize(mergeDeletes);
return;
}
newDelete = this.newDeletes.get(newIndex);
break;
}
case INCLUDE_NEW_NEXT_BOTH: {
mergeDeletes.add(newDelete);
++oldIndex;
++newIndex;
if(oldIndex == deletes.size()) {
if(newIndex == newDeletes.size()) {
finalize(mergeDeletes);
return;
}
mergeDown(mergeDeletes, newDeletes, newIndex);
finalize(mergeDeletes);
return;
} else if(newIndex == newDeletes.size()) {
mergeDown(mergeDeletes, deletes, oldIndex);
finalize(mergeDeletes);
return;
}
oldDelete = this.deletes.get(oldIndex);
newDelete = this.newDeletes.get(newIndex);
break;
}
case INCLUDE_OLD_NEXT_BOTH: {
mergeDeletes.add(oldDelete);
++oldIndex;
++newIndex;
if(oldIndex == deletes.size()) {
if(newIndex == newDeletes.size()) {
finalize(mergeDeletes);
return;
}
mergeDown(mergeDeletes, newDeletes, newIndex);
finalize(mergeDeletes);
return;
} else if(newIndex == newDeletes.size()) {
mergeDown(mergeDeletes, deletes, oldIndex);
finalize(mergeDeletes);
return;
}
oldDelete = this.deletes.get(oldIndex);
newDelete = this.newDeletes.get(newIndex);
break;
}
case INCLUDE_OLD_NEXT_OLD: {
mergeDeletes.add(oldDelete);
if(++oldIndex == deletes.size()) {
mergeDown(mergeDeletes, newDeletes, newIndex);
finalize(mergeDeletes);
return;
}
oldDelete = this.deletes.get(oldIndex);
break;
}
case NEXT_OLD: {
if(++oldIndex == deletes.size()) {
// Done with old, add the rest of new to merged and return
mergeDown(mergeDeletes, newDeletes, newIndex);
finalize(mergeDeletes);
return;
}
oldDelete = this.deletes.get(oldIndex);
}
}
}
}
private void finalize(List<Delete> mergeDeletes) {
this.deletes = mergeDeletes;
this.newDeletes = new ArrayList<Delete>();
if(this.deletes.size() > 0){
this.iterator = deletes.iterator();
this.delete = iterator.next();
}
}
private void mergeDown(List<Delete> mergeDeletes, List<Delete> srcDeletes,
int srcIndex) {
int index = srcIndex;
while(index < srcDeletes.size()) {
mergeDeletes.add(srcDeletes.get(index++));
}
}
protected DeleteCompare compareDeletes(Delete oldDelete, Delete newDelete) {
// Compare columns
// Just comparing the qualifier portion, so we can keep using Bytes.compareTo().
int ret = Bytes.compareTo(oldDelete.buffer, oldDelete.qualifierOffset,
oldDelete.qualifierLength, newDelete.buffer, newDelete.qualifierOffset,
newDelete.qualifierLength);
if(ret <= -1) {
return DeleteCompare.INCLUDE_OLD_NEXT_OLD;
} else if(ret >= 1) {
return DeleteCompare.INCLUDE_NEW_NEXT_NEW;
}
// Same column
// Branches below can be optimized. Keeping like this until testing
// is complete.
if(oldDelete.type == newDelete.type) {
// the one case where we can merge 2 deletes -> 1 delete.
if(oldDelete.type == KeyValue.Type.Delete.getCode()){
if(oldDelete.timestamp > newDelete.timestamp) {
return DeleteCompare.INCLUDE_OLD_NEXT_OLD;
} else if(oldDelete.timestamp < newDelete.timestamp) {
return DeleteCompare.INCLUDE_NEW_NEXT_NEW;
} else {
return DeleteCompare.INCLUDE_OLD_NEXT_BOTH;
}
}
if(oldDelete.timestamp < newDelete.timestamp) {
return DeleteCompare.INCLUDE_NEW_NEXT_BOTH;
}
return DeleteCompare.INCLUDE_OLD_NEXT_BOTH;
}
// old delete is more specific than the new delete.
// if the old delete is newer than the new delete, we have to
// keep it
if(oldDelete.type < newDelete.type) {
if(oldDelete.timestamp > newDelete.timestamp) {
return DeleteCompare.INCLUDE_OLD_NEXT_OLD;
} else if(oldDelete.timestamp < newDelete.timestamp) {
return DeleteCompare.NEXT_OLD;
} else {
return DeleteCompare.NEXT_OLD;
}
}
// new delete is more specific than the old delete.
if(oldDelete.type > newDelete.type) {
if(oldDelete.timestamp > newDelete.timestamp) {
return DeleteCompare.NEXT_NEW;
} else if(oldDelete.timestamp < newDelete.timestamp) {
return DeleteCompare.INCLUDE_NEW_NEXT_NEW;
} else {
return DeleteCompare.NEXT_NEW;
}
}
// Should never reach,
// throw exception for assertion?
throw new RuntimeException("GetDeleteTracker:compareDelete reached terminal state");
}
/**
* Internal class used to store the necessary information for a Delete.
* <p>
* Rather than reparsing the KeyValue, or copying fields, this class points
* to the underlying KeyValue buffer with pointers to the qualifier and fields
* for type and timestamp. No parsing work is done in DeleteTracker now.
* <p>
* Fields are public because they are accessed often, directly, and only
* within this class.
*/
protected static class Delete {
byte [] buffer;
int qualifierOffset;
int qualifierLength;
byte type;
long timestamp;
/**
* Constructor
* @param buffer
* @param qualifierOffset
* @param qualifierLength
* @param type
* @param timestamp
*/
public Delete(byte [] buffer, int qualifierOffset, int qualifierLength,
byte type, long timestamp) {
this.buffer = buffer;
this.qualifierOffset = qualifierOffset;
this.qualifierLength = qualifierLength;
this.type = type;
this.timestamp = timestamp;
}
}
}
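To make the DeleteTracker contract above concrete, here is a small usage sketch of GetDeleteTracker exercising the add/update/isDeleted/reset sequence with a family-wide delete. The qualifier and timestamps are invented for illustration; the class and method names come from the code above.
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;
public class GetDeleteTrackerExample {
public static void main(String[] args) {
GetDeleteTracker tracker = new GetDeleteTracker();
byte[] qualifier = Bytes.toBytes("col1");
// A DeleteFamily at timestamp 200 masks everything at or below 200.
tracker.add(qualifier, 0, qualifier.length, 200L,
KeyValue.Type.DeleteFamily.getCode());
tracker.update();   // promote the deletes seen in this store file
// Older put is masked; newer put survives.
System.out.println(tracker.isDeleted(qualifier, 0, qualifier.length, 150L)); // true
System.out.println(tracker.isDeleted(qualifier, 0, qualifier.length, 250L)); // false
tracker.reset();    // clear state before moving to the next row
System.out.println(tracker.isEmpty());                                       // true
}
}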

View File

@ -1,120 +1,120 @@
<%@ page contentType="text/html;charset=UTF-8"
import="java.util.*"
import="java.net.URLEncoder"
import="org.apache.hadoop.conf.Configuration"
import="org.apache.hadoop.io.Text"
import="org.apache.hadoop.hbase.util.Bytes"
import="org.apache.hadoop.hbase.util.FSUtils"
import="org.apache.hadoop.hbase.master.HMaster"
import="org.apache.hadoop.hbase.HConstants"
import="org.apache.hadoop.hbase.master.MetaRegion"
import="org.apache.hadoop.hbase.client.HBaseAdmin"
import="org.apache.hadoop.hbase.io.ImmutableBytesWritable"
import="org.apache.hadoop.hbase.HServerInfo"
import="org.apache.hadoop.hbase.HServerAddress"
import="org.apache.hadoop.hbase.HBaseConfiguration"
import="org.apache.hadoop.hbase.HColumnDescriptor"
import="org.apache.hadoop.hbase.HTableDescriptor" %><%
HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
Configuration conf = master.getConfiguration();
HServerAddress rootLocation = master.getRegionManager().getRootRegionLocation();
Map<byte [], MetaRegion> onlineRegions = master.getRegionManager().getOnlineMetaRegions();
Map<String, HServerInfo> serverToServerInfos =
master.getServerManager().getServersToServerInfo();
int interval = conf.getInt("hbase.regionserver.msginterval", 1000)/1000;
if (interval == 0) {
interval = 1;
}
Map<String, Integer> frags = master.getTableFragmentation();
%><?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
<title>HBase Master: <%= master.getMasterAddress().getHostname()%>:<%= master.getMasterAddress().getPort() %></title>
<link rel="stylesheet" type="text/css" href="/static/hbase.css" />
</head>
<body>
<a id="logo" href="http://wiki.apache.org/lucene-hadoop/Hbase"><img src="/static/hbase_logo_med.gif" alt="HBase Logo" title="HBase Logo" /></a>
<h1 id="page_title">Master: <%=master.getMasterAddress().getHostname()%>:<%=master.getMasterAddress().getPort()%></h1>
<p id="links_menu"><a href="/logs/">Local logs</a>, <a href="/stacks">Thread Dump</a>, <a href="/logLevel">Log Level</a></p>
<hr id="head_rule" />
<h2>Master Attributes</h2>
<table>
<tr><th>Attribute Name</th><th>Value</th><th>Description</th></tr>
<tr><td>HBase Version</td><td><%= org.apache.hadoop.hbase.util.VersionInfo.getVersion() %>, r<%= org.apache.hadoop.hbase.util.VersionInfo.getRevision() %></td><td>HBase version and svn revision</td></tr>
<tr><td>HBase Compiled</td><td><%= org.apache.hadoop.hbase.util.VersionInfo.getDate() %>, <%= org.apache.hadoop.hbase.util.VersionInfo.getUser() %></td><td>When HBase version was compiled and by whom</td></tr>
<tr><td>Hadoop Version</td><td><%= org.apache.hadoop.util.VersionInfo.getVersion() %>, r<%= org.apache.hadoop.util.VersionInfo.getRevision() %></td><td>Hadoop version and svn revision</td></tr>
<tr><td>Hadoop Compiled</td><td><%= org.apache.hadoop.util.VersionInfo.getDate() %>, <%= org.apache.hadoop.util.VersionInfo.getUser() %></td><td>When Hadoop version was compiled and by whom</td></tr>
<tr><td>HBase Root Directory</td><td><%= master.getRootDir().toString() %></td><td>Location of HBase home directory</td></tr>
<tr><td>Load average</td><td><%= master.getServerManager().getAverageLoad() %></td><td>Average number of regions per regionserver. Naive computation.</td></tr>
<tr><td>Regions On FS</td><td><%= master.getRegionManager().countRegionsOnFS() %></td><td>Number of regions on FileSystem. Rough count.</td></tr>
<tr><td>Fragmentation</td><td><%= frags.get("-TOTAL-") != null ? frags.get("-TOTAL-").intValue() + "%" : "n/a" %></td><td>Overall fragmentation of all tables, including .META. and -ROOT-.</td></tr>
<tr><td>Zookeeper Quorum</td><td><%= master.getZooKeeperWrapper().getQuorumServers() %></td><td>Addresses of all registered ZK servers. For more, see <a href="/zk.jsp">zk dump</a>.</td></tr>
</table>
<h2>Catalog Tables</h2>
<%
if (rootLocation != null) { %>
<table>
<tr><th>Table</th><th title="Fragmentation - Will be 0% after a major compaction and fluctuate during normal usage.">Frag.</th><th>Description</th></tr>
<tr><td><a href="/table.jsp?name=<%= Bytes.toString(HConstants.ROOT_TABLE_NAME) %>"><%= Bytes.toString(HConstants.ROOT_TABLE_NAME) %></a></td>
<td align="center"><%= frags.get("-ROOT-") != null ? frags.get("-ROOT-").intValue() + "%" : "n/a" %></td>
<td>The -ROOT- table holds references to all .META. regions.</td>
</tr>
<%
if (onlineRegions != null && onlineRegions.size() > 0) { %>
<tr><td><a href="/table.jsp?name=<%= Bytes.toString(HConstants.META_TABLE_NAME) %>"><%= Bytes.toString(HConstants.META_TABLE_NAME) %></a></td>
<td align="center"><%= frags.get(".META.") != null ? frags.get(".META.").intValue() + "%" : "n/a" %></td>
<td>The .META. table holds references to all User Table regions</td>
</tr>
<% } %>
</table>
<%} %>
<h2>User Tables</h2>
<% HTableDescriptor[] tables = new HBaseAdmin(conf).listTables();
if(tables != null && tables.length > 0) { %>
<table>
<tr><th>Table</th><th title="Fragmentation - Will be 0% after a major compaction and fluctuate during normal usage.">Frag.</th><th>Description</th></tr>
<% for(HTableDescriptor htDesc : tables ) { %>
<tr><td><a href=/table.jsp?name=<%= htDesc.getNameAsString() %>><%= htDesc.getNameAsString() %></a> </td>
<td align="center"><%= frags.get(htDesc.getNameAsString()) != null ? frags.get(htDesc.getNameAsString()).intValue() + "%" : "n/a" %></td>
<td><%= htDesc.toString() %></td>
</tr>
<% } %>
<p> <%= tables.length %> table(s) in set.</p>
</table>
<% } %>
<h2>Region Servers</h2>
<% if (serverToServerInfos != null && serverToServerInfos.size() > 0) { %>
<% int totalRegions = 0;
int totalRequests = 0;
%>
<table>
<tr><th rowspan="<%= serverToServerInfos.size() + 1%>"></th><th>Address</th><th>Start Code</th><th>Load</th></tr>
<% String[] serverNames = serverToServerInfos.keySet().toArray(new String[serverToServerInfos.size()]);
Arrays.sort(serverNames);
for (String serverName: serverNames) {
HServerInfo hsi = serverToServerInfos.get(serverName);
String hostname = hsi.getServerAddress().getHostname() + ":" + hsi.getInfoPort();
String url = "http://" + hostname + "/";
totalRegions += hsi.getLoad().getNumberOfRegions();
totalRequests += hsi.getLoad().getNumberOfRequests() / interval;
long startCode = hsi.getStartCode();
%>
<tr><td><a href="<%= url %>"><%= hostname %></a></td><td><%= startCode %></td><td><%= hsi.getLoad().toString(interval) %></td></tr>
<% } %>
<tr><th>Total: </th><td>servers: <%= serverToServerInfos.size() %></td><td>&nbsp;</td><td>requests=<%= totalRequests %>, regions=<%= totalRegions %></td></tr>
</table>
<p>Load is requests per second and count of regions loaded</p>
<% } %>
</body>
</html>
import="org.apache.hadoop.io.Text"
import="org.apache.hadoop.hbase.util.Bytes"
import="org.apache.hadoop.hbase.util.FSUtils"
import="org.apache.hadoop.hbase.master.HMaster"
import="org.apache.hadoop.hbase.HConstants"
import="org.apache.hadoop.hbase.master.MetaRegion"
import="org.apache.hadoop.hbase.client.HBaseAdmin"
import="org.apache.hadoop.hbase.io.ImmutableBytesWritable"
import="org.apache.hadoop.hbase.HServerInfo"
import="org.apache.hadoop.hbase.HServerAddress"
import="org.apache.hadoop.hbase.HBaseConfiguration"
import="org.apache.hadoop.hbase.HColumnDescriptor"
import="org.apache.hadoop.hbase.HTableDescriptor" %><%
HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
Configuration conf = master.getConfiguration();
HServerAddress rootLocation = master.getRegionManager().getRootRegionLocation();
Map<byte [], MetaRegion> onlineRegions = master.getRegionManager().getOnlineMetaRegions();
Map<String, HServerInfo> serverToServerInfos =
master.getServerManager().getServersToServerInfo();
int interval = conf.getInt("hbase.regionserver.msginterval", 1000)/1000;
if (interval == 0) {
interval = 1;
}
Map<String, Integer> frags = master.getTableFragmentation();
%><?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
<title>HBase Master: <%= master.getMasterAddress().getHostname()%>:<%= master.getMasterAddress().getPort() %></title>
<link rel="stylesheet" type="text/css" href="/static/hbase.css" />
</head>
<body>
<a id="logo" href="http://wiki.apache.org/lucene-hadoop/Hbase"><img src="/static/hbase_logo_med.gif" alt="HBase Logo" title="HBase Logo" /></a>
<h1 id="page_title">Master: <%=master.getMasterAddress().getHostname()%>:<%=master.getMasterAddress().getPort()%></h1>
<p id="links_menu"><a href="/logs/">Local logs</a>, <a href="/stacks">Thread Dump</a>, <a href="/logLevel">Log Level</a></p>
<hr id="head_rule" />
<h2>Master Attributes</h2>
<table>
<tr><th>Attribute Name</th><th>Value</th><th>Description</th></tr>
<tr><td>HBase Version</td><td><%= org.apache.hadoop.hbase.util.VersionInfo.getVersion() %>, r<%= org.apache.hadoop.hbase.util.VersionInfo.getRevision() %></td><td>HBase version and svn revision</td></tr>
<tr><td>HBase Compiled</td><td><%= org.apache.hadoop.hbase.util.VersionInfo.getDate() %>, <%= org.apache.hadoop.hbase.util.VersionInfo.getUser() %></td><td>When HBase version was compiled and by whom</td></tr>
<tr><td>Hadoop Version</td><td><%= org.apache.hadoop.util.VersionInfo.getVersion() %>, r<%= org.apache.hadoop.util.VersionInfo.getRevision() %></td><td>Hadoop version and svn revision</td></tr>
<tr><td>Hadoop Compiled</td><td><%= org.apache.hadoop.util.VersionInfo.getDate() %>, <%= org.apache.hadoop.util.VersionInfo.getUser() %></td><td>When Hadoop version was compiled and by whom</td></tr>
<tr><td>HBase Root Directory</td><td><%= master.getRootDir().toString() %></td><td>Location of HBase home directory</td></tr>
<tr><td>Load average</td><td><%= master.getServerManager().getAverageLoad() %></td><td>Average number of regions per regionserver. Naive computation.</td></tr>
<tr><td>Regions On FS</td><td><%= master.getRegionManager().countRegionsOnFS() %></td><td>Number of regions on FileSystem. Rough count.</td></tr>
<tr><td>Fragmentation</td><td><%= frags.get("-TOTAL-") != null ? frags.get("-TOTAL-").intValue() + "%" : "n/a" %></td><td>Overall fragmentation of all tables, including .META. and -ROOT-.</td></tr>
<tr><td>Zookeeper Quorum</td><td><%= master.getZooKeeperWrapper().getQuorumServers() %></td><td>Addresses of all registered ZK servers. For more, see <a href="/zk.jsp">zk dump</a>.</td></tr>
</table>
<h2>Catalog Tables</h2>
<%
if (rootLocation != null) { %>
<table>
<tr><th>Table</th><th title="Fragmentation - Will be 0% after a major compaction and fluctuate during normal usage.">Frag.</th><th>Description</th></tr>
<tr><td><a href="/table.jsp?name=<%= Bytes.toString(HConstants.ROOT_TABLE_NAME) %>"><%= Bytes.toString(HConstants.ROOT_TABLE_NAME) %></a></td>
<td align="center"><%= frags.get("-ROOT-") != null ? frags.get("-ROOT-").intValue() + "%" : "n/a" %></td>
<td>The -ROOT- table holds references to all .META. regions.</td>
</tr>
<%
if (onlineRegions != null && onlineRegions.size() > 0) { %>
<tr><td><a href="/table.jsp?name=<%= Bytes.toString(HConstants.META_TABLE_NAME) %>"><%= Bytes.toString(HConstants.META_TABLE_NAME) %></a></td>
<td align="center"><%= frags.get(".META.") != null ? frags.get(".META.").intValue() + "%" : "n/a" %></td>
<td>The .META. table holds references to all User Table regions</td>
</tr>
<% } %>
</table>
<%} %>
<h2>User Tables</h2>
<% HTableDescriptor[] tables = new HBaseAdmin(conf).listTables();
if(tables != null && tables.length > 0) { %>
<table>
<tr><th>Table</th><th title="Fragmentation - Will be 0% after a major compaction and fluctuate during normal usage.">Frag.</th><th>Description</th></tr>
<% for(HTableDescriptor htDesc : tables ) { %>
<tr><td><a href=/table.jsp?name=<%= htDesc.getNameAsString() %>><%= htDesc.getNameAsString() %></a> </td>
<td align="center"><%= frags.get(htDesc.getNameAsString()) != null ? frags.get(htDesc.getNameAsString()).intValue() + "%" : "n/a" %></td>
<td><%= htDesc.toString() %></td>
</tr>
<% } %>
<p> <%= tables.length %> table(s) in set.</p>
</table>
<% } %>
<h2>Region Servers</h2>
<% if (serverToServerInfos != null && serverToServerInfos.size() > 0) { %>
<% int totalRegions = 0;
int totalRequests = 0;
%>
<table>
<tr><th rowspan="<%= serverToServerInfos.size() + 1%>"></th><th>Address</th><th>Start Code</th><th>Load</th></tr>
<% String[] serverNames = serverToServerInfos.keySet().toArray(new String[serverToServerInfos.size()]);
Arrays.sort(serverNames);
for (String serverName: serverNames) {
HServerInfo hsi = serverToServerInfos.get(serverName);
String hostname = hsi.getServerAddress().getHostname() + ":" + hsi.getInfoPort();
String url = "http://" + hostname + "/";
totalRegions += hsi.getLoad().getNumberOfRegions();
totalRequests += hsi.getLoad().getNumberOfRequests() / interval;
long startCode = hsi.getStartCode();
%>
<tr><td><a href="<%= url %>"><%= hostname %></a></td><td><%= startCode %></td><td><%= hsi.getLoad().toString(interval) %></td></tr>
<% } %>
<tr><th>Total: </th><td>servers: <%= serverToServerInfos.size() %></td><td>&nbsp;</td><td>requests=<%= totalRequests %>, regions=<%= totalRegions %></td></tr>
</table>
<p>Load is requests per second and count of regions loaded</p>
<% } %>
</body>
</html>

View File

@ -1,191 +1,191 @@
package org.apache.hadoop.hbase.regionserver;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueTestUtil;
import org.apache.hadoop.hbase.regionserver.DeleteCompare.DeleteCode;
import org.apache.hadoop.hbase.util.Bytes;
import junit.framework.TestCase;
public class TestDeleteCompare extends TestCase {
//Cases to compare:
//1. DeleteFamily and whatever of the same row
//2. DeleteColumn and whatever of the same row + qualifier
//3. Delete and the matching put
//4. Big test that includes starting on the wrong row and qualifier
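//DeleteCode outcomes, as exercised by these tests: SKIP means the cell is
//not covered by the delete and scanning continues, DELETE means the cell is
//covered and should be removed, DONE means no later cell can match and the
//comparison stops.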
public void testDeleteCompare_DeleteFamily() {
//Creating memstore
Set<KeyValue> memstore = new TreeSet<KeyValue>(KeyValue.COMPARATOR);
memstore.add(KeyValueTestUtil.create("row11", "fam", "col1", 3, "d-c"));
memstore.add(KeyValueTestUtil.create("row11", "fam", "col1", 2, "d-c"));
memstore.add(KeyValueTestUtil.create("row11", "fam", "col1", 1, "d-c"));
memstore.add(KeyValueTestUtil.create("row11", "fam", "col2", 1, "d-c"));
memstore.add(KeyValueTestUtil.create("row11", "fam", "col3", 3, "d-c"));
memstore.add(KeyValueTestUtil.create("row11", "fam", "col3", 2, "d-c"));
memstore.add(KeyValueTestUtil.create("row11", "fam", "col3", 1, "d-c"));
memstore.add(KeyValueTestUtil.create("row21", "fam", "col1", 1, "d-c"));
//Creating expected result
List<DeleteCode> expected = new ArrayList<DeleteCode>();
expected.add(DeleteCode.SKIP);
expected.add(DeleteCode.DELETE);
expected.add(DeleteCode.DELETE);
expected.add(DeleteCode.DELETE);
expected.add(DeleteCode.SKIP);
expected.add(DeleteCode.DELETE);
expected.add(DeleteCode.DELETE);
expected.add(DeleteCode.DONE);
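//The DeleteFamily at timestamp 2 covers every column's versions at or below
//it: the timestamp-3 cells in col1 and col3 survive (SKIP), the rest of
//row11 is covered (DELETE), and row21 ends the scan (DONE).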
KeyValue delete = KeyValueTestUtil.create("row11",
"fam", "", 2, KeyValue.Type.DeleteFamily, "dont-care");
byte [] deleteBuffer = delete.getBuffer();
int deleteRowOffset = delete.getRowOffset();
short deleteRowLen = delete.getRowLength();
int deleteQualifierOffset = delete.getQualifierOffset();
int deleteQualifierLen = delete.getQualifierLength();
int deleteTimestampOffset = deleteQualifierOffset + deleteQualifierLen;
byte deleteType = deleteBuffer[deleteTimestampOffset + Bytes.SIZEOF_LONG];
List<DeleteCode> actual = new ArrayList<DeleteCode>();
for (KeyValue mem : memstore) {
actual.add(DeleteCompare.deleteCompare(mem, deleteBuffer, deleteRowOffset,
deleteRowLen, deleteQualifierOffset, deleteQualifierLen,
deleteTimestampOffset, deleteType, KeyValue.KEY_COMPARATOR));
}
assertEquals(expected.size(), actual.size());
for (int i = 0; i < expected.size(); i++) {
assertEquals(expected.get(i), actual.get(i));
}
}
public void testDeleteCompare_DeleteColumn() {
//Creating memstore
Set<KeyValue> memstore = new TreeSet<KeyValue>(KeyValue.COMPARATOR);
memstore.add(KeyValueTestUtil.create("row11", "fam", "col1", 3, "d-c"));
memstore.add(KeyValueTestUtil.create("row11", "fam", "col1", 2, "d-c"));
memstore.add(KeyValueTestUtil.create("row11", "fam", "col1", 1, "d-c"));
memstore.add(KeyValueTestUtil.create("row21", "fam", "col1", 1, "d-c"));
//Creating expected result
List<DeleteCode> expected = new ArrayList<DeleteCode>();
expected.add(DeleteCode.SKIP);
expected.add(DeleteCode.DELETE);
expected.add(DeleteCode.DELETE);
expected.add(DeleteCode.DONE);
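//The DeleteColumn at timestamp 2 covers versions at or below it: the newer
//cell at timestamp 3 survives (SKIP), timestamps 2 and 1 are covered
//(DELETE), and row21 is past the delete's row, ending the scan (DONE).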
KeyValue delete = KeyValueTestUtil.create("row11", "fam", "col1", 2,
KeyValue.Type.DeleteColumn, "dont-care");
byte [] deleteBuffer = delete.getBuffer();
int deleteRowOffset = delete.getRowOffset();
short deleteRowLen = delete.getRowLength();
int deleteQualifierOffset = delete.getQualifierOffset();
int deleteQualifierLen = delete.getQualifierLength();
int deleteTimestampOffset = deleteQualifierOffset + deleteQualifierLen;
byte deleteType = deleteBuffer[deleteTimestampOffset + Bytes.SIZEOF_LONG];
List<DeleteCode> actual = new ArrayList<DeleteCode>();
for (KeyValue mem : memstore) {
actual.add(DeleteCompare.deleteCompare(mem, deleteBuffer, deleteRowOffset,
deleteRowLen, deleteQualifierOffset, deleteQualifierLen,
deleteTimestampOffset, deleteType, KeyValue.KEY_COMPARATOR));
}
assertEquals(expected.size(), actual.size());
for (int i = 0; i < expected.size(); i++) {
assertEquals(expected.get(i), actual.get(i));
}
}
public void testDeleteCompare_Delete() {
//Creating memstore
Set<KeyValue> memstore = new TreeSet<KeyValue>(KeyValue.COMPARATOR);
memstore.add(KeyValueTestUtil.create("row11", "fam", "col1", 3, "d-c"));
memstore.add(KeyValueTestUtil.create("row11", "fam", "col1", 2, "d-c"));
memstore.add(KeyValueTestUtil.create("row11", "fam", "col1", 1, "d-c"));
//Creating expected result
List<DeleteCode> expected = new ArrayList<DeleteCode>();
expected.add(DeleteCode.SKIP);
expected.add(DeleteCode.DELETE);
expected.add(DeleteCode.DONE);
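//A plain Delete matches a single version: timestamp 3 is newer (SKIP),
//timestamp 2 is the exact match (DELETE), and the older timestamp 1 sorts
//past the delete, ending the scan (DONE).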
KeyValue delete = KeyValueTestUtil.create("row11", "fam", "col1", 2,
KeyValue.Type.Delete, "dont-care");
byte [] deleteBuffer = delete.getBuffer();
int deleteRowOffset = delete.getRowOffset();
short deleteRowLen = delete.getRowLength();
int deleteQualifierOffset = delete.getQualifierOffset();
int deleteQualifierLen = delete.getQualifierLength();
int deleteTimestampOffset = deleteQualifierOffset + deleteQualifierLen;
byte deleteType = deleteBuffer[deleteTimestampOffset + Bytes.SIZEOF_LONG];
List<DeleteCode> actual = new ArrayList<DeleteCode>();
for (KeyValue mem : memstore) {
actual.add(DeleteCompare.deleteCompare(mem, deleteBuffer, deleteRowOffset,
deleteRowLen, deleteQualifierOffset, deleteQualifierLen,
deleteTimestampOffset, deleteType, KeyValue.KEY_COMPARATOR));
}
assertEquals(expected.size(), actual.size());
for (int i = 0; i < expected.size(); i++) {
assertEquals(expected.get(i), actual.get(i));
}
}
public void testDeleteCompare_Multiple() {
//Creating memstore
Set<KeyValue> memstore = new TreeSet<KeyValue>(KeyValue.COMPARATOR);
memstore.add(KeyValueTestUtil.create("row11", "fam", "col1", 1, "d-c"));
memstore.add(KeyValueTestUtil.create("row21", "fam", "col1", 4, "d-c"));
memstore.add(KeyValueTestUtil.create("row21", "fam", "col1", 3, "d-c"));
memstore.add(KeyValueTestUtil.create("row21", "fam", "col1", 2, "d-c"));
memstore.add(KeyValueTestUtil.create("row21", "fam", "col1", 1,
KeyValue.Type.Delete, "dont-care"));
memstore.add(KeyValueTestUtil.create("row31", "fam", "col1", 1, "dont-care"));
//Creating expected result
List<DeleteCode> expected = new ArrayList<DeleteCode>();
expected.add(DeleteCode.SKIP);
expected.add(DeleteCode.DELETE);
expected.add(DeleteCode.DELETE);
expected.add(DeleteCode.DELETE);
expected.add(DeleteCode.DELETE);
expected.add(DeleteCode.DONE);
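//row11 sorts before the delete's row (SKIP); every row21 version, including
//the pre-existing Delete marker at timestamp 1, is at or below the
//DeleteColumn's timestamp 5 (DELETE); row31 is past the delete's row (DONE).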
KeyValue delete = KeyValueTestUtil.create("row21", "fam", "col1", 5,
KeyValue.Type.DeleteColumn, "dont-care");
byte [] deleteBuffer = delete.getBuffer();
int deleteRowOffset = delete.getRowOffset();
short deleteRowLen = delete.getRowLength();
int deleteQualifierOffset = delete.getQualifierOffset();
int deleteQualifierLen = delete.getQualifierLength();
int deleteTimestampOffset = deleteQualifierOffset + deleteQualifierLen;
byte deleteType = deleteBuffer[deleteTimestampOffset + Bytes.SIZEOF_LONG];
List<DeleteCode> actual = new ArrayList<DeleteCode>();
for (KeyValue mem : memstore) {
actual.add(DeleteCompare.deleteCompare(mem, deleteBuffer, deleteRowOffset,
deleteRowLen, deleteQualifierOffset, deleteQualifierLen,
deleteTimestampOffset, deleteType, KeyValue.KEY_COMPARATOR));
}
assertEquals(expected.size(), actual.size());
for (int i = 0; i < expected.size(); i++) {
assertEquals(expected.get(i), actual.get(i));
}
}
}