diff --git a/src/contrib/build-contrib.xml b/src/contrib/build-contrib.xml
index b1d7b2cb544..4f20c7ced81 100644
--- a/src/contrib/build-contrib.xml
+++ b/src/contrib/build-contrib.xml
@@ -70,13 +70,12 @@
+
-
+
+
-
-
-
diff --git a/src/contrib/build.xml b/src/contrib/build.xml
index 6f0e556ad88..468824c1a0d 100644
--- a/src/contrib/build.xml
+++ b/src/contrib/build.xml
@@ -31,6 +31,12 @@
+
+
+
+
+
+
@@ -46,13 +52,10 @@
-
-
-
+
-
diff --git a/src/contrib/stargate/build.xml b/src/contrib/stargate/build.xml
new file mode 100644
index 00000000000..4eae529c3d7
--- /dev/null
+++ b/src/contrib/stargate/build.xml
@@ -0,0 +1,120 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ <fail if="tests.failed">Tests failed!</fail>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/contrib/stargate/lib/asm-3.1.jar b/src/contrib/stargate/lib/asm-3.1.jar
new file mode 100644
index 00000000000..8217cae0a1b
Binary files /dev/null and b/src/contrib/stargate/lib/asm-3.1.jar differ
diff --git a/src/contrib/stargate/lib/jackson-asl-0.9.4.jar b/src/contrib/stargate/lib/jackson-asl-0.9.4.jar
new file mode 100644
index 00000000000..b3444ca9214
Binary files /dev/null and b/src/contrib/stargate/lib/jackson-asl-0.9.4.jar differ
diff --git a/src/contrib/stargate/lib/jaxb-impl-2.1.10.jar b/src/contrib/stargate/lib/jaxb-impl-2.1.10.jar
new file mode 100644
index 00000000000..37ad4cc1fcf
Binary files /dev/null and b/src/contrib/stargate/lib/jaxb-impl-2.1.10.jar differ
diff --git a/src/contrib/stargate/lib/jersey-core-1.1.0-ea.jar b/src/contrib/stargate/lib/jersey-core-1.1.0-ea.jar
new file mode 100644
index 00000000000..d3103268419
Binary files /dev/null and b/src/contrib/stargate/lib/jersey-core-1.1.0-ea.jar differ
diff --git a/src/contrib/stargate/lib/jersey-json-1.1.0-ea.jar b/src/contrib/stargate/lib/jersey-json-1.1.0-ea.jar
new file mode 100644
index 00000000000..53c7ffd798b
Binary files /dev/null and b/src/contrib/stargate/lib/jersey-json-1.1.0-ea.jar differ
diff --git a/src/contrib/stargate/lib/jersey-server-1.1.0-ea.jar b/src/contrib/stargate/lib/jersey-server-1.1.0-ea.jar
new file mode 100644
index 00000000000..006a51572af
Binary files /dev/null and b/src/contrib/stargate/lib/jersey-server-1.1.0-ea.jar differ
diff --git a/src/contrib/stargate/lib/jsr311-api-1.1.jar b/src/contrib/stargate/lib/jsr311-api-1.1.jar
new file mode 100644
index 00000000000..9fc5443f7d8
Binary files /dev/null and b/src/contrib/stargate/lib/jsr311-api-1.1.jar differ
diff --git a/src/contrib/stargate/lib/persistence-api-1.0.jar b/src/contrib/stargate/lib/persistence-api-1.0.jar
new file mode 100644
index 00000000000..fe5dbcd47cc
Binary files /dev/null and b/src/contrib/stargate/lib/persistence-api-1.0.jar differ
diff --git a/src/contrib/stargate/lib/protobuf-java-2.1.0.jar b/src/contrib/stargate/lib/protobuf-java-2.1.0.jar
new file mode 100644
index 00000000000..92e21ad5b29
Binary files /dev/null and b/src/contrib/stargate/lib/protobuf-java-2.1.0.jar differ
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/Constants.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/Constants.java
new file mode 100644
index 00000000000..0ea081556e6
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/Constants.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+public interface Constants {
+ public static final String MIMETYPE_TEXT = "text/plain";
+ public static final String MIMETYPE_XML = "text/xml";
+ public static final String MIMETYPE_BINARY = "application/octet-stream";
+ public static final String MIMETYPE_PROTOBUF = "application/x-protobuf";
+ public static final String MIMETYPE_JSON = "application/json";
+ public static final String MIMETYPE_JAVASCRIPT = "application/x-javascript";
+
+ public static final String PATH_STATUS_CLUSTER = "/status/cluster";
+ public static final String PATH_VERSION = "/version";
+ public static final String PATH_VERSION_CLUSTER = "/version/cluster";
+}
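
The interface is used as a mix-in: resource classes implement Constants so their JAX-RS annotations can name media types without repeating string literals. A minimal hypothetical resource showing the intended use (the class and its path are illustrative, not part of this patch):

    import javax.ws.rs.GET;
    import javax.ws.rs.Path;
    import javax.ws.rs.Produces;

    import org.apache.hadoop.hbase.stargate.Constants;
    import org.apache.hadoop.hbase.stargate.RESTServlet;

    @Path("/example/version")
    public class ExampleResource implements Constants {
      @GET
      @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON})
      public String get() {
        // media types come from the Constants mix-in, not string literals
        return RESTServlet.getVersion();
      }
    }
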
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/Main.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/Main.java
new file mode 100644
index 00000000000..5e49156c236
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/Main.java
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.PosixParser;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.mortbay.jetty.Connector;
+import org.mortbay.jetty.Handler;
+import org.mortbay.jetty.NCSARequestLog;
+import org.mortbay.jetty.Server;
+import org.mortbay.jetty.deployer.WebAppDeployer;
+import org.mortbay.jetty.handler.ContextHandlerCollection;
+import org.mortbay.jetty.handler.DefaultHandler;
+import org.mortbay.jetty.handler.HandlerCollection;
+import org.mortbay.jetty.handler.RequestLogHandler;
+import org.mortbay.jetty.nio.SelectChannelConnector;
+import org.mortbay.thread.QueuedThreadPool;
+
+public class Main {
+
+ public static void main(String[] args) throws Exception {
+ // process command line
+ Options options = new Options();
+ options.addOption("p", "port", true, "service port");
+ CommandLineParser parser = new PosixParser();
+ CommandLine cmd = parser.parse(options, args);
+ int port = 8080;
+ if (cmd.hasOption("p")) {
+ port = Integer.valueOf(cmd.getOptionValue("p"));
+ }
+
+ HBaseConfiguration conf = new HBaseConfiguration();
+ if (cmd.hasOption("m")) {
+ conf.set("hbase.master", cmd.getOptionValue("m"));
+ }
+
+ /*
+ * RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean(); if (runtime
+ * != null) { LOG.info("vmName=" + runtime.getVmName() + ", vmVendor=" +
+ * runtime.getVmVendor() + ", vmVersion=" + runtime.getVmVersion());
+ * LOG.info("vmInputArguments=" + runtime.getInputArguments()); }
+ */
+ /*
+ * poached from:
+ * http://jetty.mortbay.org/xref/org/mortbay/jetty/example/LikeJettyXml.html
+ */
+ String jetty_home = ".";
+ Server server = new Server();
+
+ QueuedThreadPool threadPool = new QueuedThreadPool();
+ threadPool.setMaxThreads(100);
+ server.setThreadPool(threadPool);
+
+ Connector connector = new SelectChannelConnector();
+ connector.setPort(port);
+ connector.setMaxIdleTime(30000);
+ server.setConnectors(new Connector[] { connector });
+
+ HandlerCollection handlers = new HandlerCollection();
+ ContextHandlerCollection contexts = new ContextHandlerCollection();
+ RequestLogHandler requestLogHandler = new RequestLogHandler();
+ handlers.setHandlers(new Handler[] { contexts, new DefaultHandler(),
+ requestLogHandler });
+ server.setHandler(handlers);
+
+ WebAppDeployer deployer1 = new WebAppDeployer();
+ deployer1.setContexts(contexts);
+ deployer1.setWebAppDir(jetty_home + "/webapps");
+ deployer1.setParentLoaderPriority(false);
+ deployer1.setExtract(true);
+ deployer1.setAllowDuplicates(false);
+ // deployer1.setDefaultsDescriptor(jetty_home + "/etc/webdefault.xml");
+ server.addLifeCycle(deployer1);
+
+ NCSARequestLog requestLog = new NCSARequestLog(jetty_home
+ + "/logs/jetty-yyyy_mm_dd.log");
+ requestLog.setExtended(false);
+ requestLogHandler.setRequestLog(requestLog);
+
+ server.setStopAtShutdown(true);
+ server.setSendServerVersion(true);
+ server.start();
+ server.join();
+ }
+}
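
Main expects a webapps/ directory under the current working directory and relies on Jetty's WebAppDeployer to find the Stargate webapp; it is started with something like `java org.apache.hadoop.hbase.stargate.Main -p 8080`. For comparison, a hedged sketch of wiring the Jersey servlet into Jetty 6 directly, with no webapp directory (the embedding is an assumption, not what this patch ships; the classes come from the bundled Jetty 6 and Jersey 1.x jars):

    import org.mortbay.jetty.Server;
    import org.mortbay.jetty.servlet.Context;
    import org.mortbay.jetty.servlet.ServletHolder;

    import com.sun.jersey.spi.container.servlet.ServletContainer;

    public class EmbeddedStargate {
      public static void main(String[] args) throws Exception {
        Server server = new Server(8080);
        Context context = new Context(server, "/", Context.SESSIONS);
        ServletHolder holder = new ServletHolder(ServletContainer.class);
        // scan the same package that ResourceConfig (below) names
        holder.setInitParameter("com.sun.jersey.config.property.packages",
            "org.apache.hadoop.hbase.stargate");
        context.addServlet(holder, "/*");
        server.start();
        server.join();
      }
    }
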
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/RESTServlet.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/RESTServlet.java
new file mode 100644
index 00000000000..69a21b57532
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/RESTServlet.java
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HTablePool;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import com.sun.jersey.server.impl.container.servlet.ServletAdaptor;
+import com.sun.jersey.spi.container.servlet.ServletContainer;
+
+public class RESTServlet extends ServletAdaptor {
+
+ private static final long serialVersionUID = 1L;
+ public static final int DEFAULT_MAX_AGE = 60 * 60 * 4; // 4 hours
+ public static final String VERSION_STRING = "0.0.1";
+
+ private static RESTServlet instance;
+
+ private final HBaseConfiguration conf;
+ protected Map<String,Integer> maxAgeMap =
+ Collections.synchronizedMap(new HashMap<String,Integer>());
+
+ public synchronized static RESTServlet getInstance() throws IOException {
+ if (instance == null) {
+ instance = new RESTServlet();
+ }
+ return instance;
+ }
+
+ public RESTServlet() throws IOException {
+ this.conf = new HBaseConfiguration();
+ }
+
+
+ protected HTablePool getTablePool(String name) {
+ return HTablePool.getPool(conf, Bytes.toBytes(name));
+ }
+
+ protected HBaseConfiguration getConfiguration() {
+ return conf;
+ }
+
+ /**
+ * @param tableName
+ * @return the maximum cache age suitable for use with this table, in
+ * seconds
+ * @throws IOException
+ */
+ public int getMaxAge(String tableName) throws IOException {
+ Integer i = maxAgeMap.get(tableName);
+ if (i != null) {
+ return i.intValue();
+ }
+ HTablePool pool = this.getTablePool(tableName);
+ HTable table = pool.get();
+ if (table != null) {
+ try {
+ int maxAge = DEFAULT_MAX_AGE;
+ for (HColumnDescriptor family:
+ table.getTableDescriptor().getFamilies()) {
+ int ttl = family.getTimeToLive();
+ if (ttl < 0) {
+ continue;
+ }
+ if (ttl < maxAge) {
+ maxAge = ttl;
+ }
+ }
+ maxAgeMap.put(tableName, maxAge);
+ return maxAge;
+ } finally {
+ // return the borrowed table so the pool does not leak connections
+ pool.put(table);
+ }
+ }
+ return DEFAULT_MAX_AGE;
+ }
+
+ public void invalidateMaxAge(String tableName) {
+ maxAgeMap.remove(tableName);
+ }
+
+ public static final String getVersion() {
+ StringBuilder version = new StringBuilder();
+ version.append("Stargate ");
+ version.append(VERSION_STRING);
+ version.append(" [JVM: ");
+ version.append(System.getProperty("java.vm.vendor"));
+ version.append(' ');
+ version.append(System.getProperty("java.version"));
+ version.append('-');
+ version.append(System.getProperty("java.vm.version"));
+ version.append("] [OS: ");
+ version.append(System.getProperty("os.name"));
+ version.append(' ');
+ version.append(System.getProperty("os.version"));
+ version.append(' ');
+ version.append(System.getProperty("os.arch"));
+ version.append("] [Jersey: ");
+ version.append(ServletContainer.class.getPackage()
+ .getImplementationVersion());
+ version.append(']');
+ return version.toString();
+ }
+}
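
getMaxAge() derives a cache lifetime from the smallest positive column family TTL and memoizes the answer per table; invalidateMaxAge() drops the cached entry when a schema change could invalidate it (SchemaResource does this below). A short fragment showing the intended use, mirroring what RowResource does in its constructor ("mytable" is illustrative; assumes javax.ws.rs.core.CacheControl is imported):

    CacheControl cacheControl = new CacheControl();
    cacheControl.setMaxAge(RESTServlet.getInstance().getMaxAge("mytable"));
    cacheControl.setNoTransform(false);
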
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/RegionsResource.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/RegionsResource.java
new file mode 100644
index 00000000000..e637bc0c863
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/RegionsResource.java
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.Map;
+import javax.ws.rs.GET;
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.UriInfo;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HTablePool;
+import org.apache.hadoop.hbase.stargate.model.TableInfoModel;
+import org.apache.hadoop.hbase.stargate.model.TableRegionModel;
+
+public class RegionsResource implements Constants {
+ private static final Log LOG = LogFactory.getLog(RegionsResource.class);
+
+ private String table;
+ private CacheControl cacheControl;
+
+ public RegionsResource(String table) {
+ this.table = table;
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ private Map<HRegionInfo,HServerAddress> getTableRegions()
+ throws IOException {
+ HTablePool pool = RESTServlet.getInstance().getTablePool(this.table);
+ HTable table = pool.get();
+ try {
+ return table.getRegionsInfo();
+ } finally {
+ pool.put(table);
+ }
+ }
+
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
+ MIMETYPE_PROTOBUF})
+ public Response get(@Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath());
+ }
+ try {
+ TableInfoModel model = new TableInfoModel(table);
+ Map<HRegionInfo,HServerAddress> regions = getTableRegions();
+ for (Map.Entry<HRegionInfo,HServerAddress> e: regions.entrySet()) {
+ HRegionInfo hri = e.getKey();
+ HServerAddress addr = e.getValue();
+ InetSocketAddress sa = addr.getInetSocketAddress();
+ model.add(
+ new TableRegionModel(table, hri.getRegionId(), hri.getStartKey(),
+ hri.getEndKey(),
+ sa.getHostName() + ":" + Integer.valueOf(sa.getPort())));
+ }
+ ResponseBuilder response = Response.ok(model);
+ response.cacheControl(cacheControl);
+ return response.build();
+ } catch (TableNotFoundException e) {
+ throw new WebApplicationException(Response.Status.NOT_FOUND);
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.SERVICE_UNAVAILABLE);
+ }
+ }
+}
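
A hedged client-side sketch of fetching region info. The /<table>/regions URL layout assumes how the table resource (not shown in this hunk) mounts this class; host, port, and table name are illustrative:

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class GetRegions {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:8080/mytable/regions");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty("Accept", "text/xml");
        // expect 200 with a TableInfoModel body, 404 for a missing table,
        // or 503 when HBase cannot be reached
        System.out.println(conn.getResponseCode());
      }
    }
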
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/ResourceConfig.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/ResourceConfig.java
new file mode 100644
index 00000000000..04f3e658c69
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/ResourceConfig.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import com.sun.jersey.api.core.PackagesResourceConfig;
+
+public class ResourceConfig extends PackagesResourceConfig {
+ public ResourceConfig() {
+ super("org.apache.hadoop.hbase.stargate");
+ }
+}
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/ResultGenerator.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/ResultGenerator.java
new file mode 100644
index 00000000000..f9ee960d134
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/ResultGenerator.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+import org.apache.hadoop.hbase.KeyValue;
+
+public abstract class ResultGenerator implements Iterator<KeyValue> {
+ public static ResultGenerator fromRowSpec(String table, RowSpec rowspec)
+ throws IOException {
+ if (rowspec.isSingleRow()) {
+ return new RowResultGenerator(table, rowspec);
+ } else {
+ return new ScannerResultGenerator(table, rowspec);
+ }
+ }
+
+ public abstract void close();
+}
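
The factory returns a one-shot Get (RowResultGenerator) for a single-row spec and a server-side scan (ScannerResultGenerator) otherwise, so callers iterate cells the same way in both cases. A minimal sketch of driving it directly (table and row names are illustrative):

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.stargate.ResultGenerator;
    import org.apache.hadoop.hbase.stargate.RowSpec;

    public class DumpRow {
      public static void main(String[] args) throws Exception {
        ResultGenerator generator =
            ResultGenerator.fromRowSpec("mytable", new RowSpec("/myrow"));
        try {
          while (generator.hasNext()) {
            KeyValue kv = generator.next();
            System.out.println(kv);   // one cell per line
          }
        } finally {
          generator.close();
        }
      }
    }
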
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/RowResource.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/RowResource.java
new file mode 100644
index 00000000000..990ab95b83c
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/RowResource.java
@@ -0,0 +1,342 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+import java.util.List;
+
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HTablePool;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.stargate.model.CellModel;
+import org.apache.hadoop.hbase.stargate.model.CellSetModel;
+import org.apache.hadoop.hbase.stargate.model.RowModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class RowResource implements Constants {
+ private static final Log LOG = LogFactory.getLog(RowResource.class);
+
+ private String table;
+ private RowSpec rowspec;
+ private CacheControl cacheControl;
+
+ public RowResource(String table, String rowspec, String versions)
+ throws IOException {
+ this.table = table;
+ this.rowspec = new RowSpec(rowspec);
+ if (versions != null) {
+ this.rowspec.setMaxVersions(Integer.valueOf(versions));
+ }
+ cacheControl = new CacheControl();
+ cacheControl.setMaxAge(RESTServlet.getInstance().getMaxAge(table));
+ cacheControl.setNoTransform(false);
+ }
+
+ @GET
+ @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
+ MIMETYPE_PROTOBUF})
+ public Response get(@Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath());
+ }
+ try {
+ ResultGenerator generator = ResultGenerator.fromRowSpec(table, rowspec);
+ if (!generator.hasNext()) {
+ throw new WebApplicationException(Response.Status.NOT_FOUND);
+ }
+ CellSetModel model = new CellSetModel();
+ KeyValue value = generator.next();
+ byte[] rowKey = value.getRow();
+ RowModel rowModel = new RowModel(rowKey);
+ do {
+ if (!Bytes.equals(value.getRow(), rowKey)) {
+ model.addRow(rowModel);
+ rowKey = value.getRow();
+ rowModel = new RowModel(rowKey);
+ }
+ rowModel.addCell(
+ new CellModel(value.getColumn(), value.getTimestamp(),
+ value.getValue()));
+ value = generator.next();
+ } while (value != null);
+ model.addRow(rowModel);
+ ResponseBuilder response = Response.ok(model);
+ response.cacheControl(cacheControl);
+ return response.build();
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.SERVICE_UNAVAILABLE);
+ }
+ }
+
+ @GET
+ @Produces(MIMETYPE_BINARY)
+ public Response getBinary(@Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY);
+ }
+ // doesn't make sense to use a non specific coordinate as this can only
+ // return a single cell
+ if (!rowspec.hasColumns() || rowspec.getColumns().length > 1) {
+ throw new WebApplicationException(Response.Status.BAD_REQUEST);
+ }
+ try {
+ ResultGenerator generator = ResultGenerator.fromRowSpec(table, rowspec);
+ if (!generator.hasNext()) {
+ throw new WebApplicationException(Response.Status.NOT_FOUND);
+ }
+ KeyValue value = generator.next();
+ ResponseBuilder response = Response.ok(value.getValue());
+ response.cacheControl(cacheControl);
+ response.header("X-Timestamp", value.getTimestamp());
+ return response.build();
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.SERVICE_UNAVAILABLE);
+ }
+ }
+
+ private void deleteRow() {
+ HTablePool pool;
+ try {
+ pool = RESTServlet.getInstance().getTablePool(this.table);
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.INTERNAL_SERVER_ERROR);
+ }
+ HTable table = null;
+ try {
+ table = pool.get();
+ table.delete(new Delete(rowspec.getRow()));
+ table.flushCommits();
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.SERVICE_UNAVAILABLE);
+ } finally {
+ if (table != null) {
+ pool.put(table);
+ }
+ }
+ }
+
+ private Response update(CellSetModel model, boolean replace) {
+ if (replace) {
+ deleteRow();
+ }
+ HTablePool pool;
+ try {
+ pool = RESTServlet.getInstance().getTablePool(this.table);
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.INTERNAL_SERVER_ERROR);
+ }
+ HTable table = null;
+ try {
+ table = pool.get();
+ for (RowModel row: model.getRows()) {
+ Put put = new Put(row.getKey());
+ for (CellModel cell: row.getCells()) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("update cell '" +
+ Bytes.toStringBinary(cell.getColumn()) + "' @" +
+ cell.getTimestamp() + " length " + cell.getValue().length);
+ }
+ byte [][] parts = KeyValue.parseColumn(cell.getColumn());
+ if (cell.hasUserTimestamp()) {
+ put.add(parts[0], parts[1], cell.getTimestamp(), cell.getValue());
+ } else {
+ put.add(parts[0], parts[1], cell.getValue());
+ }
+ }
+ table.put(put);
+ }
+ table.flushCommits();
+ ResponseBuilder response = Response.ok();
+ return response.build();
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.SERVICE_UNAVAILABLE);
+ } finally {
+ if (table != null) {
+ pool.put(table);
+ }
+ }
+ }
+
+ private Response updateBinary(byte[] message, HttpHeaders headers,
+ boolean replace) {
+ if (replace) {
+ deleteRow();
+ }
+ HTablePool pool;
+ try {
+ pool = RESTServlet.getInstance().getTablePool(this.table);
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.INTERNAL_SERVER_ERROR);
+ }
+ HTable table = null;
+ try {
+ byte[] row = rowspec.getRow();
+ byte[][] columns = rowspec.getColumns();
+ byte[] column = null;
+ if (columns != null) {
+ column = columns[0];
+ }
+ long timestamp = -1;
+ List<String> vals = headers.getRequestHeader("X-Row");
+ if (vals != null && !vals.isEmpty()) {
+ row = Bytes.toBytes(vals.get(0));
+ }
+ vals = headers.getRequestHeader("X-Column");
+ if (vals != null && !vals.isEmpty()) {
+ column = Bytes.toBytes(vals.get(0));
+ }
+ vals = headers.getRequestHeader("X-Timestamp");
+ if (vals != null && !vals.isEmpty()) {
+ timestamp = Long.valueOf(vals.get(0));
+ }
+ if (column == null) {
+ throw new WebApplicationException(Response.Status.BAD_REQUEST);
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("update cell '" + Bytes.toStringBinary(column) + "' @" +
+ timestamp + " length " + message.length);
+ }
+ Put put = new Put(row);
+ byte parts[][] = KeyValue.parseColumn(column);
+ if (timestamp >= 0) {
+ put.add(parts[0], parts[1], timestamp, message);
+ } else {
+ put.add(parts[0], parts[1], message);
+ }
+ table = pool.get();
+ table.put(put);
+ table.flushCommits();
+ return Response.ok().build();
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.SERVICE_UNAVAILABLE);
+ } finally {
+ if (table != null) {
+ pool.put(table);
+ }
+ }
+ }
+
+ @PUT
+ @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
+ MIMETYPE_PROTOBUF})
+ public Response put(CellSetModel model, @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("PUT " + uriInfo.getAbsolutePath());
+ }
+ return update(model, true);
+ }
+
+ @PUT
+ @Consumes(MIMETYPE_BINARY)
+ public Response putBinary(byte[] message, @Context UriInfo uriInfo,
+ @Context HttpHeaders headers)
+ {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("PUT " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY);
+ }
+ return updateBinary(message, headers, true);
+ }
+
+ @POST
+ @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
+ MIMETYPE_PROTOBUF})
+ public Response post(CellSetModel model, @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("POST " + uriInfo.getAbsolutePath());
+ }
+ return update(model, false);
+ }
+
+ @POST
+ @Consumes(MIMETYPE_BINARY)
+ public Response postBinary(byte[] message, @Context UriInfo uriInfo,
+ @Context HttpHeaders headers)
+ {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("POST " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY);
+ }
+ return updateBinary(message, headers, false);
+ }
+
+ @DELETE
+ public Response delete(@Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("DELETE " + uriInfo.getAbsolutePath());
+ }
+ Delete delete = new Delete(rowspec.getRow());
+ for (byte[] column: rowspec.getColumns()) {
+ byte[][] split = KeyValue.parseColumn(column);
+ if (rowspec.hasTimestamp()) {
+ delete.deleteColumns(split[0], split[1], rowspec.getTimestamp());
+ } else {
+ delete.deleteColumns(split[0], split[1]);
+ }
+ }
+ HTablePool pool;
+ try {
+ pool = RESTServlet.getInstance().getTablePool(this.table);
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.INTERNAL_SERVER_ERROR);
+ }
+ HTable table = null;
+ try {
+ table = pool.get();
+ table.delete(delete);
+ table.flushCommits();
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.SERVICE_UNAVAILABLE);
+ } finally {
+ if (table != null) {
+ pool.put(table);
+ }
+ }
+ return Response.ok().build();
+ }
+}
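
In the raw binary variants the cell coordinates travel in request headers (X-Row, X-Column, X-Timestamp) rather than in the body; updateBinary() falls back to the rowspec parsed from the URL for any header that is absent. A hedged client sketch (URL, table, and column names are illustrative):

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class PutBinaryCell {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:8080/mytable/myrow/mycf:qual");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setDoOutput(true);
        conn.setRequestMethod("PUT");
        conn.setRequestProperty("Content-Type", "application/octet-stream");
        conn.setRequestProperty("X-Timestamp", "1245196800000"); // optional
        OutputStream out = conn.getOutputStream();
        out.write("value".getBytes());
        out.close();
        System.out.println(conn.getResponseCode()); // expect 200
      }
    }
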
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/RowResultGenerator.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/RowResultGenerator.java
new file mode 100644
index 00000000000..39974689e56
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/RowResultGenerator.java
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HTablePool;
+import org.apache.hadoop.hbase.client.Result;
+
+public class RowResultGenerator extends ResultGenerator {
+ private Iterator<KeyValue> valuesI;
+
+ public RowResultGenerator(String tableName, RowSpec rowspec)
+ throws IllegalArgumentException, IOException {
+ HTablePool pool = RESTServlet.getInstance().getTablePool(tableName);
+ HTable table = pool.get();
+ try {
+ Get get = new Get(rowspec.getRow());
+ if (rowspec.hasColumns()) {
+ get.addColumns(rowspec.getColumns());
+ } else {
+ // rowspec does not explicitly specify columns, return them all
+ for (HColumnDescriptor family:
+ table.getTableDescriptor().getFamilies()) {
+ get.addFamily(family.getName());
+ }
+ }
+ get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
+ get.setMaxVersions(rowspec.getMaxVersions());
+ Result result = table.get(get);
+ if (result != null && !result.isEmpty()) {
+ valuesI = result.list().iterator();
+ }
+ } finally {
+ pool.put(table);
+ }
+ }
+
+ public void close() {
+ }
+
+ public boolean hasNext() {
+ if (valuesI == null) {
+ return false;
+ }
+ return valuesI.hasNext();
+ }
+
+ public KeyValue next() {
+ if (valuesI == null) {
+ return null;
+ }
+ try {
+ return valuesI.next();
+ } catch (NoSuchElementException e) {
+ return null;
+ }
+ }
+
+ public void remove() {
+ throw new UnsupportedOperationException("remove not supported");
+ }
+}
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/RowSpec.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/RowSpec.java
new file mode 100644
index 00000000000..cbaac22ec99
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/RowSpec.java
@@ -0,0 +1,310 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.util.Collection;
+import java.util.TreeSet;
+
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class RowSpec {
+ public static final long DEFAULT_START_TIMESTAMP = 0;
+ public static final long DEFAULT_END_TIMESTAMP = Long.MAX_VALUE;
+
+ private byte[] row = HConstants.EMPTY_START_ROW;
+ private byte[] endRow = null;
+ private TreeSet<byte[]> columns =
+ new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
+ private long startTime = DEFAULT_START_TIMESTAMP;
+ private long endTime = DEFAULT_END_TIMESTAMP;
+ private int maxVersions = HColumnDescriptor.DEFAULT_VERSIONS;
+
+ public RowSpec(String path) throws IllegalArgumentException {
+ int i = 0;
+ while (path.charAt(i) == '/') {
+ i++;
+ }
+ i = parseRowKeys(path, i);
+ i = parseColumns(path, i);
+ i = parseTimestamp(path, i);
+ }
+
+ private int parseRowKeys(String path, int i)
+ throws IllegalArgumentException {
+ StringBuilder startRow = new StringBuilder();
+ StringBuilder endRow = null;
+ try {
+ char c;
+ boolean doEndRow = false;
+ while (i < path.length() && (c = path.charAt(i)) != '/') {
+ if (c == ',') {
+ doEndRow = true;
+ i++;
+ break;
+ }
+ startRow.append(c);
+ i++;
+ }
+ i++;
+ this.row = Bytes.toBytes(startRow.toString());
+ if (doEndRow) {
+ endRow = new StringBuilder();
+ while (i < path.length() && (c = path.charAt(i)) != '/') {
+ endRow.append(c);
+ i++;
+ }
+ i++;
+ }
+ } catch (IndexOutOfBoundsException e) {
+ throw new IllegalArgumentException(e);
+ }
+ // HBase does not support wildcards on row keys so we will emulate a
+ // suffix glob by synthesizing appropriate start and end row keys for
+ // table scanning
+ if (startRow.charAt(startRow.length() - 1) == '*') {
+ if (endRow != null)
+ throw new IllegalArgumentException("invalid path: start row "+
+ "specified with wildcard");
+ this.row = Bytes.toBytes(startRow.substring(0,
+ startRow.lastIndexOf("*")));
+ this.endRow = new byte[this.row.length + 1];
+ System.arraycopy(this.row, 0, this.endRow, 0, this.row.length);
+ this.endRow[this.row.length] = (byte)255;
+ } else {
+ this.row = Bytes.toBytes(startRow.toString());
+ if (endRow != null) {
+ this.endRow = Bytes.toBytes(endRow.toString());
+ }
+ }
+ return i;
+ }
+
+ private int parseColumns(String path, int i)
+ throws IllegalArgumentException {
+ if (i >= path.length()) {
+ return i;
+ }
+ try {
+ char c;
+ StringBuilder column = new StringBuilder();
+ boolean hasColon = false;
+ while (i < path.length() && (c = path.charAt(i)) != '/') {
+ if (c == ',') {
+ if (column.length() < 1) {
+ throw new IllegalArgumentException("invalid path");
+ }
+ if (!hasColon) {
+ column.append(':');
+ }
+ this.columns.add(Bytes.toBytes(column.toString()));
+ column = new StringBuilder();
+ hasColon = false;
+ i++;
+ continue;
+ }
+ if (c == ':') {
+ hasColon = true;
+ }
+ column.append(c);
+ i++;
+ }
+ i++;
+ // trailing list entry
+ if (column.length() > 0) {
+ if (!hasColon) {
+ column.append(':');
+ }
+ this.columns.add(Bytes.toBytes(column.toString()));
+ }
+ } catch (IndexOutOfBoundsException e) {
+ throw new IllegalArgumentException(e);
+ }
+ return i;
+ }
+
+ private int parseTimestamp(String path, int i)
+ throws IllegalArgumentException {
+ if (i >= path.length()) {
+ return i;
+ }
+ long time0 = 0, time1 = 0;
+ try {
+ char c = 0;
+ StringBuilder stamp = new StringBuilder();
+ while (i < path.length()) {
+ c = path.charAt(i);
+ if (c == '/' || c == ',') {
+ break;
+ }
+ stamp.append(c);
+ i++;
+ }
+ try {
+ time0 = Long.valueOf(stamp.toString());
+ } catch (NumberFormatException e) {
+ throw new IllegalArgumentException(e);
+ }
+ if (c == ',') {
+ stamp = new StringBuilder();
+ i++;
+ while (i < path.length() && ((c = path.charAt(i)) != '/')) {
+ stamp.append(c);
+ i++;
+ }
+ try {
+ time1 = Long.valueOf(stamp.toString());
+ } catch (NumberFormatException e) {
+ throw new IllegalArgumentException(e);
+ }
+ }
+ if (c == '/') {
+ i++;
+ }
+ } catch (IndexOutOfBoundsException e) {
+ throw new IllegalArgumentException(e);
+ }
+ if (time1 != 0) {
+ startTime = time0;
+ endTime = time1;
+ } else {
+ endTime = time0;
+ }
+ return i;
+ }
+
+ public RowSpec(byte[] startRow, byte[] endRow, byte[][] columns,
+ long startTime, long endTime, int maxVersions) {
+ this.row = startRow;
+ this.endRow = endRow;
+ if (columns != null) {
+ for (byte[] col: columns) {
+ this.columns.add(col);
+ }
+ }
+ this.startTime = startTime;
+ this.endTime = endTime;
+ this.maxVersions = maxVersions;
+ }
+
+ public RowSpec(byte[] startRow, byte[] endRow, Collection<byte[]> columns,
+ long startTime, long endTime, int maxVersions) {
+ this.row = startRow;
+ this.endRow = endRow;
+ if (columns != null) {
+ this.columns.addAll(columns);
+ }
+ this.startTime = startTime;
+ this.endTime = endTime;
+ this.maxVersions = maxVersions;
+ }
+
+ public boolean isSingleRow() {
+ return endRow == null;
+ }
+
+ public int getMaxVersions() {
+ return maxVersions;
+ }
+
+ public void setMaxVersions(int maxVersions) {
+ this.maxVersions = maxVersions;
+ }
+
+ public boolean hasColumns() {
+ return !columns.isEmpty();
+ }
+
+ public byte[] getRow() {
+ return row;
+ }
+
+ public byte[] getStartRow() {
+ return row;
+ }
+
+ public boolean hasEndRow() {
+ return endRow != null;
+ }
+
+ public byte[] getEndRow() {
+ return endRow;
+ }
+
+ public void addColumn(byte[] column) {
+ columns.add(column);
+ }
+
+ public byte[][] getColumns() {
+ return columns.toArray(new byte[columns.size()][]);
+ }
+
+ public boolean hasTimestamp() {
+ return (startTime == 0) && (endTime != Long.MAX_VALUE);
+ }
+
+ public long getTimestamp() {
+ return endTime;
+ }
+
+ public long getStartTime() {
+ return startTime;
+ }
+
+ public void setStartTime(long startTime) {
+ this.startTime = startTime;
+ }
+
+ public long getEndTime() {
+ return endTime;
+ }
+
+ public void setEndTime(long endTime) {
+ this.endTime = endTime;
+ }
+
+ public String toString() {
+ StringBuilder result = new StringBuilder();
+ result.append("{startRow => '");
+ if (row != null) {
+ result.append(Bytes.toString(row));
+ }
+ result.append("', endRow => '");
+ if (endRow != null) {
+ result.append(Bytes.toString(endRow));
+ }
+ result.append("', columns => [");
+ for (byte[] col: columns) {
+ result.append(" '");
+ result.append(Bytes.toString(col));
+ result.append("'");
+ }
+ result.append(" ], startTime => ");
+ result.append(Long.toString(startTime));
+ result.append(", endTime => ");
+ result.append(Long.toString(endTime));
+ result.append(", maxVersions => ");
+ result.append(Integer.toString(maxVersions));
+ result.append("}");
+ return result.toString();
+ }
+}
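
The path grammar accepted by the parsing constructor is roughly row[,endrow]/column(,column)*/start-ts[,end-ts], with a trailing '*' on the row emulating a suffix glob. Some worked examples (all names and values illustrative):

    new RowSpec("/myrow");                  // one row, all columns
    new RowSpec("/myrow/cf:qual");          // one cell coordinate
    new RowSpec("/myrow/cf1,cf2:q");        // a bare family gets ':' appended
    new RowSpec("/row*");                   // all rows with prefix "row"
    new RowSpec("/start,end/cf:q/100,200"); // scan with time range 100..200
    new RowSpec("/myrow/cf:q/100");         // one stamp sets the end time only
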
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/ScannerInstanceResource.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/ScannerInstanceResource.java
new file mode 100644
index 00000000000..08b8f46cbd3
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/ScannerInstanceResource.java
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.stargate.model.CellModel;
+import org.apache.hadoop.hbase.stargate.model.CellSetModel;
+import org.apache.hadoop.hbase.stargate.model.RowModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import com.sun.jersey.core.util.Base64;
+
+public class ScannerInstanceResource implements Constants {
+ private static final Log LOG =
+ LogFactory.getLog(ScannerInstanceResource.class);
+
+ protected ResultGenerator generator;
+ private String id;
+ private int batch;
+ private CacheControl cacheControl;
+
+ public ScannerInstanceResource(String table, String id,
+ ResultGenerator generator, int batch) throws IOException {
+ this.id = id;
+ this.generator = generator;
+ this.batch = batch;
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ @GET
+ @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
+ MIMETYPE_PROTOBUF})
+ public Response get(@Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath());
+ }
+ CellSetModel model = new CellSetModel();
+ RowModel rowModel = null;
+ byte[] rowKey = null;
+ int count = batch;
+ do {
+ KeyValue value = null;
+ try {
+ value = generator.next();
+ } catch (IllegalStateException e) {
+ ScannerResource.delete(id);
+ throw new WebApplicationException(Response.Status.GONE);
+ }
+ if (value == null) {
+ LOG.info("generator exhausted");
+ // respond with 204 (No Content) if an empty cell set would be
+ // returned
+ if (count == batch) {
+ return Response.noContent().build();
+ }
+ break;
+ }
+ if (rowKey == null) {
+ rowKey = value.getRow();
+ rowModel = new RowModel(rowKey);
+ }
+ if (!Bytes.equals(value.getRow(), rowKey)) {
+ model.addRow(rowModel);
+ rowKey = value.getRow();
+ rowModel = new RowModel(rowKey);
+ }
+ rowModel.addCell(
+ new CellModel(value.getColumn(), value.getTimestamp(),
+ value.getValue()));
+ } while (--count > 0);
+ model.addRow(rowModel);
+ ResponseBuilder response = Response.ok(model);
+ response.cacheControl(cacheControl);
+ return response.build();
+ }
+
+ @GET
+ @Produces(MIMETYPE_BINARY)
+ public Response getBinary(@Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath() + " as " +
+ MIMETYPE_BINARY);
+ }
+ try {
+ KeyValue value = generator.next();
+ if (value == null) {
+ LOG.info("generator exhausted");
+ return Response.noContent().build();
+ }
+ ResponseBuilder response = Response.ok(value.getValue());
+ response.cacheControl(cacheControl);
+ response.header("X-Row", Base64.encode(value.getRow()));
+ response.header("X-Column", Base64.encode(value.getColumn()));
+ response.header("X-Timestamp", value.getTimestamp());
+ return response.build();
+ } catch (IllegalStateException e) {
+ ScannerResource.delete(id);
+ throw new WebApplicationException(Response.Status.GONE);
+ }
+ }
+
+ @DELETE
+ public Response delete(@Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("DELETE " + uriInfo.getAbsolutePath());
+ }
+ ScannerResource.delete(id);
+ return Response.ok().build();
+ }
+}
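
GET hands back at most `batch` cells per call, answers 204 No Content once the generator is exhausted, and surfaces an expired scanner lease as 410 Gone. A hedged client loop (the URI comes from the Location header returned on scanner creation, see ScannerResource below):

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class ReadScanner {
      public static void main(String[] args) throws Exception {
        // e.g. http://localhost:8080/mytable/scanner/<id>
        URL scannerUri = new URL(args[0]);
        while (true) {
          HttpURLConnection conn =
              (HttpURLConnection) scannerUri.openConnection();
          conn.setRequestProperty("Accept", "text/xml");
          int code = conn.getResponseCode();
          if (code == 204 || code == 410) {
            break; // exhausted, or the scanner was dropped server-side
          }
          // ... consume one CellSetModel batch from the stream ...
          conn.getInputStream().close();
        }
      }
    }
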
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/ScannerResource.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/ScannerResource.java
new file mode 100644
index 00000000000..4860968c1fc
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/ScannerResource.java
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import com.google.protobuf.InvalidProtocolBufferException;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriBuilder;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.hbase.stargate.model.ScannerModel;
+
+public class ScannerResource implements Constants {
+
+ private static final Log LOG = LogFactory.getLog(ScannerResource.class);
+ protected static final Map<String,ScannerInstanceResource> scanners =
+ new HashMap<String,ScannerInstanceResource>();
+
+ private String table;
+
+ public ScannerResource(String table) {
+ this.table = table;
+ }
+
+ private Response update(ScannerModel model, boolean replace,
+ UriInfo uriInfo) {
+ try {
+ byte[] endRow = model.hasEndRow() ? model.getEndRow() : null;
+ RowSpec spec = new RowSpec(model.getStartRow(), endRow,
+ model.getColumns(), model.getStartTime(), model.getEndTime(), 1);
+ ScannerResultGenerator gen = new ScannerResultGenerator(table, spec);
+ String id = gen.getID();
+ ScannerInstanceResource instance =
+ new ScannerInstanceResource(table, id, gen, model.getBatch());
+ synchronized (scanners) {
+ scanners.put(id, instance);
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("new scanner: " + id);
+ }
+ UriBuilder builder = uriInfo.getAbsolutePathBuilder();
+ URI uri = builder.path(id).build();
+ return Response.created(uri).build();
+ } catch (InvalidProtocolBufferException e) {
+ throw new WebApplicationException(e, Response.Status.BAD_REQUEST);
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.SERVICE_UNAVAILABLE);
+ }
+ }
+
+ @PUT
+ @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
+ MIMETYPE_PROTOBUF})
+ public Response put(ScannerModel model, @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("PUT " + uriInfo.getAbsolutePath());
+ }
+ return update(model, true, uriInfo);
+ }
+
+ @POST
+ @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
+ MIMETYPE_PROTOBUF})
+ public Response post(ScannerModel model, @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("POST " + uriInfo.getAbsolutePath());
+ }
+ return update(model, false, uriInfo);
+ }
+
+ @Path("{scanner: .+}")
+ public ScannerInstanceResource getScannerInstanceResource(
+ @PathParam("scanner") String id) {
+ synchronized (scanners) {
+ ScannerInstanceResource instance = scanners.get(id);
+ if (instance == null) {
+ throw new WebApplicationException(Response.Status.NOT_FOUND);
+ }
+ return instance;
+ }
+ }
+
+ static void delete(String id) {
+ synchronized (scanners) {
+ ScannerInstanceResource instance = scanners.remove(id);
+ if (instance != null) {
+ instance.generator.close();
+ }
+ }
+ }
+}
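
Scanners are created by POSTing (or PUTting) a ScannerModel; the response is 201 Created with the new scanner's URI in the Location header, which the client then polls as above and finally DELETEs to release the server-side state. A hedged sketch; the endpoint path and the serialized payload shape are assumptions outside this hunk:

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class CreateScanner {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:8080/mytable/scanner");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setDoOutput(true);
        conn.setRequestMethod("POST");
        conn.setRequestProperty("Content-Type", "text/xml");
        OutputStream out = conn.getOutputStream();
        out.write("<Scanner batch=\"10\"/>".getBytes()); // assumed XML form
        out.close();
        System.out.println(conn.getResponseCode());          // expect 201
        System.out.println(conn.getHeaderField("Location")); // scanner URI
      }
    }
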
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/ScannerResultGenerator.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/ScannerResultGenerator.java
new file mode 100644
index 00000000000..f5fdc6d9527
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/ScannerResultGenerator.java
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.UnknownScannerException;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HTablePool;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.util.StringUtils;
+
+public class ScannerResultGenerator extends ResultGenerator {
+ private static final Log LOG =
+ LogFactory.getLog(ScannerResultGenerator.class);
+
+ private String id;
+ private Iterator<KeyValue> rowI;
+ private ResultScanner scanner;
+ private Result cached;
+
+ public ScannerResultGenerator(String tableName, RowSpec rowspec)
+ throws IllegalArgumentException, IOException {
+ HTablePool pool = RESTServlet.getInstance().getTablePool(tableName);
+ HTable table = pool.get();
+ try {
+ Scan scan;
+ if (rowspec.hasEndRow()) {
+ scan = new Scan(rowspec.getStartRow(), rowspec.getEndRow());
+ } else {
+ scan = new Scan(rowspec.getStartRow());
+ }
+ if (rowspec.hasColumns()) {
+ scan.addColumns(rowspec.getColumns());
+ } else {
+ for (HColumnDescriptor family:
+ table.getTableDescriptor().getFamilies()) {
+ scan.addFamily(family.getName());
+ }
+ }
+ scan.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
+ scan.setMaxVersions(rowspec.getMaxVersions());
+ scanner = table.getScanner(scan);
+ cached = null;
+ id = Long.toString(System.currentTimeMillis()) +
+ Integer.toHexString(scanner.hashCode());
+ } finally {
+ pool.put(table);
+ }
+ }
+
+ public String getID() {
+ return id;
+ }
+
+ public void close() {
+ }
+
+ public boolean hasNext() {
+ if (rowI != null && rowI.hasNext()) {
+ return true;
+ }
+ if (cached != null) {
+ return true;
+ }
+ try {
+ Result result = scanner.next();
+ if (result != null && !result.isEmpty()) {
+ cached = result;
+ }
+ } catch (UnknownScannerException e) {
+ throw new IllegalArgumentException(e);
+ } catch (IOException e) {
+ LOG.error(StringUtils.stringifyException(e));
+ }
+ return cached != null;
+ }
+
+ public KeyValue next() {
+ boolean loop;
+ do {
+ loop = false;
+ if (rowI != null) {
+ if (rowI.hasNext()) {
+ return rowI.next();
+ } else {
+ rowI = null;
+ }
+ }
+ if (cached != null) {
+ rowI = cached.list().iterator();
+ loop = true;
+ cached = null;
+ } else {
+ Result result = null;
+ try {
+ result = scanner.next();
+ } catch (UnknownScannerException e) {
+ throw new IllegalArgumentException(e);
+ } catch (IOException e) {
+ LOG.error(StringUtils.stringifyException(e));
+ }
+ if (result != null && !result.isEmpty()) {
+ rowI = result.list().iterator();
+ loop = true;
+ }
+ }
+ } while (loop);
+ return null;
+ }
+
+ public void remove() {
+ throw new UnsupportedOperationException("remove not supported");
+ }
+}
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/SchemaResource.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/SchemaResource.java
new file mode 100644
index 00000000000..3a6fbde18fb
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/SchemaResource.java
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+import java.util.Map;
+
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.UriInfo;
+import javax.xml.namespace.QName;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HTablePool;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.stargate.model.ColumnSchemaModel;
+import org.apache.hadoop.hbase.stargate.model.TableSchemaModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class SchemaResource implements Constants {
+ private static final Log LOG = LogFactory.getLog(SchemaResource.class);
+
+ private String table;
+ private CacheControl cacheControl;
+
+ public SchemaResource(String table) {
+ this.table = table;
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ private HTableDescriptor getTableSchema() throws IOException,
+ TableNotFoundException {
+ HTablePool pool = RESTServlet.getInstance().getTablePool(this.table);
+ HTable table = pool.get();
+ try {
+ return table.getTableDescriptor();
+ } finally {
+ pool.put(table);
+ }
+ }
+
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
+ MIMETYPE_PROTOBUF})
+ public Response get(@Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath());
+ }
+ try {
+ HTableDescriptor htd = getTableSchema();
+ TableSchemaModel model = new TableSchemaModel();
+ model.setName(htd.getNameAsString());
+      for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
+          htd.getValues().entrySet()) {
+ model.addAttribute(Bytes.toString(e.getKey().get()),
+ Bytes.toString(e.getValue().get()));
+ }
+ for (HColumnDescriptor hcd: htd.getFamilies()) {
+ ColumnSchemaModel columnModel = new ColumnSchemaModel();
+ columnModel.setName(hcd.getNameAsString());
+        for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
+            hcd.getValues().entrySet()) {
+ columnModel.addAttribute(Bytes.toString(e.getKey().get()),
+ Bytes.toString(e.getValue().get()));
+ }
+ model.addColumnFamily(columnModel);
+ }
+ ResponseBuilder response = Response.ok(model);
+ response.cacheControl(cacheControl);
+ return response.build();
+ } catch (TableNotFoundException e) {
+ throw new WebApplicationException(Response.Status.NOT_FOUND);
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.SERVICE_UNAVAILABLE);
+ }
+ }
+
+ private Response update(TableSchemaModel model, boolean replace,
+ UriInfo uriInfo) {
+ // NOTE: 'replace' is currently ignored... we always replace the schema
+ try {
+ HTableDescriptor htd = new HTableDescriptor(table);
+      for (Map.Entry<QName, Object> e: model.getAny().entrySet()) {
+ htd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
+ }
+ for (ColumnSchemaModel family: model.getColumns()) {
+ HColumnDescriptor hcd = new HColumnDescriptor(family.getName());
+        for (Map.Entry<QName, Object> e: family.getAny().entrySet()) {
+ hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
+ }
+ htd.addFamily(hcd);
+ }
+ RESTServlet server = RESTServlet.getInstance();
+ HBaseAdmin admin = new HBaseAdmin(server.getConfiguration());
+ if (admin.tableExists(table)) {
+ admin.disableTable(table);
+ admin.modifyTable(Bytes.toBytes(table), htd);
+ server.invalidateMaxAge(table);
+ admin.enableTable(table);
+ return Response.ok().build();
+ } else {
+ admin.createTable(htd);
+ return Response.created(uriInfo.getAbsolutePath()).build();
+ }
+ } catch (TableExistsException e) {
+ // race, someone else created a table with the same name
+ throw new WebApplicationException(e, Response.Status.NOT_MODIFIED);
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.SERVICE_UNAVAILABLE);
+ }
+ }
+
+ @PUT
+ @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
+ MIMETYPE_PROTOBUF})
+ public Response put(TableSchemaModel model, @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("PUT " + uriInfo.getAbsolutePath());
+ }
+ return update(model, true, uriInfo);
+ }
+
+ @POST
+ @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
+ MIMETYPE_PROTOBUF})
+ public Response post(TableSchemaModel model, @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("PUT " + uriInfo.getAbsolutePath());
+ }
+ return update(model, false, uriInfo);
+ }
+
+ @DELETE
+ public Response delete(@Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("DELETE " + uriInfo.getAbsolutePath());
+ }
+ try {
+ HBaseAdmin admin =
+ new HBaseAdmin(RESTServlet.getInstance().getConfiguration());
+ admin.disableTable(table);
+ admin.deleteTable(table);
+ return Response.ok().build();
+ } catch (TableNotFoundException e) {
+ throw new WebApplicationException(Response.Status.NOT_FOUND);
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.SERVICE_UNAVAILABLE);
+ }
+ }
+}
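
A hedged end-to-end sketch of driving this resource remotely: build a TableSchemaModel, marshal it to XML with JAXB, and PUT it to /&lt;table&gt;/schema using the Client and Cluster classes added later in this patch. The table name, host and port are illustrative; "text/xml" is assumed to match MIMETYPE_XML, and TableSchemaModel is assumed to carry the same JAXB annotations as the other model classes here.

```java
import java.io.StringWriter;

import javax.xml.bind.JAXBContext;

import org.apache.hadoop.hbase.stargate.client.Client;
import org.apache.hadoop.hbase.stargate.client.Cluster;
import org.apache.hadoop.hbase.stargate.client.Response;
import org.apache.hadoop.hbase.stargate.model.ColumnSchemaModel;
import org.apache.hadoop.hbase.stargate.model.TableSchemaModel;
import org.apache.hadoop.hbase.util.Bytes;

public class SchemaPutSketch {
  public static void main(String[] args) throws Exception {
    TableSchemaModel model = new TableSchemaModel();
    model.setName("example");
    ColumnSchemaModel family = new ColumnSchemaModel();
    family.setName("info");
    model.addColumnFamily(family);

    // serialize the model to its XML representation
    StringWriter writer = new StringWriter();
    JAXBContext.newInstance(TableSchemaModel.class)
      .createMarshaller().marshal(model, writer);

    Client client = new Client(new Cluster().add("localhost", 8080));
    try {
      // expect 201 Created for a new table, 200 OK when replacing
      Response response = client.put("/example/schema", "text/xml",
        Bytes.toBytes(writer.toString()));
      System.out.println("PUT /example/schema -> " + response.getCode());
    } finally {
      client.shutdown();
    }
  }
}
```
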
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/StorageClusterStatusResource.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/StorageClusterStatusResource.java
new file mode 100644
index 00000000000..51e583ae211
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/StorageClusterStatusResource.java
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.HServerInfo;
+import org.apache.hadoop.hbase.HServerLoad;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.stargate.model.StorageClusterStatusModel;
+
+@Path(Constants.PATH_STATUS_CLUSTER)
+public class StorageClusterStatusResource implements Constants {
+ private static final Log LOG =
+ LogFactory.getLog(StorageClusterStatusResource.class);
+
+ private CacheControl cacheControl;
+
+ public StorageClusterStatusResource() {
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
+ MIMETYPE_PROTOBUF})
+ public Response get(@Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath());
+ }
+ try {
+ RESTServlet server = RESTServlet.getInstance();
+ HBaseAdmin admin = new HBaseAdmin(server.getConfiguration());
+ ClusterStatus status = admin.getClusterStatus();
+ StorageClusterStatusModel model = new StorageClusterStatusModel();
+ model.setRegions(status.getRegionsCount());
+ model.setRequests(status.getRequestsCount());
+ model.setAverageLoad(status.getAverageLoad());
+ for (HServerInfo info: status.getServerInfo()) {
+ StorageClusterStatusModel.Node node =
+ model.addLiveNode(
+ info.getServerAddress().getHostname() + ":" +
+ Integer.toString(info.getServerAddress().getPort()),
+ info.getStartCode());
+ HServerLoad load = info.getLoad();
+ node.setRequests(load.getNumberOfRequests());
+ for (HServerLoad.RegionLoad region: load.getRegionsLoad()) {
+ node.addRegion(region.getName());
+ }
+ }
+ for (String name: status.getDeadServerNames()) {
+ model.addDeadNode(name);
+ }
+ ResponseBuilder response = Response.ok(model);
+ response.cacheControl(cacheControl);
+ return response.build();
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.SERVICE_UNAVAILABLE);
+ }
+ }
+}
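
A short hedged sketch fetching the document this resource serves. Constants.PATH_STATUS_CLUSTER supplies the path, so no literal URL is assumed; the host, port, and the "text/plain" Accept value (presumed to match MIMETYPE_TEXT) are illustrative.

```java
import java.io.IOException;

import org.apache.hadoop.hbase.stargate.Constants;
import org.apache.hadoop.hbase.stargate.client.Client;
import org.apache.hadoop.hbase.stargate.client.Cluster;
import org.apache.hadoop.hbase.stargate.client.Response;

public class ClusterStatusSketch {
  public static void main(String[] args) throws IOException {
    Client client = new Client(new Cluster().add("localhost", 8080));
    try {
      Response response = client.get(Constants.PATH_STATUS_CLUSTER,
        "text/plain");
      if (response.getCode() == 200) {
        System.out.print(new String(response.getBody()));
      }
    } finally {
      client.shutdown();
    }
  }
}
```
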
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/StorageClusterVersionResource.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/StorageClusterVersionResource.java
new file mode 100644
index 00000000000..ac164ed2ae3
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/StorageClusterVersionResource.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import javax.ws.rs.core.Response.ResponseBuilder;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.stargate.model.StorageClusterVersionModel;
+
+@Path(Constants.PATH_VERSION_CLUSTER)
+public class StorageClusterVersionResource implements Constants {
+ private static final Log LOG =
+ LogFactory.getLog(StorageClusterVersionResource.class);
+
+ private CacheControl cacheControl;
+
+ public StorageClusterVersionResource() {
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
+ MIMETYPE_PROTOBUF})
+ public Response get(@Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath());
+ }
+ try {
+ RESTServlet server = RESTServlet.getInstance();
+ HBaseAdmin admin = new HBaseAdmin(server.getConfiguration());
+ StorageClusterVersionModel model = new StorageClusterVersionModel();
+ model.setVersion(admin.getClusterStatus().getHBaseVersion());
+ ResponseBuilder response = Response.ok(model);
+ response.cacheControl(cacheControl);
+ return response.build();
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.SERVICE_UNAVAILABLE);
+ }
+ }
+}
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/TableResource.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/TableResource.java
new file mode 100644
index 00000000000..1e99393866a
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/TableResource.java
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import javax.ws.rs.core.Response.ResponseBuilder;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.stargate.model.TableListModel;
+import org.apache.hadoop.hbase.stargate.model.TableModel;
+
+@Path("/")
+public class TableResource implements Constants {
+ private static final Log LOG = LogFactory.getLog(TableResource.class);
+
+ private CacheControl cacheControl;
+
+ public TableResource() {
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ private HTableDescriptor[] getTableList() throws IOException {
+ HBaseAdmin admin =
+ new HBaseAdmin(RESTServlet.getInstance().getConfiguration());
+ HTableDescriptor[] list = admin.listTables();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("getTableList:");
+ for (HTableDescriptor htd: list) {
+ LOG.debug(htd.toString());
+ }
+ }
+ return list;
+ }
+
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
+ MIMETYPE_PROTOBUF})
+ public Response get(@Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath());
+ }
+ try {
+ TableListModel tableList = new TableListModel();
+ for (HTableDescriptor htd: getTableList()) {
+ if (htd.isMetaRegion()) {
+ continue;
+ }
+ tableList.add(new TableModel(htd.getNameAsString()));
+ }
+ ResponseBuilder response = Response.ok(tableList);
+ response.cacheControl(cacheControl);
+ return response.build();
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.SERVICE_UNAVAILABLE);
+ }
+ }
+
+ @Path("{table}/regions")
+ public RegionsResource getRegionsResource(
+ @PathParam("table") String table) {
+ return new RegionsResource(table);
+ }
+
+ @Path("{table}/scanner")
+ public ScannerResource getScannerResource(
+ @PathParam("table") String table) {
+ return new ScannerResource(table);
+ }
+
+ @Path("{table}/schema")
+ public SchemaResource getSchemaResource(
+ @PathParam("table") String table) {
+ return new SchemaResource(table);
+ }
+
+ @Path("{table}/{rowspec: .+}")
+ public RowResource getRowResource(
+ @PathParam("table") String table,
+ @PathParam("rowspec") String rowspec,
+ @QueryParam("v") String versions) {
+ try {
+ return new RowResource(table, rowspec, versions);
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.INTERNAL_SERVER_ERROR);
+ }
+ }
+}
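
The root resource above dispatches to the per-table sub-resources and lists user tables on GET /. A minimal hedged sketch of the listing call, with illustrative host and port, and "text/plain" assumed to match MIMETYPE_TEXT:

```java
import java.io.IOException;

import org.apache.hadoop.hbase.stargate.client.Client;
import org.apache.hadoop.hbase.stargate.client.Cluster;
import org.apache.hadoop.hbase.stargate.client.Response;

public class TableListSketch {
  public static void main(String[] args) throws IOException {
    Client client = new Client(new Cluster().add("localhost", 8080));
    try {
      // GET / returns the names of all non-meta tables
      Response response = client.get("/", "text/plain");
      if (response.getCode() == 200) {
        System.out.print(new String(response.getBody()));
      }
    } finally {
      client.shutdown();
    }
  }
}
```
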
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/VersionResource.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/VersionResource.java
new file mode 100644
index 00000000000..798f94564c0
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/VersionResource.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import javax.servlet.ServletContext;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import javax.ws.rs.core.Response.ResponseBuilder;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.stargate.model.VersionModel;
+
+@Path(Constants.PATH_VERSION)
+public class VersionResource implements Constants {
+ private static final Log LOG = LogFactory.getLog(VersionResource.class);
+
+ private CacheControl cacheControl;
+
+ public VersionResource() {
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
+ MIMETYPE_PROTOBUF})
+  public Response get(@Context ServletContext context,
+      @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath());
+ }
+ ResponseBuilder response = Response.ok(new VersionModel(context));
+ response.cacheControl(cacheControl);
+ return response.build();
+ }
+
+ // "/version/stargate" is an alias for "/version"
+ @Path("stargate")
+ public VersionResource getVersionResource() {
+ return this;
+ }
+}
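
A hedged sketch exercising the alias above: both requests should yield the same VersionModel representation. Host, port and the "text/plain" Accept value are illustrative.

```java
import java.io.IOException;

import org.apache.hadoop.hbase.stargate.Constants;
import org.apache.hadoop.hbase.stargate.client.Client;
import org.apache.hadoop.hbase.stargate.client.Cluster;

public class VersionSketch {
  public static void main(String[] args) throws IOException {
    Client client = new Client(new Cluster().add("localhost", 8080));
    try {
      System.out.println(new String(client.get(
        Constants.PATH_VERSION, "text/plain").getBody()));
      // the "/stargate" alias resolves to the same resource
      System.out.println(new String(client.get(
        Constants.PATH_VERSION + "/stargate", "text/plain").getBody()));
    } finally {
      client.shutdown();
    }
  }
}
```
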
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/client/Client.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/client/Client.java
new file mode 100644
index 00000000000..73695c9e6ed
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/client/Client.java
@@ -0,0 +1,242 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.client;
+
+import java.io.IOException;
+
+import org.apache.commons.httpclient.Header;
+import org.apache.commons.httpclient.HttpClient;
+import org.apache.commons.httpclient.HttpMethod;
+import org.apache.commons.httpclient.HttpVersion;
+import org.apache.commons.httpclient.MultiThreadedHttpConnectionManager;
+import org.apache.commons.httpclient.URI;
+import org.apache.commons.httpclient.methods.ByteArrayRequestEntity;
+import org.apache.commons.httpclient.methods.DeleteMethod;
+import org.apache.commons.httpclient.methods.GetMethod;
+import org.apache.commons.httpclient.methods.HeadMethod;
+import org.apache.commons.httpclient.methods.PostMethod;
+import org.apache.commons.httpclient.methods.PutMethod;
+import org.apache.commons.httpclient.params.HttpClientParams;
+import org.apache.commons.httpclient.params.HttpConnectionManagerParams;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+public class Client {
+ public static final Header[] EMPTY_HEADER_ARRAY = new Header[0];
+
+ private static final Log LOG = LogFactory.getLog(Client.class);
+
+ private HttpClient httpClient;
+ private Cluster cluster;
+
+ public Client() {
+ this(null);
+ }
+
+ public Client(Cluster cluster) {
+ this.cluster = cluster;
+ httpClient = new HttpClient(new MultiThreadedHttpConnectionManager());
+ HttpConnectionManagerParams managerParams =
+ httpClient.getHttpConnectionManager().getParams();
+ managerParams.setConnectionTimeout(2000); // 2 s
+ HttpClientParams clientParams = httpClient.getParams();
+ clientParams.setVersion(HttpVersion.HTTP_1_1);
+ }
+
+ public void shutdown() {
+ MultiThreadedHttpConnectionManager manager =
+ (MultiThreadedHttpConnectionManager) httpClient.getHttpConnectionManager();
+ manager.shutdown();
+ }
+
+ @SuppressWarnings("deprecation")
+ public int executePathOnly(Cluster c, HttpMethod method, Header[] headers,
+ String path) throws IOException {
+ IOException lastException;
+ if (c.nodes.size() < 1) {
+ throw new IOException("Cluster is empty");
+ }
+ int start = (int)Math.round((c.nodes.size() - 1) * Math.random());
+ int i = start;
+ do {
+ c.lastHost = c.nodes.get(i);
+ try {
+        StringBuilder sb = new StringBuilder();
+ sb.append("http://");
+ sb.append(c.lastHost);
+ sb.append(path);
+ URI uri = new URI(sb.toString());
+ return executeURI(method, headers, uri.toString());
+ } catch (IOException e) {
+ lastException = e;
+ }
+    } while ((i = (i + 1) % c.nodes.size()) != start); // wrap to try every node once
+ throw lastException;
+ }
+
+ @SuppressWarnings("deprecation")
+ public int executeURI(HttpMethod method, Header[] headers, String uri)
+ throws IOException {
+ method.setURI(new URI(uri));
+ if (headers != null) {
+ for (Header header: headers) {
+ method.addRequestHeader(header);
+ }
+ }
+ long startTime = System.currentTimeMillis();
+ int code = httpClient.executeMethod(method);
+ long endTime = System.currentTimeMillis();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(method.getName() + " " + uri + ": " + code + " " +
+ method.getStatusText() + " in " + (endTime - startTime) + " ms");
+ }
+ return code;
+ }
+
+ public int execute(Cluster c, HttpMethod method, Header[] headers,
+ String path) throws IOException {
+ if (path.startsWith("/")) {
+ return executePathOnly(c, method, headers, path);
+ }
+ return executeURI(method, headers, path);
+ }
+
+ public Cluster getCluster() {
+ return cluster;
+ }
+
+ public void setCluster(Cluster cluster) {
+ this.cluster = cluster;
+ }
+
+ public Response head(String path) throws IOException {
+ return head(cluster, path);
+ }
+
+ public Response head(Cluster c, String path) throws IOException {
+ HeadMethod method = new HeadMethod();
+ int code = execute(c, method, null, path);
+ Header[] headers = method.getResponseHeaders();
+ method.releaseConnection();
+ return new Response(code, headers, null);
+ }
+
+ public Response get(String path) throws IOException {
+ return get(cluster, path);
+ }
+
+ public Response get(Cluster c, String path) throws IOException {
+ return get(c, path, EMPTY_HEADER_ARRAY);
+ }
+
+ public Response get(String path, String accept) throws IOException {
+ return get(cluster, path, accept);
+ }
+
+ public Response get(Cluster c, String path, String accept)
+ throws IOException {
+ Header[] headers = new Header[1];
+ headers[0] = new Header("Accept", accept);
+ return get(c, path, headers);
+ }
+
+ public Response get(String path, Header[] headers) throws IOException {
+ return get(cluster, path, headers);
+ }
+
+ public Response get(Cluster c, String path, Header[] headers)
+ throws IOException {
+ GetMethod method = new GetMethod();
+ int code = execute(c, method, headers, path);
+ headers = method.getResponseHeaders();
+ byte[] body = method.getResponseBody();
+ method.releaseConnection();
+ return new Response(code, headers, body);
+ }
+
+ public Response put(String path, String contentType, byte[] content)
+ throws IOException {
+ return put(cluster, path, contentType, content);
+ }
+
+ public Response put(Cluster c, String path, String contentType,
+ byte[] content) throws IOException {
+ Header[] headers = new Header[1];
+ headers[0] = new Header("Content-Type", contentType);
+ return put(c, path, headers, content);
+ }
+
+ public Response put(String path, Header[] headers, byte[] body)
+ throws IOException {
+ return put(cluster, path, headers, body);
+ }
+
+ public Response put(Cluster c, String path, Header[] headers,
+ byte[] body) throws IOException {
+ PutMethod method = new PutMethod();
+ method.setRequestEntity(new ByteArrayRequestEntity(body));
+ int code = execute(c, method, headers, path);
+ headers = method.getResponseHeaders();
+ body = method.getResponseBody();
+ method.releaseConnection();
+ return new Response(code, headers, body);
+ }
+
+ public Response post(String path, String contentType, byte[] content)
+ throws IOException {
+ return post(cluster, path, contentType, content);
+ }
+
+ public Response post(Cluster c, String path, String contentType,
+ byte[] content) throws IOException {
+ Header[] headers = new Header[1];
+ headers[0] = new Header("Content-Type", contentType);
+ return post(c, path, headers, content);
+ }
+
+ public Response post(String path, Header[] headers, byte[] content)
+ throws IOException {
+ return post(cluster, path, headers, content);
+ }
+
+ public Response post(Cluster c, String path, Header[] headers,
+ byte[] content) throws IOException {
+ PostMethod method = new PostMethod();
+ method.setRequestEntity(new ByteArrayRequestEntity(content));
+ int code = execute(c, method, headers, path);
+ headers = method.getResponseHeaders();
+ content = method.getResponseBody();
+ method.releaseConnection();
+ return new Response(code, headers, content);
+ }
+
+ public Response delete(String path) throws IOException {
+ return delete(cluster, path);
+ }
+
+ public Response delete(Cluster c, String path) throws IOException {
+ DeleteMethod method = new DeleteMethod();
+ int code = execute(c, method, null, path);
+ Header[] headers = method.getResponseHeaders();
+ method.releaseConnection();
+ return new Response(code, headers);
+ }
+}
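
A hedged sketch of the failover behavior built into this client: any path beginning with '/' is routed through executePathOnly, which starts at a random node in the Cluster and retries the remaining nodes when an IOException occurs. Host names are illustrative.

```java
import java.io.IOException;

import org.apache.hadoop.hbase.stargate.client.Client;
import org.apache.hadoop.hbase.stargate.client.Cluster;
import org.apache.hadoop.hbase.stargate.client.Response;

public class FailoverSketch {
  public static void main(String[] args) throws IOException {
    Cluster cluster = new Cluster()
      .add("gateway1.example.com", 8080)
      .add("gateway2.example.com", 8080);
    Client client = new Client(cluster);
    try {
      // HEAD is a cheap existence check; any reachable node may answer
      Response response = client.head("/example/schema");
      System.out.println(response.getCode() == 200 ?
        "table exists" : "status " + response.getCode());
    } finally {
      client.shutdown();
    }
  }
}
```
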
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/client/Cluster.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/client/Cluster.java
new file mode 100644
index 00000000000..e06d11983a3
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/client/Cluster.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.client;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+public class Cluster {
+  protected List<String> nodes =
+    Collections.synchronizedList(new ArrayList<String>());
+ protected String lastHost;
+
+ /**
+ * Constructor
+ */
+ public Cluster() {}
+
+ /**
+ * Constructor
+ * @param nodes a list of service locations, in 'host:port' format
+ */
+  public Cluster(List<String> nodes) {
+    this.nodes.addAll(nodes);
+ }
+
+ /**
+ * Add a node to the cluster
+   * @param node the service location in 'host:port' format
+ */
+ public Cluster add(String node) {
+ nodes.add(node);
+ return this;
+ }
+
+ /**
+ * Add a node to the cluster
+ * @param name host name
+ * @param port service port
+ */
+ public Cluster add(String name, int port) {
+ StringBuilder sb = new StringBuilder();
+ sb.append(name);
+ sb.append(':');
+ sb.append(port);
+ return add(sb.toString());
+ }
+
+ /**
+ * Remove a node from the cluster
+   * @param node the service location in 'host:port' format
+ */
+ public Cluster remove(String node) {
+ nodes.remove(node);
+ return this;
+ }
+
+ /**
+ * Remove a node from the cluster
+ * @param name host name
+ * @param port service port
+ */
+ public Cluster remove(String name, int port) {
+ StringBuilder sb = new StringBuilder();
+ sb.append(name);
+ sb.append(':');
+ sb.append(port);
+ return remove(sb.toString());
+ }
+}
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/client/Response.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/client/Response.java
new file mode 100644
index 00000000000..b537918031e
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/client/Response.java
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.client;
+
+import org.apache.commons.httpclient.Header;
+
+public class Response {
+ private int code;
+ private Header[] headers;
+ private byte[] body;
+
+ /**
+ * Constructor
+ * @param code the HTTP response code
+ */
+ public Response(int code) {
+ this(code, null, null);
+ }
+
+ /**
+ * Constructor
+ * @param code the HTTP response code
+ * @param headers the HTTP response headers
+ */
+ public Response(int code, Header[] headers) {
+ this(code, headers, null);
+ }
+
+ /**
+ * Constructor
+ * @param code the HTTP response code
+ * @param headers the HTTP response headers
+ * @param body the response body, can be null
+ */
+ public Response(int code, Header[] headers, byte[] body) {
+ this.code = code;
+ this.headers = headers;
+ this.body = body;
+ }
+
+ /**
+ * @return the HTTP response code
+ */
+ public int getCode() {
+ return code;
+ }
+
+ /**
+ * @return the HTTP response headers
+ */
+ public Header[] getHeaders() {
+ return headers;
+ }
+
+ /**
+ * @return the value of the Location header
+ */
+ public String getLocation() {
+ for (Header header: headers) {
+ if (header.getName().equals("Location")) {
+ return header.getValue();
+ }
+ }
+ return null;
+ }
+
+ /**
+ * @return true if a response body was sent
+ */
+ public boolean hasBody() {
+ return body != null;
+ }
+
+ /**
+ * @return the HTTP response body
+ */
+ public byte[] getBody() {
+ return body;
+ }
+
+ /**
+ * @param code the HTTP response code
+ */
+ public void setCode(int code) {
+ this.code = code;
+ }
+
+ /**
+ * @param headers the HTTP response headers
+ */
+ public void setHeaders(Header[] headers) {
+ this.headers = headers;
+ }
+
+ /**
+ * @param body the response body
+ */
+ public void setBody(byte[] body) {
+ this.body = body;
+ }
+}
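
A hedged handling sketch for this value class: SchemaResource (earlier in this patch) answers a successful create with 201 Created and the new resource URI in the Location header, which getLocation() surfaces.

```java
import org.apache.hadoop.hbase.stargate.client.Response;

public class ResponseHandlingSketch {
  // illustrative helper, not part of the patch
  static void report(Response response) {
    if (response.getCode() == 201) {
      System.out.println("created at " + response.getLocation());
    } else if (response.hasBody()) {
      System.out.println(new String(response.getBody()));
    } else {
      System.out.println("status " + response.getCode());
    }
  }
}
```
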
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/CellModel.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/CellModel.java
new file mode 100644
index 00000000000..db6c509cc54
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/CellModel.java
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlType;
+import javax.xml.bind.annotation.XmlValue;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell;
+
+import com.google.protobuf.ByteString;
+
+@XmlRootElement(name="Cell")
+@XmlType(propOrder={"column","timestamp"})
+public class CellModel implements IProtobufWrapper, Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private long timestamp = HConstants.LATEST_TIMESTAMP;
+ private byte[] column;
+ private byte[] value;
+
+ public CellModel() {}
+
+ /**
+ * @param column
+ * @param value
+ */
+ public CellModel(byte[] column, byte[] value) {
+ super();
+ this.column = column;
+ this.value = value;
+ }
+
+ /**
+ * @param column
+ * @param timestamp
+ * @param value
+ */
+ public CellModel(byte[] column, long timestamp, byte[] value) {
+ super();
+ this.column = column;
+ this.timestamp = timestamp;
+ this.value = value;
+ }
+
+ /**
+ * @return the column
+ */
+ @XmlAttribute
+ public byte[] getColumn() {
+ return column;
+ }
+
+ /**
+ * @param column the column to set
+ */
+ public void setColumn(byte[] column) {
+ this.column = column;
+ }
+
+ /**
+ * @return true if the timestamp property has been specified by the
+ * user
+ */
+ public boolean hasUserTimestamp() {
+ return timestamp != HConstants.LATEST_TIMESTAMP;
+ }
+
+ /**
+ * @return the timestamp
+ */
+ @XmlAttribute
+ public long getTimestamp() {
+ return timestamp;
+ }
+
+ /**
+ * @param timestamp the timestamp to set
+ */
+ public void setTimestamp(long timestamp) {
+ this.timestamp = timestamp;
+ }
+
+ /**
+ * @return the value
+ */
+ @XmlValue
+ public byte[] getValue() {
+ return value;
+ }
+
+ /**
+ * @param value the value to set
+ */
+ public void setValue(byte[] value) {
+ this.value = value;
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ Cell.Builder builder = Cell.newBuilder();
+ builder.setColumn(ByteString.copyFrom(getColumn()));
+ builder.setData(ByteString.copyFrom(getValue()));
+ if (hasUserTimestamp()) {
+ builder.setTimestamp(getTimestamp());
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public IProtobufWrapper getObjectFromMessage(byte[] message)
+ throws IOException {
+ Cell.Builder builder = Cell.newBuilder();
+ builder.mergeFrom(message);
+ setColumn(builder.getColumn().toByteArray());
+ setValue(builder.getData().toByteArray());
+ if (builder.hasTimestamp()) {
+ setTimestamp(builder.getTimestamp());
+ }
+ return this;
+ }
+}
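
A round-trip sketch of the protobuf path this class implements: serialize with createProtobufOutput, rebuild with getObjectFromMessage. The column, timestamp and value are illustrative.

```java
import org.apache.hadoop.hbase.stargate.model.CellModel;
import org.apache.hadoop.hbase.util.Bytes;

public class CellModelRoundTrip {
  public static void main(String[] args) throws Exception {
    CellModel cell = new CellModel(Bytes.toBytes("info:state"),
      1245696000000L, Bytes.toBytes("enabled"));
    byte[] message = cell.createProtobufOutput();
    // the explicit timestamp survives the round trip
    CellModel copy = (CellModel) new CellModel()
      .getObjectFromMessage(message);
    System.out.println(Bytes.toString(copy.getColumn()) + " @ " +
      copy.getTimestamp() + " = " + Bytes.toString(copy.getValue()));
  }
}
```
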
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/CellSetModel.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/CellSetModel.java
new file mode 100644
index 00000000000..55d28f8bc1a
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/CellSetModel.java
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlElement;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell;
+import org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet;
+
+import com.google.protobuf.ByteString;
+
+@XmlRootElement(name="CellSet")
+public class CellSetModel implements Serializable, IProtobufWrapper {
+
+ private static final long serialVersionUID = 1L;
+
+  private List<RowModel> rows;
+
+  public CellSetModel() {
+    this.rows = new ArrayList<RowModel>();
+ }
+
+ /**
+   * @param rows
+   */
+  public CellSetModel(List<RowModel> rows) {
+ super();
+ this.rows = rows;
+ }
+
+ /**
+ * Add a row to this cell set
+ *
+ * @param row
+ */
+ public void addRow(RowModel row) {
+ rows.add(row);
+ }
+
+ /**
+ * @return the rows
+ */
+ @XmlElement(name="Row")
+  public List<RowModel> getRows() {
+ return rows;
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ CellSet.Builder builder = CellSet.newBuilder();
+ for (RowModel row: getRows()) {
+ CellSet.Row.Builder rowBuilder = CellSet.Row.newBuilder();
+ rowBuilder.setKey(ByteString.copyFrom(row.getKey()));
+ for (CellModel cell: row.getCells()) {
+ Cell.Builder cellBuilder = Cell.newBuilder();
+ cellBuilder.setColumn(ByteString.copyFrom(cell.getColumn()));
+ cellBuilder.setData(ByteString.copyFrom(cell.getValue()));
+ if (cell.hasUserTimestamp()) {
+ cellBuilder.setTimestamp(cell.getTimestamp());
+ }
+ rowBuilder.addValues(cellBuilder);
+ }
+ builder.addRows(rowBuilder);
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public IProtobufWrapper getObjectFromMessage(byte[] message)
+ throws IOException {
+ CellSet.Builder builder = CellSet.newBuilder();
+ builder.mergeFrom(message);
+ for (CellSet.Row row: builder.getRowsList()) {
+ RowModel rowModel = new RowModel(row.getKey().toByteArray());
+ for (Cell cell: row.getValuesList()) {
+ long timestamp = HConstants.LATEST_TIMESTAMP;
+ if (cell.hasTimestamp()) {
+ timestamp = cell.getTimestamp();
+ }
+ rowModel.addCell(
+ new CellModel(cell.getColumn().toByteArray(), timestamp,
+ cell.getData().toByteArray()));
+ }
+ addRow(rowModel);
+ }
+ return this;
+ }
+}
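
A construction sketch showing how the nested model fits together: one cell set holding a single row with two cells, then serialized through the protobuf path. Keys, columns and values are illustrative.

```java
import org.apache.hadoop.hbase.stargate.model.CellModel;
import org.apache.hadoop.hbase.stargate.model.CellSetModel;
import org.apache.hadoop.hbase.stargate.model.RowModel;
import org.apache.hadoop.hbase.util.Bytes;

public class CellSetSketch {
  public static void main(String[] args) {
    RowModel row = new RowModel(Bytes.toBytes("row1"));
    row.addCell(new CellModel(Bytes.toBytes("info:a"), Bytes.toBytes("1")));
    row.addCell(new CellModel(Bytes.toBytes("info:b"), Bytes.toBytes("2")));
    CellSetModel cellSet = new CellSetModel();
    cellSet.addRow(row);
    byte[] message = cellSet.createProtobufOutput();
    System.out.println("serialized " + cellSet.getRows().size() +
      " row(s) into " + message.length + " bytes");
  }
}
```
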
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/ColumnSchemaModel.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/ColumnSchemaModel.java
new file mode 100644
index 00000000000..302bc3205be
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/ColumnSchemaModel.java
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.xml.bind.annotation.XmlAnyAttribute;
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlType;
+import javax.xml.namespace.QName;
+
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+
+@XmlRootElement(name="ColumnSchema")
+@XmlType(propOrder = {"name"})
+public class ColumnSchemaModel implements Serializable {
+ private static final long serialVersionUID = 1L;
+ private static QName BLOCKCACHE = new QName(HColumnDescriptor.BLOCKCACHE);
+ private static QName BLOCKSIZE = new QName(HColumnDescriptor.BLOCKSIZE);
+ private static QName BLOOMFILTER = new QName(HColumnDescriptor.BLOOMFILTER);
+ private static QName COMPRESSION = new QName(HColumnDescriptor.COMPRESSION);
+ private static QName IN_MEMORY = new QName(HConstants.IN_MEMORY);
+ private static QName TTL = new QName(HColumnDescriptor.TTL);
+ private static QName VERSIONS = new QName(HConstants.VERSIONS);
+
+ private String name;
+  private Map<QName, Object> attrs = new HashMap<QName, Object>();
+
+ public ColumnSchemaModel() {}
+
+ public void addAttribute(String name, Object value) {
+ attrs.put(new QName(name), value);
+ }
+
+  public String getAttribute(String name) {
+    Object o = attrs.get(new QName(name));
+    return o != null ? o.toString() : null;
+  }
+
+ /**
+ * @return the column name
+ */
+ @XmlAttribute
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * @return the map for holding unspecified (user) attributes
+ */
+ @XmlAnyAttribute
+  public Map<QName, Object> getAny() {
+ return attrs;
+ }
+
+ /**
+   * @param name the column name
+ */
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("{ NAME => '");
+ sb.append(name);
+ sb.append('\'');
+    for (Map.Entry<QName, Object> e: attrs.entrySet()) {
+ sb.append(", ");
+ sb.append(e.getKey().getLocalPart());
+ sb.append(" => '");
+ sb.append(e.getValue().toString());
+ sb.append('\'');
+ }
+ sb.append(" }");
+ return sb.toString();
+ }
+
+ // getters and setters for common schema attributes
+
+ // cannot be standard bean type getters and setters, otherwise this would
+ // confuse JAXB
+
+ public boolean __getBlockcache() {
+ Object o = attrs.get(BLOCKCACHE);
+ return o != null ?
+ Boolean.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKCACHE;
+ }
+
+ public int __getBlocksize() {
+ Object o = attrs.get(BLOCKSIZE);
+ return o != null ?
+ Integer.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKSIZE;
+ }
+
+ public boolean __getBloomfilter() {
+ Object o = attrs.get(BLOOMFILTER);
+ return o != null ?
+ Boolean.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_BLOOMFILTER;
+ }
+
+ public String __getCompression() {
+ Object o = attrs.get(COMPRESSION);
+ return o != null ? o.toString() : HColumnDescriptor.DEFAULT_COMPRESSION;
+ }
+
+ public boolean __getInMemory() {
+ Object o = attrs.get(IN_MEMORY);
+ return o != null ?
+ Boolean.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_IN_MEMORY;
+ }
+
+ public int __getTTL() {
+ Object o = attrs.get(TTL);
+ return o != null ?
+ Integer.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_TTL;
+ }
+
+ public int __getVersions() {
+ Object o = attrs.get(VERSIONS);
+ return o != null ?
+ Integer.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_VERSIONS;
+ }
+
+ public void __setBlocksize(int value) {
+ attrs.put(BLOCKSIZE, Integer.toString(value));
+ }
+
+ public void __setBlockcache(boolean value) {
+ attrs.put(BLOCKCACHE, Boolean.toString(value));
+ }
+
+ public void __setBloomfilter(boolean value) {
+ attrs.put(BLOOMFILTER, Boolean.toString(value));
+ }
+
+ public void __setCompression(String value) {
+ attrs.put(COMPRESSION, value);
+ }
+
+ public void __setInMemory(boolean value) {
+ attrs.put(IN_MEMORY, Boolean.toString(value));
+ }
+
+ public void __setTTL(int value) {
+ attrs.put(TTL, Integer.toString(value));
+ }
+
+ public void __setVersions(int value) {
+ attrs.put(VERSIONS, Integer.toString(value));
+ }
+}
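
A small sketch of the double-underscore accessors above, which read and write the well-known schema attributes while leaving JAXB's any-attribute map as the single backing store. Values are illustrative; toString() output order follows the map's iteration order.

```java
import org.apache.hadoop.hbase.stargate.model.ColumnSchemaModel;

public class ColumnSchemaSketch {
  public static void main(String[] args) {
    ColumnSchemaModel family = new ColumnSchemaModel();
    family.setName("info");
    family.__setVersions(1);
    family.__setCompression("GZ");
    // prints a shell-style descriptor, e.g.
    // { NAME => 'info', VERSIONS => '1', COMPRESSION => 'GZ' }
    System.out.println(family);
    System.out.println("versions = " + family.__getVersions());
  }
}
```
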
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/IProtobufWrapper.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/IProtobufWrapper.java
new file mode 100644
index 00000000000..14d2dc5c73b
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/IProtobufWrapper.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+
+public interface IProtobufWrapper {
+ public byte[] createProtobufOutput();
+ public IProtobufWrapper getObjectFromMessage(byte[] message)
+ throws IOException;
+}
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/RowModel.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/RowModel.java
new file mode 100644
index 00000000000..29c2d00d6f5
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/RowModel.java
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+@XmlRootElement(name="Row")
+public class RowModel implements IProtobufWrapper, Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private byte[] key;
+  private List<CellModel> cells = new ArrayList<CellModel>();
+
+ public RowModel() { }
+
+ /**
+ * @param key
+ */
+ public RowModel(final String key) {
+ this(key.getBytes());
+ }
+
+ /**
+ * @param key
+ */
+ public RowModel(final byte[] key) {
+ super();
+ this.key = key;
+    cells = new ArrayList<CellModel>();
+ }
+
+ /**
+ * @param key
+ * @param cells
+ */
+  public RowModel(final String key, final List<CellModel> cells) {
+ this(key.getBytes(), cells);
+ }
+
+ /**
+ * @param key
+ * @param cells
+ */
+  public RowModel(final byte[] key, final List<CellModel> cells) {
+ super();
+ this.key = key;
+ this.cells = cells;
+ }
+
+ /**
+ * Adds a cell to the list of cells for this row
+ *
+ * @param cell
+ */
+ public void addCell(CellModel cell) {
+ cells.add(cell);
+ }
+
+ /**
+ * @return the key
+ */
+ @XmlAttribute
+ public byte[] getKey() {
+ return key;
+ }
+
+ /**
+ * @param key the key to set
+ */
+ public void setKey(byte[] key) {
+ this.key = key;
+ }
+
+ /**
+ * @return the cells
+ */
+ @XmlElement(name="Cell")
+  public List<CellModel> getCells() {
+ return cells;
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ // there is no standalone row protobuf message
+ throw new UnsupportedOperationException(
+ "no protobuf equivalent to RowModel");
+ }
+
+ @Override
+ public IProtobufWrapper getObjectFromMessage(byte[] message)
+ throws IOException {
+ // there is no standalone row protobuf message
+ throw new UnsupportedOperationException(
+ "no protobuf equivalent to RowModel");
+ }
+
+}
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/ScannerModel.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/ScannerModel.java
new file mode 100644
index 00000000000..4e09754b262
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/ScannerModel.java
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import com.google.protobuf.ByteString;
+
+@XmlRootElement(name="Scanner")
+public class ScannerModel implements IProtobufWrapper, Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private byte[] startRow = HConstants.EMPTY_START_ROW;
+  private byte[] endRow = HConstants.EMPTY_END_ROW;
+  private List<byte[]> columns = new ArrayList<byte[]>();
+ private int batch = 1;
+ private long startTime = 0;
+ private long endTime = Long.MAX_VALUE;
+
+ public ScannerModel() {}
+
+  public ScannerModel(byte[] startRow, byte[] endRow, List<byte[]> columns,
+ int batch, long endTime) {
+ super();
+ this.startRow = startRow;
+ this.endRow = endRow;
+ this.columns = columns;
+ this.batch = batch;
+ this.endTime = endTime;
+ }
+
+  public ScannerModel(byte[] startRow, byte[] endRow, List<byte[]> columns,
+ int batch, long startTime, long endTime) {
+ super();
+ this.startRow = startRow;
+ this.endRow = endRow;
+ this.columns = columns;
+ this.batch = batch;
+ this.startTime = startTime;
+ this.endTime = endTime;
+ }
+
+ public void addColumn(byte[] column) {
+ columns.add(column);
+ }
+
+ /**
+ * @return true if a start row was specified
+ */
+ public boolean hasStartRow() {
+ return !Bytes.equals(startRow, HConstants.EMPTY_START_ROW);
+ }
+
+ /**
+ * @return start row
+ */
+ @XmlAttribute
+ public byte[] getStartRow() {
+ return startRow;
+ }
+
+ /**
+ * @return true if an end row was specified
+ */
+ public boolean hasEndRow() {
+ return !Bytes.equals(endRow, HConstants.EMPTY_END_ROW);
+ }
+
+ /**
+ * @return end row
+ */
+ @XmlAttribute
+ public byte[] getEndRow() {
+ return endRow;
+ }
+
+ /**
+ * @return list of columns of interest, or empty for all
+ */
+ @XmlAttribute(name="column")
+  public List<byte[]> getColumns() {
+ return columns;
+ }
+
+ /**
+ * @return the number of cells to return in batch
+ */
+ @XmlAttribute
+ public int getBatch() {
+ return batch;
+ }
+
+ /**
+ * @return the lower bound on timestamps of items of interest
+ */
+ @XmlAttribute
+ public long getStartTime() {
+ return startTime;
+ }
+
+ /**
+ * @return the upper bound on timestamps of items of interest
+ */
+ @XmlAttribute
+ public long getEndTime() {
+ return endTime;
+ }
+
+ /**
+ * @param startRow start row
+ */
+ public void setStartRow(byte[] startRow) {
+ this.startRow = startRow;
+ }
+
+ /**
+ * @param endRow end row
+ */
+ public void setEndRow(byte[] endRow) {
+ this.endRow = endRow;
+ }
+
+ /**
+ * @param batch the number of cells to return in batch
+ */
+ public void setBatch(int batch) {
+ this.batch = batch;
+ }
+
+ /**
+ * @param startTime the lower bound on timestamps of items of interest
+ */
+ public void setStartTime(long startTime) {
+ this.startTime = startTime;
+ }
+
+ /**
+ * @param endTime the upper bound on timestamps of items of interest
+ */
+ public void setEndTime(long endTime) {
+ this.endTime = endTime;
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ Scanner.Builder builder = Scanner.newBuilder();
+ if (!Bytes.equals(startRow, HConstants.EMPTY_START_ROW)) {
+ builder.setStartRow(ByteString.copyFrom(startRow));
+ }
+    if (!Bytes.equals(endRow, HConstants.EMPTY_END_ROW)) {
+ builder.setEndRow(ByteString.copyFrom(endRow));
+ }
+ for (byte[] column: columns) {
+ builder.addColumns(ByteString.copyFrom(column));
+ }
+    builder.setBatch(batch);
+    if (startTime != 0) {
+      builder.setStartTime(startTime);
+    }
+    if (endTime != Long.MAX_VALUE) {
+      builder.setEndTime(endTime);
+    }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public IProtobufWrapper getObjectFromMessage(byte[] message)
+ throws IOException {
+ Scanner.Builder builder = Scanner.newBuilder();
+ builder.mergeFrom(message);
+ if (builder.hasStartRow()) {
+ startRow = builder.getStartRow().toByteArray();
+ }
+ if (builder.hasEndRow()) {
+ endRow = builder.getEndRow().toByteArray();
+ }
+ for (ByteString column: builder.getColumnsList()) {
+ addColumn(column.toByteArray());
+ }
+ if (builder.hasBatch()) {
+ batch = builder.getBatch();
+ }
+ if (builder.hasStartTime()) {
+ startTime = builder.getStartTime();
+ }
+ if (builder.hasEndTime()) {
+ endTime = builder.getEndTime();
+ }
+ return this;
+ }
+}
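
A hedged sketch assembling a scanner specification. In the Stargate protocol a representation like this one is submitted to /&lt;table&gt;/scanner (see the ScannerResource dispatch in TableResource above) to open a server-side scanner; the row keys and column name below are illustrative.

```java
import org.apache.hadoop.hbase.stargate.model.ScannerModel;
import org.apache.hadoop.hbase.util.Bytes;

public class ScannerModelSketch {
  public static void main(String[] args) {
    ScannerModel scanner = new ScannerModel();
    scanner.setStartRow(Bytes.toBytes("row100"));
    scanner.setEndRow(Bytes.toBytes("row200"));   // exclusive upper bound
    scanner.addColumn(Bytes.toBytes("info:"));    // whole column family
    scanner.setBatch(10);                         // cells per batch
    byte[] message = scanner.createProtobufOutput();
    System.out.println("scanner spec is " + message.length + " bytes");
  }
}
```
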
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/StorageClusterStatusModel.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/StorageClusterStatusModel.java
new file mode 100644
index 00000000000..7dbf842e4ee
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/StorageClusterStatusModel.java
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlElementWrapper;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import com.google.protobuf.ByteString;
+
+@XmlRootElement(name="ClusterStatus")
+public class StorageClusterStatusModel
+ implements Serializable, IProtobufWrapper {
+ private static final long serialVersionUID = 1L;
+
+ public static class Node {
+
+ public static class Region {
+ private byte[] name;
+
+ public Region() {}
+
+ public Region(byte[] name) {
+ this.name = name;
+ }
+
+ @XmlAttribute
+ public byte[] getName() {
+ return name;
+ }
+
+ public void setName(byte[] name) {
+ this.name = name;
+ }
+ }
+
+ private String name;
+ private long startCode;
+ private int requests;
+ private List<Region> regions = new ArrayList<Region>();
+
+ public void addRegion(byte[] name) {
+ regions.add(new Region(name));
+ }
+
+ public Region getRegion(int i) {
+ return regions.get(i);
+ }
+
+ public Node() {}
+
+ public Node(String name, long startCode) {
+ this.name = name;
+ this.startCode = startCode;
+ }
+
+ @XmlAttribute
+ public String getName() {
+ return name;
+ }
+
+ @XmlAttribute
+ public long getStartCode() {
+ return startCode;
+ }
+
+ @XmlElement(name="Region")
+ public List<Region> getRegions() {
+ return regions;
+ }
+
+ @XmlAttribute
+ public int getRequests() {
+ return requests;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public void setStartCode(long startCode) {
+ this.startCode = startCode;
+ }
+
+ public void setRegions(List<Region> regions) {
+ this.regions = regions;
+ }
+
+ public void setRequests(int requests) {
+ this.requests = requests;
+ }
+ }
+
+ private List<Node> liveNodes = new ArrayList<Node>();
+ private List<String> deadNodes = new ArrayList<String>();
+ private int regions;
+ private int requests;
+ private double averageLoad;
+
+ public Node addLiveNode(String name, long startCode) {
+ Node node = new Node(name, startCode);
+ liveNodes.add(node);
+ return node;
+ }
+
+ public Node getLiveNode(int i) {
+ return liveNodes.get(i);
+ }
+
+ public void addDeadNode(String node) {
+ deadNodes.add(node);
+ }
+
+ public String getDeadNode(int i) {
+ return deadNodes.get(i);
+ }
+
+ public StorageClusterStatusModel() {}
+
+ @XmlElement(name="Node")
+ @XmlElementWrapper(name="LiveNodes")
+ public List<Node> getLiveNodes() {
+ return liveNodes;
+ }
+
+ @XmlElement(name="Node")
+ @XmlElementWrapper(name="DeadNodes")
+ public List<String> getDeadNodes() {
+ return deadNodes;
+ }
+
+ @XmlAttribute
+ public int getRegions() {
+ return regions;
+ }
+
+ @XmlAttribute
+ public int getRequests() {
+ return requests;
+ }
+
+ @XmlAttribute
+ public double getAverageLoad() {
+ return averageLoad;
+ }
+
+ public void setLiveNodes(List<Node> nodes) {
+ this.liveNodes = nodes;
+ }
+
+ public void setDeadNodes(List<String> nodes) {
+ this.deadNodes = nodes;
+ }
+
+ public void setRegions(int regions) {
+ this.regions = regions;
+ }
+
+ public void setRequests(int requests) {
+ this.requests = requests;
+ }
+
+ public void setAverageLoad(double averageLoad) {
+ this.averageLoad = averageLoad;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append(String.format("%d live servers, %d dead servers, " +
+ "%.4f average load\n\n", liveNodes.size(), deadNodes.size(),
+ averageLoad));
+ if (!liveNodes.isEmpty()) {
+ sb.append(liveNodes.size());
+ sb.append(" live servers\n");
+ for (Node node: liveNodes) {
+ sb.append(" ");
+ sb.append(node.name);
+ sb.append(' ');
+ sb.append(node.startCode);
+ sb.append("\n requests=");
+ sb.append(node.requests);
+ sb.append(", regions=");
+ sb.append(node.regions.size());
+ sb.append("\n\n");
+ for (Node.Region region: node.regions) {
+ sb.append(" ");
+ sb.append(Bytes.toString(region.name));
+ sb.append('\n');
+ }
+ sb.append('\n');
+ }
+ }
+ if (!deadNodes.isEmpty()) {
+ sb.append('\n');
+ sb.append(deadNodes.size());
+ sb.append(" dead servers\n");
+ for (String node: deadNodes) {
+ sb.append(" ");
+ sb.append(node);
+ sb.append('\n');
+ }
+ }
+ return sb.toString();
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ StorageClusterStatus.Builder builder = StorageClusterStatus.newBuilder();
+ builder.setRegions(regions);
+ builder.setRequests(requests);
+ builder.setAverageLoad(averageLoad);
+ for (Node node: liveNodes) {
+ StorageClusterStatus.Node.Builder nodeBuilder =
+ StorageClusterStatus.Node.newBuilder();
+ nodeBuilder.setName(node.name);
+ nodeBuilder.setStartCode(node.startCode);
+ nodeBuilder.setRequests(node.requests);
+ for (Node.Region region: node.regions) {
+ nodeBuilder.addRegions(ByteString.copyFrom(region.name));
+ }
+ builder.addLiveNodes(nodeBuilder);
+ }
+ for (String node: deadNodes) {
+ builder.addDeadNodes(node);
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public IProtobufWrapper getObjectFromMessage(byte[] message)
+ throws IOException {
+ StorageClusterStatus.Builder builder = StorageClusterStatus.newBuilder();
+ builder.mergeFrom(message);
+ if (builder.hasRegions()) {
+ regions = builder.getRegions();
+ }
+ if (builder.hasRequests()) {
+ requests = builder.getRequests();
+ }
+ if (builder.hasAverageLoad()) {
+ averageLoad = builder.getAverageLoad();
+ }
+ for (StorageClusterStatus.Node node: builder.getLiveNodesList()) {
+ long startCode = node.hasStartCode() ? node.getStartCode() : -1;
+ StorageClusterStatusModel.Node nodeModel =
+ addLiveNode(node.getName(), startCode);
+ int requests = node.hasRequests() ? node.getRequests() : 0;
+ nodeModel.setRequests(requests);
+ for (ByteString region: node.getRegionsList()) {
+ nodeModel.addRegion(region.toByteArray());
+ }
+ }
+ for (String node: builder.getDeadNodesList()) {
+ addDeadNode(node);
+ }
+ return this;
+ }
+}
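
A short sketch of populating the cluster status model (illustrative only; the host names and timestamps below are made up):

    StorageClusterStatusModel status = new StorageClusterStatusModel();
    status.setRegions(2);
    status.setRequests(100);
    status.setAverageLoad(1.0);
    StorageClusterStatusModel.Node node =
      status.addLiveNode("node1.example.com:60030", 1245219839331L);
    node.setRequests(100);
    node.addRegion(Bytes.toBytes("table1,,1245219839331"));
    status.addDeadNode("node2.example.com:60030");
    byte[] wire = status.createProtobufOutput(); // or marshal via JAXB
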
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/StorageClusterVersionModel.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/StorageClusterVersionModel.java
new file mode 100644
index 00000000000..81200e7ae43
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/StorageClusterVersionModel.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.Serializable;
+
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlValue;
+
+@XmlRootElement(name="ClusterVersion")
+public class StorageClusterVersionModel implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private String version;
+
+ @XmlValue
+ public String getVersion() {
+ return version;
+ }
+
+ public void setVersion(String version) {
+ this.version = version;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ return version;
+ }
+}
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableInfoModel.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableInfoModel.java
new file mode 100644
index 00000000000..c4a8d8e464b
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableInfoModel.java
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlType;
+
+import org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo;
+
+import com.google.protobuf.ByteString;
+
+@XmlRootElement(name="TableInfo")
+@XmlType(propOrder = {"name","regions"})
+public class TableInfoModel implements Serializable, IProtobufWrapper {
+ private static final long serialVersionUID = 1L;
+
+ private String name;
+ private List<TableRegionModel> regions = new ArrayList<TableRegionModel>();
+
+ public TableInfoModel() {}
+
+ public TableInfoModel(String name) {
+ this.name = name;
+ }
+
+ public void add(TableRegionModel object) {
+ regions.add(object);
+ }
+
+ public TableRegionModel get(int index) {
+ return regions.get(index);
+ }
+
+ /**
+ * @return the table name
+ */
+ @XmlAttribute
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * @return the regions
+ */
+ @XmlElement(name="Region")
+ public List<TableRegionModel> getRegions() {
+ return regions;
+ }
+
+ /**
+ * @param name the table name
+ */
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ /**
+ * @param regions the regions to set
+ */
+ public void setRegions(List<TableRegionModel> regions) {
+ this.regions = regions;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ for(TableRegionModel aRegion : regions) {
+ sb.append(aRegion.toString());
+ sb.append('\n');
+ }
+ return sb.toString();
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ TableInfo.Builder builder = TableInfo.newBuilder();
+ builder.setName(name);
+ for (TableRegionModel aRegion: regions) {
+ TableInfo.Region.Builder regionBuilder = TableInfo.Region.newBuilder();
+ regionBuilder.setName(aRegion.getName());
+ regionBuilder.setId(aRegion.getId());
+ regionBuilder.setStartKey(ByteString.copyFrom(aRegion.getStartKey()));
+ regionBuilder.setEndKey(ByteString.copyFrom(aRegion.getEndKey()));
+ regionBuilder.setLocation(aRegion.getLocation());
+ builder.addRegions(regionBuilder);
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public IProtobufWrapper getObjectFromMessage(byte[] message)
+ throws IOException {
+ TableInfo.Builder builder = TableInfo.newBuilder();
+ builder.mergeFrom(message);
+ setName(builder.getName());
+ for (TableInfo.Region region: builder.getRegionsList()) {
+ add(new TableRegionModel(builder.getName(), region.getId(),
+ region.getStartKey().toByteArray(),
+ region.getEndKey().toByteArray(),
+ region.getLocation()));
+ }
+ return this;
+ }
+}
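
A sketch of a protobuf round trip through this model (illustrative; the table name, keys, and region id are made up):

    TableInfoModel info = new TableInfoModel("content");
    info.add(new TableRegionModel("content", 1245089395409L,
        Bytes.toBytes("abc"), Bytes.toBytes("xyz"),
        "node1.example.com:60030"));
    byte[] wire = info.createProtobufOutput();
    TableInfoModel copy = (TableInfoModel)
        new TableInfoModel().getObjectFromMessage(wire);
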
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableListModel.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableListModel.java
new file mode 100644
index 00000000000..187ba375989
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableListModel.java
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlElementRef;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList;
+
+@XmlRootElement(name="TableList")
+public class TableListModel implements Serializable, IProtobufWrapper {
+
+ private static final long serialVersionUID = 1L;
+
+ private List<TableModel> tables = new ArrayList<TableModel>();
+
+ public TableListModel() {}
+
+ public void add(TableModel object) {
+ tables.add(object);
+ }
+
+ public TableModel get(int index) {
+ return tables.get(index);
+ }
+
+ /**
+ * @return the tables
+ */
+ @XmlElementRef(name="table")
+ public List<TableModel> getTables() {
+ return tables;
+ }
+
+ /**
+ * @param tables the tables to set
+ */
+ public void setTables(List<TableModel> tables) {
+ this.tables = tables;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ for(TableModel aTable : tables) {
+ sb.append(aTable.toString());
+ sb.append('\n');
+ }
+ return sb.toString();
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ TableList.Builder builder = TableList.newBuilder();
+ for (TableModel aTable : tables) {
+ builder.addName(aTable.getName());
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public IProtobufWrapper getObjectFromMessage(byte[] message)
+ throws IOException {
+ TableList.Builder builder = TableList.newBuilder();
+ builder.mergeFrom(message);
+ for (String table: builder.getNameList()) {
+ this.add(new TableModel(table));
+ }
+ return this;
+ }
+}
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableModel.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableModel.java
new file mode 100644
index 00000000000..7edbd76d96f
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableModel.java
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.Serializable;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlRootElement;
+
+@XmlRootElement(name="table")
+public class TableModel implements Serializable {
+
+ private static final long serialVersionUID = 1L;
+
+ private String name;
+
+ public TableModel() {}
+
+ /**
+ * @param name the table name
+ */
+ public TableModel(String name) {
+ super();
+ this.name = name;
+ }
+
+ /**
+ * @return the name
+ */
+ @XmlAttribute
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * @param name the name to set
+ */
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ return this.name;
+ }
+}
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableRegionModel.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableRegionModel.java
new file mode 100644
index 00000000000..8dc5254047a
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableRegionModel.java
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.Serializable;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlType;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+@XmlRootElement(name="Region")
+@XmlType(propOrder = {"name","id","startKey","endKey","location"})
+public class TableRegionModel implements Serializable {
+
+ private static final long serialVersionUID = 1L;
+
+ private String table;
+ private long id;
+ private byte[] startKey;
+ private byte[] endKey;
+ private String location;
+
+ /**
+ * Constructor
+ */
+ public TableRegionModel() {}
+
+ /**
+ * Constructor
+ * @param table the table name
+ * @param id the encoded id of the region
+ * @param startKey the start key of the region
+ * @param endKey the end key of the region
+ * @param location the name and port of the region server hosting the region
+ */
+ public TableRegionModel(String table, long id, byte[] startKey,
+ byte[] endKey, String location) {
+ this.table = table;
+ this.id = id;
+ this.startKey = startKey;
+ this.endKey = endKey;
+ this.location = location;
+ }
+
+ /**
+ * @return the region name
+ */
+ @XmlAttribute
+ public String getName() {
+ StringBuilder sb = new StringBuilder();
+ sb.append(table);
+ sb.append(',');
+ sb.append(Bytes.toString(startKey));
+ sb.append(',');
+ sb.append(id);
+ return sb.toString();
+ }
+
+ /**
+ * @return the encoded region id
+ */
+ @XmlAttribute
+ public long getId() {
+ return id;
+ }
+
+ /**
+ * @return the start key
+ */
+ @XmlAttribute
+ public byte[] getStartKey() {
+ return startKey;
+ }
+
+ /**
+ * @return the end key
+ */
+ @XmlAttribute
+ public byte[] getEndKey() {
+ return endKey;
+ }
+
+ /**
+ * @return the name and port of the region server hosting the region
+ */
+ @XmlAttribute
+ public String getLocation() {
+ return location;
+ }
+
+ /**
+ * @param name region printable name
+ */
+ public void setName(String name) {
+ String split[] = name.split(",");
+ table = split[0];
+ startKey = Bytes.toBytes(split[1]);
+ id = Long.valueOf(split[2]);
+ }
+
+ /**
+ * @param id the region's encoded id
+ */
+ public void setId(long id) {
+ this.id = id;
+ }
+
+ /**
+ * @param startKey the start key
+ */
+ public void setStartKey(byte[] startKey) {
+ this.startKey = startKey;
+ }
+
+ /**
+ * @param endKey the end key
+ */
+ public void setEndKey(byte[] endKey) {
+ this.endKey = endKey;
+ }
+
+ /**
+ * @param location the name and port of the region server hosting the region
+ */
+ public void setLocation(String location) {
+ this.location = location;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append(getName());
+ sb.append(" [\n id=");
+ sb.append(id);
+ sb.append("\n startKey='");
+ sb.append(Bytes.toString(startKey));
+ sb.append("'\n endKey='");
+ sb.append(Bytes.toString(endKey));
+ sb.append("'\n location='");
+ sb.append(location);
+ sb.append("'\n]\n");
+ return sb.toString();
+ }
+}
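
Note that setName reverses the "table,startKey,id" encoding produced by getName, and so assumes the start key itself contains no comma. For illustration (values made up):

    TableRegionModel region = new TableRegionModel();
    region.setName("content,abc,1245089395409"); // table, start key, region id
    // now region.getId() == 1245089395409L and the start key is "abc"
    region.setEndKey(Bytes.toBytes("xyz"));
    region.setLocation("node1.example.com:60030");
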
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableSchemaModel.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableSchemaModel.java
new file mode 100644
index 00000000000..3d7f937d3c9
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/TableSchemaModel.java
@@ -0,0 +1,272 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import javax.xml.bind.annotation.XmlAnyAttribute;
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlType;
+import javax.xml.namespace.QName;
+
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema;
+import org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema;
+
+@XmlRootElement(name="TableSchema")
+@XmlType(propOrder = {"name","columns"})
+public class TableSchemaModel implements Serializable, IProtobufWrapper {
+ private static final long serialVersionUID = 1L;
+ private static final QName IN_MEMORY = new QName(HConstants.IN_MEMORY);
+ private static final QName IS_META = new QName(HTableDescriptor.IS_META);
+ private static final QName IS_ROOT = new QName(HTableDescriptor.IS_ROOT);
+ private static final QName READONLY = new QName(HTableDescriptor.READONLY);
+ private static final QName TTL = new QName(HColumnDescriptor.TTL);
+ private static final QName VERSIONS = new QName(HConstants.VERSIONS);
+ private static final QName COMPRESSION =
+ new QName(HColumnDescriptor.COMPRESSION);
+
+ private String name;
+ private Map<QName,Object> attrs = new HashMap<QName,Object>();
+ private List<ColumnSchemaModel> columns = new ArrayList<ColumnSchemaModel>();
+
+ public TableSchemaModel() {}
+
+ public void addAttribute(String name, Object value) {
+ attrs.put(new QName(name), value);
+ }
+
+ public String getAttribute(String name) {
+ return attrs.get(new QName(name)).toString();
+ }
+
+ public void addColumnFamily(ColumnSchemaModel object) {
+ columns.add(object);
+ }
+
+ public ColumnSchemaModel getColumnFamily(int index) {
+ return columns.get(index);
+ }
+
+ /**
+ * @return the table name
+ */
+ @XmlAttribute
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * @return the map for holding unspecified (user) attributes
+ */
+ @XmlAnyAttribute
+ public Map<QName,Object> getAny() {
+ return attrs;
+ }
+
+ /**
+ * @return the columns
+ */
+ @XmlElement(name="ColumnSchema")
+ public List<ColumnSchemaModel> getColumns() {
+ return columns;
+ }
+
+ /**
+ * @param name the table name
+ */
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ /**
+ * @param columns the columns to set
+ */
+ public void setColumns(List<ColumnSchemaModel> columns) {
+ this.columns = columns;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("{ NAME=> '");
+ sb.append(name);
+ sb.append('\'');
+ for (Map.Entry<QName,Object> e: attrs.entrySet()) {
+ sb.append(", ");
+ sb.append(e.getKey().getLocalPart());
+ sb.append(" => '");
+ sb.append(e.getValue().toString());
+ sb.append('\'');
+ }
+ sb.append(", COLUMNS => [ ");
+ Iterator<ColumnSchemaModel> i = columns.iterator();
+ while (i.hasNext()) {
+ ColumnSchemaModel family = i.next();
+ sb.append(family.toString());
+ if (i.hasNext()) {
+ sb.append(',');
+ }
+ sb.append(' ');
+ }
+ sb.append("] }");
+ return sb.toString();
+ }
+
+ // getters and setters for common schema attributes
+
+ // cannot be standard bean type getters and setters, otherwise this would
+ // confuse JAXB
+
+ public boolean __getInMemory() {
+ Object o = attrs.get(IN_MEMORY);
+ return o != null ?
+ Boolean.valueOf(o.toString()) : HTableDescriptor.DEFAULT_IN_MEMORY;
+ }
+
+ public boolean __getIsMeta() {
+ Object o = attrs.get(IS_META);
+ return o != null ? Boolean.valueOf(o.toString()) : false;
+ }
+
+ public boolean __getIsRoot() {
+ Object o = attrs.get(IS_ROOT);
+ return o != null ? Boolean.valueOf(o.toString()) : false;
+ }
+
+ public boolean __getReadOnly() {
+ Object o = attrs.get(READONLY);
+ return o != null ?
+ Boolean.valueOf(o.toString()) : HTableDescriptor.DEFAULT_READONLY;
+ }
+
+ public void __setInMemory(boolean value) {
+ attrs.put(IN_MEMORY, Boolean.toString(value));
+ }
+
+ public void __setIsMeta(boolean value) {
+ attrs.put(IS_META, Boolean.toString(value));
+ }
+
+ public void __setIsRoot(boolean value) {
+ attrs.put(IS_ROOT, Boolean.toString(value));
+ }
+
+ public void __setReadOnly(boolean value) {
+ attrs.put(READONLY, Boolean.toString(value));
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ TableSchema.Builder builder = TableSchema.newBuilder();
+ builder.setName(name);
+ for (Map.Entry<QName,Object> e: attrs.entrySet()) {
+ TableSchema.Attribute.Builder attrBuilder =
+ TableSchema.Attribute.newBuilder();
+ attrBuilder.setName(e.getKey().getLocalPart());
+ attrBuilder.setValue(e.getValue().toString());
+ builder.addAttrs(attrBuilder);
+ }
+ for (ColumnSchemaModel family: columns) {
+ Map<QName,Object> familyAttrs = family.getAny();
+ ColumnSchema.Builder familyBuilder = ColumnSchema.newBuilder();
+ familyBuilder.setName(family.getName());
+ for (Map.Entry<QName,Object> e: familyAttrs.entrySet()) {
+ ColumnSchema.Attribute.Builder attrBuilder =
+ ColumnSchema.Attribute.newBuilder();
+ attrBuilder.setName(e.getKey().getLocalPart());
+ attrBuilder.setValue(e.getValue().toString());
+ familyBuilder.addAttrs(attrBuilder);
+ }
+ if (familyAttrs.containsKey(TTL)) {
+ familyBuilder.setTtl(
+ Integer.valueOf(familyAttrs.get(TTL).toString()));
+ }
+ if (familyAttrs.containsKey(VERSIONS)) {
+ familyBuilder.setMaxVersions(
+ Integer.valueOf(familyAttrs.get(VERSIONS).toString()));
+ }
+ if (familyAttrs.containsKey(COMPRESSION)) {
+ familyBuilder.setCompression(familyAttrs.get(COMPRESSION).toString());
+ }
+ builder.addColumns(familyBuilder);
+ }
+ if (attrs.containsKey(IN_MEMORY)) {
+ builder.setInMemory(
+ Boolean.valueOf(attrs.get(IN_MEMORY).toString()));
+ }
+ if (attrs.containsKey(READONLY)) {
+ builder.setReadOnly(
+ Boolean.valueOf(attrs.get(READONLY).toString()));
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public IProtobufWrapper getObjectFromMessage(byte[] message)
+ throws IOException {
+ TableSchema.Builder builder = TableSchema.newBuilder();
+ builder.mergeFrom(message);
+ this.setName(builder.getName());
+ for (TableSchema.Attribute attr: builder.getAttrsList()) {
+ this.addAttribute(attr.getName(), attr.getValue());
+ }
+ if (builder.hasInMemory()) {
+ this.addAttribute(HConstants.IN_MEMORY, builder.getInMemory());
+ }
+ if (builder.hasReadOnly()) {
+ this.addAttribute(HTableDescriptor.READONLY, builder.getReadOnly());
+ }
+ for (ColumnSchema family: builder.getColumnsList()) {
+ ColumnSchemaModel familyModel = new ColumnSchemaModel();
+ familyModel.setName(family.getName());
+ for (ColumnSchema.Attribute attr: family.getAttrsList()) {
+ familyModel.addAttribute(attr.getName(), attr.getValue());
+ }
+ if (family.hasTtl()) {
+ familyModel.addAttribute(HColumnDescriptor.TTL, family.getTtl());
+ }
+ if (family.hasMaxVersions()) {
+ familyModel.addAttribute(HConstants.VERSIONS,
+ family.getMaxVersions());
+ }
+ if (family.hasCompression()) {
+ familyModel.addAttribute(HColumnDescriptor.COMPRESSION,
+ family.getCompression());
+ }
+ this.addColumnFamily(familyModel);
+ }
+ return this;
+ }
+}
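
A sketch of assembling a schema by hand (illustrative; the table and family names are made up, and ColumnSchemaModel's setName/addAttribute methods are assumed to behave as their use in getObjectFromMessage above suggests):

    TableSchemaModel schema = new TableSchemaModel();
    schema.setName("users");
    schema.__setReadOnly(false);
    ColumnSchemaModel family = new ColumnSchemaModel();
    family.setName("info");
    family.addAttribute(HConstants.VERSIONS, 3);
    family.addAttribute(HColumnDescriptor.TTL, 86400);
    schema.addColumnFamily(family);
    byte[] wire = schema.createProtobufOutput();
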
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/VersionModel.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/VersionModel.java
new file mode 100644
index 00000000000..5d99574fa29
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/model/VersionModel.java
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+import javax.servlet.ServletContext;
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.hbase.stargate.RESTServlet;
+import org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version;
+
+import com.sun.jersey.spi.container.servlet.ServletContainer;
+
+@XmlRootElement(name="Version")
+public class VersionModel implements Serializable, IProtobufWrapper {
+
+ private static final long serialVersionUID = 1L;
+
+ private String stargateVersion;
+ private String jvmVersion;
+ private String osVersion;
+ private String serverVersion;
+ private String jerseyVersion;
+
+ public VersionModel() {}
+
+ public VersionModel(ServletContext context) {
+ stargateVersion = RESTServlet.VERSION_STRING;
+ jvmVersion = System.getProperty("java.vm.vendor") + ' ' +
+ System.getProperty("java.version") + '-' +
+ System.getProperty("java.vm.version");
+ osVersion = System.getProperty("os.name") + ' ' +
+ System.getProperty("os.version") + ' ' +
+ System.getProperty("os.arch");
+ serverVersion = context.getServerInfo();
+ jerseyVersion = ServletContainer.class.getPackage()
+ .getImplementationVersion();
+ }
+
+ @XmlAttribute(name="Stargate")
+ public String getStargateVersion() {
+ return stargateVersion;
+ }
+
+ @XmlAttribute(name="JVM")
+ public String getJvmVersion() {
+ return jvmVersion;
+ }
+
+ @XmlAttribute(name="OS")
+ public String getOsVersion() {
+ return osVersion;
+ }
+
+ @XmlAttribute(name="Server")
+ public String getServerVersion() {
+ return serverVersion;
+ }
+
+ @XmlAttribute(name="Jersey")
+ public String getJerseyVersion() {
+ return jerseyVersion;
+ }
+
+ public void setStargateVersion(String version) {
+ this.stargateVersion = version;
+ }
+
+ public void setOsVersion(String version) {
+ this.osVersion = version;
+ }
+
+ public void setJvmVersion(String version) {
+ this.jvmVersion = version;
+ }
+
+ public void setServerVersion(String version) {
+ this.serverVersion = version;
+ }
+
+ public void setJerseyVersion(String version) {
+ this.jerseyVersion = version;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("Stargate ");
+ sb.append(stargateVersion);
+ sb.append(" [JVM: ");
+ sb.append(jvmVersion);
+ sb.append("] [OS: ");
+ sb.append(osVersion);
+ sb.append("] [Server: ");
+ sb.append(serverVersion);
+ sb.append("] [Jersey: ");
+ sb.append(jerseyVersion);
+ sb.append("]\n");
+ return sb.toString();
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ Version.Builder builder = Version.newBuilder();
+ builder.setStargateVersion(stargateVersion);
+ builder.setJvmVersion(jvmVersion);
+ builder.setOsVersion(osVersion);
+ builder.setServerVersion(serverVersion);
+ builder.setJerseyVersion(jerseyVersion);
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public IProtobufWrapper getObjectFromMessage(byte[] message)
+ throws IOException {
+ Version.Builder builder = Version.newBuilder();
+ builder.mergeFrom(message);
+ if (builder.hasStargateVersion()) {
+ stargateVersion = builder.getStargateVersion();
+ }
+ if (builder.hasJvmVersion()) {
+ jvmVersion = builder.getJvmVersion();
+ }
+ if (builder.hasOsVersion()) {
+ osVersion = builder.getOsVersion();
+ }
+ if (builder.hasServerVersion()) {
+ serverVersion = builder.getServerVersion();
+ }
+ if (builder.hasJerseyVersion()) {
+ jerseyVersion = builder.getJerseyVersion();
+ }
+ return this;
+ }
+}
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/CellMessage.proto b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/CellMessage.proto
new file mode 100644
index 00000000000..fbe1e979bb4
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/CellMessage.proto
@@ -0,0 +1,26 @@
+// Copyright 2009 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message Cell {
+ optional bytes row = 1; // unused if Cell is in a CellSet
+ optional bytes column = 2;
+ optional int64 timestamp = 3;
+ optional bytes data = 4;
+}
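
The protoc-generated class for this message (CellMessage.java, added later in this patch) follows the standard protobuf builder pattern; for instance (values made up):

    Cell cell = Cell.newBuilder()
        .setRow(ByteString.copyFromUtf8("row1")) // omit inside a CellSet
        .setColumn(ByteString.copyFromUtf8("info:name"))
        .setTimestamp(1245219839331L)
        .setData(ByteString.copyFromUtf8("value"))
        .build();
    byte[] wire = cell.toByteArray();
    Cell parsed = Cell.parseFrom(wire);
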
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/CellSetMessage.proto b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/CellSetMessage.proto
new file mode 100644
index 00000000000..7dc337d6add
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/CellSetMessage.proto
@@ -0,0 +1,29 @@
+// Copyright 2009 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import "CellMessage.proto";
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message CellSet {
+ message Row {
+ required bytes key = 1;
+ repeated Cell values = 2;
+ }
+ repeated Row rows = 1;
+}
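
Rows nest their cells, and a Cell's row field is left unset when it appears inside a CellSet (see the comment in CellMessage.proto). With the generated classes, and assuming the standard protoc builder API, this looks like:

    CellSet cellSet = CellSet.newBuilder()
        .addRows(CellSet.Row.newBuilder()
            .setKey(ByteString.copyFromUtf8("row1"))
            .addValues(Cell.newBuilder()
                .setColumn(ByteString.copyFromUtf8("info:name"))
                .setTimestamp(1245219839331L)
                .setData(ByteString.copyFromUtf8("value"))))
        .build();
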
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/ColumnSchemaMessage.proto b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/ColumnSchemaMessage.proto
new file mode 100644
index 00000000000..5778acfe685
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/ColumnSchemaMessage.proto
@@ -0,0 +1,32 @@
+// Copyright 2009 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message ColumnSchema {
+ optional string name = 1;
+ message Attribute {
+ required string name = 1;
+ required string value = 2;
+ }
+ repeated Attribute attrs = 2;
+ // optional helpful encodings of commonly used attributes
+ optional int32 ttl = 3;
+ optional int32 maxVersions = 4;
+ optional string compression = 5;
+}
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/ScannerMessage.proto b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/ScannerMessage.proto
new file mode 100644
index 00000000000..754312731b3
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/ScannerMessage.proto
@@ -0,0 +1,28 @@
+// Copyright 2009 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message Scanner {
+ optional bytes startRow = 1;
+ optional bytes endRow = 2;
+ repeated bytes columns = 3;
+ optional int32 batch = 4;
+ optional int64 startTime = 5;
+ optional int64 endTime = 6;
+}
\ No newline at end of file
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/StorageClusterStatusMessage.proto b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/StorageClusterStatusMessage.proto
new file mode 100644
index 00000000000..2dadaf8864d
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/StorageClusterStatusMessage.proto
@@ -0,0 +1,35 @@
+// Copyright 2009 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message StorageClusterStatus {
+ message Node {
+ required string name = 1; // name:port
+ optional int64 startCode = 4;
+ optional int32 requests = 2;
+ repeated bytes regions = 3;
+ }
+ // node status
+ repeated Node liveNodes = 1;
+ repeated string deadNodes = 2;
+ // summary statistics
+ optional int32 regions = 3;
+ optional int32 requests = 4;
+ optional double averageLoad = 5;
+}
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/TableInfoMessage.proto b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/TableInfoMessage.proto
new file mode 100644
index 00000000000..6357a3122ba
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/TableInfoMessage.proto
@@ -0,0 +1,31 @@
+// Copyright 2009 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message TableInfo {
+ required string name = 1;
+ message Region {
+ required string name = 1;
+ optional bytes startKey = 2;
+ optional bytes endKey = 3;
+ optional int64 id = 4;
+ optional string location = 5;
+ }
+ repeated Region regions = 2;
+}
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/TableListMessage.proto b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/TableListMessage.proto
new file mode 100644
index 00000000000..13b6cd2fb53
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/TableListMessage.proto
@@ -0,0 +1,23 @@
+// Copyright 2009 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message TableList {
+ repeated string name = 1;
+}
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/TableSchemaMessage.proto b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/TableSchemaMessage.proto
new file mode 100644
index 00000000000..1a55df80099
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/TableSchemaMessage.proto
@@ -0,0 +1,34 @@
+// Copyright 2009 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import "ColumnSchemaMessage.proto";
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message TableSchema {
+ optional string name = 1;
+ message Attribute {
+ required string name = 1;
+ required string value = 2;
+ }
+ repeated Attribute attrs = 2;
+ repeated ColumnSchema columns = 3;
+ // optional helpful encodings of commonly used attributes
+ optional bool inMemory = 4;
+ optional bool readOnly = 5;
+}
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/VersionMessage.proto b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/VersionMessage.proto
new file mode 100644
index 00000000000..15429861010
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/VersionMessage.proto
@@ -0,0 +1,27 @@
+// Copyright 2009 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message Version {
+ optional string stargateVersion = 1;
+ optional string jvmVersion = 2;
+ optional string osVersion = 3;
+ optional string serverVersion = 4;
+ optional string jerseyVersion = 5;
+}
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/CellMessage.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/CellMessage.java
new file mode 100644
index 00000000000..35606991d03
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/CellMessage.java
@@ -0,0 +1,443 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+public final class CellMessage {
+ private CellMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public static final class Cell extends
+ com.google.protobuf.GeneratedMessage {
+ // Use Cell.newBuilder() to construct.
+ private Cell() {}
+
+ private static final Cell defaultInstance = new Cell();
+ public static Cell getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Cell getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Cell_descriptor;
+ }
+
+ @Override
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Cell_fieldAccessorTable;
+ }
+
+ // optional bytes row = 1;
+ public static final int ROW_FIELD_NUMBER = 1;
+ private boolean hasRow;
+ private com.google.protobuf.ByteString row_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasRow() { return hasRow; }
+ public com.google.protobuf.ByteString getRow() { return row_; }
+
+ // optional bytes column = 2;
+ public static final int COLUMN_FIELD_NUMBER = 2;
+ private boolean hasColumn;
+ private com.google.protobuf.ByteString column_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasColumn() { return hasColumn; }
+ public com.google.protobuf.ByteString getColumn() { return column_; }
+
+ // optional int64 timestamp = 3;
+ public static final int TIMESTAMP_FIELD_NUMBER = 3;
+ private boolean hasTimestamp;
+ private long timestamp_ = 0L;
+ public boolean hasTimestamp() { return hasTimestamp; }
+ public long getTimestamp() { return timestamp_; }
+
+ // optional bytes data = 4;
+ public static final int DATA_FIELD_NUMBER = 4;
+ private boolean hasData;
+ private com.google.protobuf.ByteString data_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasData() { return hasData; }
+ public com.google.protobuf.ByteString getData() { return data_; }
+
+ @Override
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ @Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (hasRow()) {
+ output.writeBytes(1, getRow());
+ }
+ if (hasColumn()) {
+ output.writeBytes(2, getColumn());
+ }
+ if (hasTimestamp()) {
+ output.writeInt64(3, getTimestamp());
+ }
+ if (hasData()) {
+ output.writeBytes(4, getData());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ @Override
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (hasRow()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getRow());
+ }
+ if (hasColumn()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getColumn());
+ }
+ if (hasTimestamp()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(3, getTimestamp());
+ }
+ if (hasData()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(4, getData());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return new Builder(); }
+ public Builder newBuilderForType() { return new Builder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell prototype) {
+ return new Builder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.newBuilder()
+ private Builder() {}
+
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell result = new org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell();
+
+ @Override
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell internalGetResult() {
+ return result;
+ }
+
+ @Override
+ public Builder clear() {
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell();
+ return this;
+ }
+
+ @Override
+ public Builder clone() {
+ return new Builder().mergeFrom(result);
+ }
+
+ @Override
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell build() {
+ if (result != null && !isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder."); }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ @Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.getDefaultInstance()) return this;
+ if (other.hasRow()) {
+ setRow(other.getRow());
+ }
+ if (other.hasColumn()) {
+ setColumn(other.getColumn());
+ }
+ if (other.hasTimestamp()) {
+ setTimestamp(other.getTimestamp());
+ }
+ if (other.hasData()) {
+ setData(other.getData());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return mergeFrom(input,
+ com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
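+ // Each wire tag encodes (field_number << 3) | wire_type: tag 0 marks
+ // end of input, 10 = field 1 (length-delimited), 18 = field 2,
+ // 24 = field 3 (varint), 34 = field 4.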
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ setRow(input.readBytes());
+ break;
+ }
+ case 18: {
+ setColumn(input.readBytes());
+ break;
+ }
+ case 24: {
+ setTimestamp(input.readInt64());
+ break;
+ }
+ case 34: {
+ setData(input.readBytes());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // optional bytes row = 1;
+ public boolean hasRow() {
+ return result.hasRow();
+ }
+ public com.google.protobuf.ByteString getRow() {
+ return result.getRow();
+ }
+ public Builder setRow(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasRow = true;
+ result.row_ = value;
+ return this;
+ }
+ public Builder clearRow() {
+ result.hasRow = false;
+ result.row_ = com.google.protobuf.ByteString.EMPTY;
+ return this;
+ }
+
+ // optional bytes column = 2;
+ public boolean hasColumn() {
+ return result.hasColumn();
+ }
+ public com.google.protobuf.ByteString getColumn() {
+ return result.getColumn();
+ }
+ public Builder setColumn(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasColumn = true;
+ result.column_ = value;
+ return this;
+ }
+ public Builder clearColumn() {
+ result.hasColumn = false;
+ result.column_ = com.google.protobuf.ByteString.EMPTY;
+ return this;
+ }
+
+ // optional int64 timestamp = 3;
+ public boolean hasTimestamp() {
+ return result.hasTimestamp();
+ }
+ public long getTimestamp() {
+ return result.getTimestamp();
+ }
+ public Builder setTimestamp(long value) {
+ result.hasTimestamp = true;
+ result.timestamp_ = value;
+ return this;
+ }
+ public Builder clearTimestamp() {
+ result.hasTimestamp = false;
+ result.timestamp_ = 0L;
+ return this;
+ }
+
+ // optional bytes data = 4;
+ public boolean hasData() {
+ return result.hasData();
+ }
+ public com.google.protobuf.ByteString getData() {
+ return result.getData();
+ }
+ public Builder setData(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasData = true;
+ result.data_ = value;
+ return this;
+ }
+ public Builder clearData() {
+ result.hasData = false;
+ result.data_ = com.google.protobuf.ByteString.EMPTY;
+ return this;
+ }
+ }
+
+ static {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.getDescriptor();
+ }
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Cell_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Cell_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String descriptorData =
+ "\n\021CellMessage.proto\0223org.apache.hadoop.h" +
+ "base.stargate.protobuf.generated\"D\n\004Cell" +
+ "\022\013\n\003row\030\001 \001(\014\022\016\n\006column\030\002 \001(\014\022\021\n\ttimesta" +
+ "mp\030\003 \001(\003\022\014\n\004data\030\004 \001(\014";
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Cell_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Cell_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Cell_descriptor,
+ new java.lang.String[] { "Row", "Column", "Timestamp", "Data", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+}
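Reviewer note: for reference, a minimal usage sketch of the generated Cell API. The row/column/value literals are illustrative, and it assumes the generated class and the bundled protobuf-java-2.1.0.jar are on the classpath:

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell;

    public class CellRoundTrip {
      public static void main(String[] args) throws Exception {
        // All four Cell fields are optional, so any subset may be set.
        Cell cell = Cell.newBuilder()
            .setRow(ByteString.copyFromUtf8("row1"))         // illustrative key
            .setColumn(ByteString.copyFromUtf8("info:name")) // illustrative column
            .setTimestamp(1234567890L)
            .setData(ByteString.copyFromUtf8("value"))
            .build();

        // Round-trip through the wire format using the parseFrom overloads above.
        byte[] wire = cell.toByteArray();
        Cell parsed = Cell.parseFrom(wire);
        System.out.println(parsed.getTimestamp());           // prints 1234567890
      }
    }

Note that buildPartial() hands out the internal result and nulls it, so each Builder is single-use; derive a fresh one with toBuilder() or newBuilder(prototype) rather than reusing it.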
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/CellSetMessage.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/CellSetMessage.java
new file mode 100644
index 00000000000..d752d0958e5
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/CellSetMessage.java
@@ -0,0 +1,743 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+public final class CellSetMessage {
+ private CellSetMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public static final class CellSet extends
+ com.google.protobuf.GeneratedMessage {
+ // Use CellSet.newBuilder() to construct.
+ private CellSet() {}
+
+ private static final CellSet defaultInstance = new CellSet();
+ public static CellSet getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public CellSet getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_descriptor;
+ }
+
+ @Override
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_fieldAccessorTable;
+ }
+
+ public static final class Row extends
+ com.google.protobuf.GeneratedMessage {
+ // Use Row.newBuilder() to construct.
+ private Row() {}
+
+ private static final Row defaultInstance = new Row();
+ public static Row getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Row getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_Row_descriptor;
+ }
+
+ @Override
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_Row_fieldAccessorTable;
+ }
+
+ // required bytes key = 1;
+ public static final int KEY_FIELD_NUMBER = 1;
+ private boolean hasKey;
+ private com.google.protobuf.ByteString key_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasKey() { return hasKey; }
+ public com.google.protobuf.ByteString getKey() { return key_; }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.Cell values = 2;
+ public static final int VALUES_FIELD_NUMBER = 2;
+ private java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell> values_ =
+ java.util.Collections.emptyList();
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell> getValuesList() {
+ return values_;
+ }
+ public int getValuesCount() { return values_.size(); }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell getValues(int index) {
+ return values_.get(index);
+ }
+
+ @Override
+ public final boolean isInitialized() {
+ if (!hasKey) return false;
+ return true;
+ }
+
+ @Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (hasKey()) {
+ output.writeBytes(1, getKey());
+ }
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell element : getValuesList()) {
+ output.writeMessage(2, element);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ @Override
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (hasKey()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getKey());
+ }
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell element : getValuesList()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, element);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return new Builder(); }
+ public Builder newBuilderForType() { return new Builder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row prototype) {
+ return new Builder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.newBuilder()
+ private Builder() {}
+
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row result = new org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row();
+
+ @Override
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row internalGetResult() {
+ return result;
+ }
+
+ @Override
+ public Builder clear() {
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row();
+ return this;
+ }
+
+ @Override
+ public Builder clone() {
+ return new Builder().mergeFrom(result);
+ }
+
+ @Override
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row build() {
+ if (result != null && !isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder."); }
+ if (result.values_ != java.util.Collections.EMPTY_LIST) {
+ result.values_ =
+ java.util.Collections.unmodifiableList(result.values_);
+ }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ @Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.getDefaultInstance()) return this;
+ if (other.hasKey()) {
+ setKey(other.getKey());
+ }
+ if (!other.values_.isEmpty()) {
+ if (result.values_.isEmpty()) {
+ result.values_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell>();
+ }
+ result.values_.addAll(other.values_);
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return mergeFrom(input,
+ com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ setKey(input.readBytes());
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.Builder subBuilder = org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.newBuilder();
+ input.readMessage(subBuilder, extensionRegistry);
+ addValues(subBuilder.buildPartial());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // required bytes key = 1;
+ public boolean hasKey() {
+ return result.hasKey();
+ }
+ public com.google.protobuf.ByteString getKey() {
+ return result.getKey();
+ }
+ public Builder setKey(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasKey = true;
+ result.key_ = value;
+ return this;
+ }
+ public Builder clearKey() {
+ result.hasKey = false;
+ result.key_ = com.google.protobuf.ByteString.EMPTY;
+ return this;
+ }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.Cell values = 2;
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell> getValuesList() {
+ return java.util.Collections.unmodifiableList(result.values_);
+ }
+ public int getValuesCount() {
+ return result.getValuesCount();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell getValues(int index) {
+ return result.getValues(index);
+ }
+ public Builder setValues(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.values_.set(index, value);
+ return this;
+ }
+ public Builder setValues(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.Builder builderForValue) {
+ result.values_.set(index, builderForValue.build());
+ return this;
+ }
+ public Builder addValues(org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ if (result.values_.isEmpty()) {
+ result.values_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell>();
+ }
+ result.values_.add(value);
+ return this;
+ }
+ public Builder addValues(org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.Builder builderForValue) {
+ if (result.values_.isEmpty()) {
+ result.values_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell>();
+ }
+ result.values_.add(builderForValue.build());
+ return this;
+ }
+ public Builder addAllValues(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell> values) {
+ if (result.values_.isEmpty()) {
+ result.values_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell>();
+ }
+ super.addAll(values, result.values_);
+ return this;
+ }
+ public Builder clearValues() {
+ result.values_ = java.util.Collections.emptyList();
+ return this;
+ }
+ }
+
+ static {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.getDescriptor();
+ }
+ }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.CellSet.Row rows = 1;
+ public static final int ROWS_FIELD_NUMBER = 1;
+ private java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row> rows_ =
+ java.util.Collections.emptyList();
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row> getRowsList() {
+ return rows_;
+ }
+ public int getRowsCount() { return rows_.size(); }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row getRows(int index) {
+ return rows_.get(index);
+ }
+
+ @Override
+ public final boolean isInitialized() {
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row element : getRowsList()) {
+ if (!element.isInitialized()) return false;
+ }
+ return true;
+ }
+
+ @Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row element : getRowsList()) {
+ output.writeMessage(1, element);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ @Override
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row element : getRowsList()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, element);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return new Builder(); }
+ public Builder newBuilderForType() { return new Builder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet prototype) {
+ return new Builder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.newBuilder()
+ private Builder() {}
+
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet result = new org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet();
+
+ @Override
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet internalGetResult() {
+ return result;
+ }
+
+ @Override
+ public Builder clear() {
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet();
+ return this;
+ }
+
+ @Override
+ public Builder clone() {
+ return new Builder().mergeFrom(result);
+ }
+
+ @Override
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet build() {
+ if (result != null && !isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder."); }
+ if (result.rows_ != java.util.Collections.EMPTY_LIST) {
+ result.rows_ =
+ java.util.Collections.unmodifiableList(result.rows_);
+ }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ @Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.getDefaultInstance()) return this;
+ if (!other.rows_.isEmpty()) {
+ if (result.rows_.isEmpty()) {
+ result.rows_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row>();
+ }
+ result.rows_.addAll(other.rows_);
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return mergeFrom(input,
+ com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.Builder subBuilder = org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.newBuilder();
+ input.readMessage(subBuilder, extensionRegistry);
+ addRows(subBuilder.buildPartial());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.CellSet.Row rows = 1;
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row> getRowsList() {
+ return java.util.Collections.unmodifiableList(result.rows_);
+ }
+ public int getRowsCount() {
+ return result.getRowsCount();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row getRows(int index) {
+ return result.getRows(index);
+ }
+ public Builder setRows(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.rows_.set(index, value);
+ return this;
+ }
+ public Builder setRows(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.Builder builderForValue) {
+ result.rows_.set(index, builderForValue.build());
+ return this;
+ }
+ public Builder addRows(org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ if (result.rows_.isEmpty()) {
+ result.rows_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row>();
+ }
+ result.rows_.add(value);
+ return this;
+ }
+ public Builder addRows(org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.Builder builderForValue) {
+ if (result.rows_.isEmpty()) {
+ result.rows_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row>();
+ }
+ result.rows_.add(builderForValue.build());
+ return this;
+ }
+ public Builder addAllRows(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row> values) {
+ if (result.rows_.isEmpty()) {
+ result.rows_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row>();
+ }
+ super.addAll(values, result.rows_);
+ return this;
+ }
+ public Builder clearRows() {
+ result.rows_ = java.util.Collections.emptyList();
+ return this;
+ }
+ }
+
+ static {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.getDescriptor();
+ }
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_Row_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_Row_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String descriptorData =
+ "\n\024CellSetMessage.proto\0223org.apache.hadoo" +
+ "p.hbase.stargate.protobuf.generated\032\021Cel" +
+ "lMessage.proto\"\270\001\n\007CellSet\022N\n\004rows\030\001 \003(\013" +
+ "2@.org.apache.hadoop.hbase.stargate.prot" +
+ "obuf.generated.CellSet.Row\032]\n\003Row\022\013\n\003key" +
+ "\030\001 \002(\014\022I\n\006values\030\002 \003(\01329.org.apache.hado" +
+ "op.hbase.stargate.protobuf.generated.Cel" +
+ "l";
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_descriptor,
+ new java.lang.String[] { "Rows", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Builder.class);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_Row_descriptor =
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_descriptor.getNestedTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_Row_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_Row_descriptor,
+ new java.lang.String[] { "Key", "Values", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.getDescriptor(),
+ }, assigner);
+ }
+}
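Similarly, a short sketch of the nested CellSet/Row API, with illustrative literals and the same classpath assumptions as the Cell example above:

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell;
    import org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet;

    public class CellSetRoundTrip {
      public static void main(String[] args) throws Exception {
        // Row.key is declared required, so build() throws
        // UninitializedMessageException if it is left unset;
        // buildPartial() skips that check.
        CellSet.Row row = CellSet.Row.newBuilder()
            .setKey(ByteString.copyFromUtf8("row1"))
            .addValues(Cell.newBuilder()
                .setColumn(ByteString.copyFromUtf8("info:name"))
                .setData(ByteString.copyFromUtf8("value"))
                .build())
            .build();

        CellSet cellSet = CellSet.newBuilder().addRows(row).build();
        CellSet parsed = CellSet.parseFrom(cellSet.toByteArray());
        System.out.println(parsed.getRows(0).getValuesCount()); // prints 1
      }
    }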
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/ColumnSchemaMessage.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/ColumnSchemaMessage.java
new file mode 100644
index 00000000000..b0841211946
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/ColumnSchemaMessage.java
@@ -0,0 +1,861 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+public final class ColumnSchemaMessage {
+ private ColumnSchemaMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public static final class ColumnSchema extends
+ com.google.protobuf.GeneratedMessage {
+ // Use ColumnSchema.newBuilder() to construct.
+ private ColumnSchema() {}
+
+ private static final ColumnSchema defaultInstance = new ColumnSchema();
+ public static ColumnSchema getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public ColumnSchema getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_descriptor;
+ }
+
+ @Override
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_fieldAccessorTable;
+ }
+
+ public static final class Attribute extends
+ com.google.protobuf.GeneratedMessage {
+ // Use Attribute.newBuilder() to construct.
+ private Attribute() {}
+
+ private static final Attribute defaultInstance = new Attribute();
+ public static Attribute getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Attribute getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_Attribute_descriptor;
+ }
+
+ @Override
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_Attribute_fieldAccessorTable;
+ }
+
+ // required string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private boolean hasName;
+ private java.lang.String name_ = "";
+ public boolean hasName() { return hasName; }
+ public java.lang.String getName() { return name_; }
+
+ // required string value = 2;
+ public static final int VALUE_FIELD_NUMBER = 2;
+ private boolean hasValue;
+ private java.lang.String value_ = "";
+ public boolean hasValue() { return hasValue; }
+ public java.lang.String getValue() { return value_; }
+
+ @Override
+ public final boolean isInitialized() {
+ if (!hasName) return false;
+ if (!hasValue) return false;
+ return true;
+ }
+
+ @Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (hasName()) {
+ output.writeString(1, getName());
+ }
+ if (hasValue()) {
+ output.writeString(2, getValue());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ @Override
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (hasName()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(1, getName());
+ }
+ if (hasValue()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(2, getValue());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return new Builder(); }
+ public Builder newBuilderForType() { return new Builder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute prototype) {
+ return new Builder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.newBuilder()
+ private Builder() {}
+
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute result = new org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute();
+
+ @Override
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute internalGetResult() {
+ return result;
+ }
+
+ @Override
+ public Builder clear() {
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute();
+ return this;
+ }
+
+ @Override
+ public Builder clone() {
+ return new Builder().mergeFrom(result);
+ }
+
+ @Override
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute build() {
+ if (result != null && !isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder."); }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ @Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ setName(other.getName());
+ }
+ if (other.hasValue()) {
+ setValue(other.getValue());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return mergeFrom(input,
+ com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ setName(input.readString());
+ break;
+ }
+ case 18: {
+ setValue(input.readString());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // required string name = 1;
+ public boolean hasName() {
+ return result.hasName();
+ }
+ public java.lang.String getName() {
+ return result.getName();
+ }
+ public Builder setName(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasName = true;
+ result.name_ = value;
+ return this;
+ }
+ public Builder clearName() {
+ result.hasName = false;
+ result.name_ = "";
+ return this;
+ }
+
+ // required string value = 2;
+ public boolean hasValue() {
+ return result.hasValue();
+ }
+ public java.lang.String getValue() {
+ return result.getValue();
+ }
+ public Builder setValue(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasValue = true;
+ result.value_ = value;
+ return this;
+ }
+ public Builder clearValue() {
+ result.hasValue = false;
+ result.value_ = "";
+ return this;
+ }
+ }
+
+ static {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.getDescriptor();
+ }
+ }
+
+ // optional string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private boolean hasName;
+ private java.lang.String name_ = "";
+ public boolean hasName() { return hasName; }
+ public java.lang.String getName() { return name_; }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchema.Attribute attrs = 2;
+ public static final int ATTRS_FIELD_NUMBER = 2;
+ private java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute> attrs_ =
+ java.util.Collections.emptyList();
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute> getAttrsList() {
+ return attrs_;
+ }
+ public int getAttrsCount() { return attrs_.size(); }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute getAttrs(int index) {
+ return attrs_.get(index);
+ }
+
+ // optional int32 ttl = 3;
+ public static final int TTL_FIELD_NUMBER = 3;
+ private boolean hasTtl;
+ private int ttl_ = 0;
+ public boolean hasTtl() { return hasTtl; }
+ public int getTtl() { return ttl_; }
+
+ // optional int32 maxVersions = 4;
+ public static final int MAXVERSIONS_FIELD_NUMBER = 4;
+ private boolean hasMaxVersions;
+ private int maxVersions_ = 0;
+ public boolean hasMaxVersions() { return hasMaxVersions; }
+ public int getMaxVersions() { return maxVersions_; }
+
+ // optional string compression = 5;
+ public static final int COMPRESSION_FIELD_NUMBER = 5;
+ private boolean hasCompression;
+ private java.lang.String compression_ = "";
+ public boolean hasCompression() { return hasCompression; }
+ public java.lang.String getCompression() { return compression_; }
+
+ @Override
+ public final boolean isInitialized() {
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute element : getAttrsList()) {
+ if (!element.isInitialized()) return false;
+ }
+ return true;
+ }
+
+ @Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (hasName()) {
+ output.writeString(1, getName());
+ }
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute element : getAttrsList()) {
+ output.writeMessage(2, element);
+ }
+ if (hasTtl()) {
+ output.writeInt32(3, getTtl());
+ }
+ if (hasMaxVersions()) {
+ output.writeInt32(4, getMaxVersions());
+ }
+ if (hasCompression()) {
+ output.writeString(5, getCompression());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ @Override
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (hasName()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(1, getName());
+ }
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute element : getAttrsList()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, element);
+ }
+ if (hasTtl()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(3, getTtl());
+ }
+ if (hasMaxVersions()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(4, getMaxVersions());
+ }
+ if (hasCompression()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(5, getCompression());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return new Builder(); }
+ public Builder newBuilderForType() { return new Builder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema prototype) {
+ return new Builder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.newBuilder()
+ private Builder() {}
+
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema result = new org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema();
+
+ @Override
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema internalGetResult() {
+ return result;
+ }
+
+ @Override
+ public Builder clear() {
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema();
+ return this;
+ }
+
+ @Override
+ public Builder clone() {
+ return new Builder().mergeFrom(result);
+ }
+
+ @Override
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema build() {
+ if (result != null && !isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder."); }
+ if (result.attrs_ != java.util.Collections.EMPTY_LIST) {
+ result.attrs_ =
+ java.util.Collections.unmodifiableList(result.attrs_);
+ }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ @Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ setName(other.getName());
+ }
+ if (!other.attrs_.isEmpty()) {
+ if (result.attrs_.isEmpty()) {
+ result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute>();
+ }
+ result.attrs_.addAll(other.attrs_);
+ }
+ if (other.hasTtl()) {
+ setTtl(other.getTtl());
+ }
+ if (other.hasMaxVersions()) {
+ setMaxVersions(other.getMaxVersions());
+ }
+ if (other.hasCompression()) {
+ setCompression(other.getCompression());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return mergeFrom(input,
+ com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ setName(input.readString());
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder subBuilder = org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.newBuilder();
+ input.readMessage(subBuilder, extensionRegistry);
+ addAttrs(subBuilder.buildPartial());
+ break;
+ }
+ case 24: {
+ setTtl(input.readInt32());
+ break;
+ }
+ case 32: {
+ setMaxVersions(input.readInt32());
+ break;
+ }
+ case 42: {
+ setCompression(input.readString());
+ break;
+ }
+ }
+ }
+ }
+
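+ // Editorial aside (not protoc output): the case labels in mergeFrom above
+ // are protobuf wire tags, computed as (field_number << 3) | wire_type.
+ // The length-delimited fields name (1) and attrs (2) parse under tags 10
+ // and 18, the varint fields ttl (3) and maxVersions (4) under 24 and 32,
+ // and the string compression (5) under tag 42.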
+
+ // optional string name = 1;
+ public boolean hasName() {
+ return result.hasName();
+ }
+ public java.lang.String getName() {
+ return result.getName();
+ }
+ public Builder setName(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasName = true;
+ result.name_ = value;
+ return this;
+ }
+ public Builder clearName() {
+ result.hasName = false;
+ result.name_ = "";
+ return this;
+ }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchema.Attribute attrs = 2;
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute> getAttrsList() {
+ return java.util.Collections.unmodifiableList(result.attrs_);
+ }
+ public int getAttrsCount() {
+ return result.getAttrsCount();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute getAttrs(int index) {
+ return result.getAttrs(index);
+ }
+ public Builder setAttrs(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.attrs_.set(index, value);
+ return this;
+ }
+ public Builder setAttrs(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder builderForValue) {
+ result.attrs_.set(index, builderForValue.build());
+ return this;
+ }
+ public Builder addAttrs(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ if (result.attrs_.isEmpty()) {
+ result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute>();
+ }
+ result.attrs_.add(value);
+ return this;
+ }
+ public Builder addAttrs(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder builderForValue) {
+ if (result.attrs_.isEmpty()) {
+ result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute>();
+ }
+ result.attrs_.add(builderForValue.build());
+ return this;
+ }
+ public Builder addAllAttrs(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute> values) {
+ if (result.attrs_.isEmpty()) {
+ result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute>();
+ }
+ super.addAll(values, result.attrs_);
+ return this;
+ }
+ public Builder clearAttrs() {
+ result.attrs_ = java.util.Collections.emptyList();
+ return this;
+ }
+
+ // optional int32 ttl = 3;
+ public boolean hasTtl() {
+ return result.hasTtl();
+ }
+ public int getTtl() {
+ return result.getTtl();
+ }
+ public Builder setTtl(int value) {
+ result.hasTtl = true;
+ result.ttl_ = value;
+ return this;
+ }
+ public Builder clearTtl() {
+ result.hasTtl = false;
+ result.ttl_ = 0;
+ return this;
+ }
+
+ // optional int32 maxVersions = 4;
+ public boolean hasMaxVersions() {
+ return result.hasMaxVersions();
+ }
+ public int getMaxVersions() {
+ return result.getMaxVersions();
+ }
+ public Builder setMaxVersions(int value) {
+ result.hasMaxVersions = true;
+ result.maxVersions_ = value;
+ return this;
+ }
+ public Builder clearMaxVersions() {
+ result.hasMaxVersions = false;
+ result.maxVersions_ = 0;
+ return this;
+ }
+
+ // optional string compression = 5;
+ public boolean hasCompression() {
+ return result.hasCompression();
+ }
+ public java.lang.String getCompression() {
+ return result.getCompression();
+ }
+ public Builder setCompression(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasCompression = true;
+ result.compression_ = value;
+ return this;
+ }
+ public Builder clearCompression() {
+ result.hasCompression = false;
+ result.compression_ = "";
+ return this;
+ }
+ }
+
+ static {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.getDescriptor();
+ }
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_Attribute_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_Attribute_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String descriptorData =
+ "\n\031ColumnSchemaMessage.proto\0223org.apache." +
+ "hadoop.hbase.stargate.protobuf.generated" +
+ "\"\331\001\n\014ColumnSchema\022\014\n\004name\030\001 \001(\t\022Z\n\005attrs" +
+ "\030\002 \003(\0132K.org.apache.hadoop.hbase.stargat" +
+ "e.protobuf.generated.ColumnSchema.Attrib" +
+ "ute\022\013\n\003ttl\030\003 \001(\005\022\023\n\013maxVersions\030\004 \001(\005\022\023\n" +
+ "\013compression\030\005 \001(\t\032(\n\tAttribute\022\014\n\004name\030" +
+ "\001 \002(\t\022\r\n\005value\030\002 \002(\t";
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_descriptor,
+ new java.lang.String[] { "Name", "Attrs", "Ttl", "MaxVersions", "Compression", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder.class);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_Attribute_descriptor =
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_descriptor.getNestedTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_Attribute_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_Attribute_descriptor,
+ new java.lang.String[] { "Name", "Value", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+}
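A minimal usage sketch (an editorial aside, not part of the patch; the class name, family name, and attribute values are illustrative): building a ColumnSchema with the generated builder API above and round-tripping it through the wire format.

    import org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema;

    public class ColumnSchemaExample {
      public static void main(String[] args) throws Exception {
        // Assemble the schema message; Attribute is the nested key/value type.
        ColumnSchema schema = ColumnSchema.newBuilder()
            .setName("info")
            .setTtl(86400)          // seconds
            .setMaxVersions(3)
            .setCompression("GZ")
            .addAttrs(ColumnSchema.Attribute.newBuilder()
                .setName("BLOCKCACHE")
                .setValue("true")
                .build())
            .build();

        // Serialize, then parse back with the static parseFrom defined above.
        byte[] wire = schema.toByteArray();
        ColumnSchema copy = ColumnSchema.parseFrom(wire);
        System.out.println(copy.getName() + " ttl=" + copy.getTtl());
      }
    }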
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/ScannerMessage.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/ScannerMessage.java
new file mode 100644
index 00000000000..c38fa08aad3
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/ScannerMessage.java
@@ -0,0 +1,558 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+public final class ScannerMessage {
+ private ScannerMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public static final class Scanner extends
+ com.google.protobuf.GeneratedMessage {
+ // Use Scanner.newBuilder() to construct.
+ private Scanner() {}
+
+ private static final Scanner defaultInstance = new Scanner();
+ public static Scanner getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Scanner getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Scanner_descriptor;
+ }
+
+ @Override
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Scanner_fieldAccessorTable;
+ }
+
+ // optional bytes startRow = 1;
+ public static final int STARTROW_FIELD_NUMBER = 1;
+ private boolean hasStartRow;
+ private com.google.protobuf.ByteString startRow_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasStartRow() { return hasStartRow; }
+ public com.google.protobuf.ByteString getStartRow() { return startRow_; }
+
+ // optional bytes endRow = 2;
+ public static final int ENDROW_FIELD_NUMBER = 2;
+ private boolean hasEndRow;
+ private com.google.protobuf.ByteString endRow_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasEndRow() { return hasEndRow; }
+ public com.google.protobuf.ByteString getEndRow() { return endRow_; }
+
+ // repeated bytes columns = 3;
+ public static final int COLUMNS_FIELD_NUMBER = 3;
+ private java.util.List<com.google.protobuf.ByteString> columns_ =
+ java.util.Collections.emptyList();
+ public java.util.List<com.google.protobuf.ByteString> getColumnsList() {
+ return columns_;
+ }
+ public int getColumnsCount() { return columns_.size(); }
+ public com.google.protobuf.ByteString getColumns(int index) {
+ return columns_.get(index);
+ }
+
+ // optional int32 batch = 4;
+ public static final int BATCH_FIELD_NUMBER = 4;
+ private boolean hasBatch;
+ private int batch_ = 0;
+ public boolean hasBatch() { return hasBatch; }
+ public int getBatch() { return batch_; }
+
+ // optional int64 startTime = 5;
+ public static final int STARTTIME_FIELD_NUMBER = 5;
+ private boolean hasStartTime;
+ private long startTime_ = 0L;
+ public boolean hasStartTime() { return hasStartTime; }
+ public long getStartTime() { return startTime_; }
+
+ // optional int64 endTime = 6;
+ public static final int ENDTIME_FIELD_NUMBER = 6;
+ private boolean hasEndTime;
+ private long endTime_ = 0L;
+ public boolean hasEndTime() { return hasEndTime; }
+ public long getEndTime() { return endTime_; }
+
+ @Override
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ @Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (hasStartRow()) {
+ output.writeBytes(1, getStartRow());
+ }
+ if (hasEndRow()) {
+ output.writeBytes(2, getEndRow());
+ }
+ for (com.google.protobuf.ByteString element : getColumnsList()) {
+ output.writeBytes(3, element);
+ }
+ if (hasBatch()) {
+ output.writeInt32(4, getBatch());
+ }
+ if (hasStartTime()) {
+ output.writeInt64(5, getStartTime());
+ }
+ if (hasEndTime()) {
+ output.writeInt64(6, getEndTime());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ @Override
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (hasStartRow()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getStartRow());
+ }
+ if (hasEndRow()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getEndRow());
+ }
+ {
+ int dataSize = 0;
+ for (com.google.protobuf.ByteString element : getColumnsList()) {
+ dataSize += com.google.protobuf.CodedOutputStream
+ .computeBytesSizeNoTag(element);
+ }
+ size += dataSize;
+ size += 1 * getColumnsList().size();
+ }
+ if (hasBatch()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(4, getBatch());
+ }
+ if (hasStartTime()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(5, getStartTime());
+ }
+ if (hasEndTime()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(6, getEndTime());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return new Builder(); }
+ public Builder newBuilderForType() { return new Builder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner prototype) {
+ return new Builder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner.newBuilder()
+ private Builder() {}
+
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner result = new org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner();
+
+ @Override
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner internalGetResult() {
+ return result;
+ }
+
+ @Override
+ public Builder clear() {
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner();
+ return this;
+ }
+
+ @Override
+ public Builder clone() {
+ return new Builder().mergeFrom(result);
+ }
+
+ @Override
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner build() {
+ if (result != null && !isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder."); }
+ if (result.columns_ != java.util.Collections.EMPTY_LIST) {
+ result.columns_ =
+ java.util.Collections.unmodifiableList(result.columns_);
+ }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ @Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner.getDefaultInstance()) return this;
+ if (other.hasStartRow()) {
+ setStartRow(other.getStartRow());
+ }
+ if (other.hasEndRow()) {
+ setEndRow(other.getEndRow());
+ }
+ if (!other.columns_.isEmpty()) {
+ if (result.columns_.isEmpty()) {
+ result.columns_ = new java.util.ArrayList<com.google.protobuf.ByteString>();
+ }
+ result.columns_.addAll(other.columns_);
+ }
+ if (other.hasBatch()) {
+ setBatch(other.getBatch());
+ }
+ if (other.hasStartTime()) {
+ setStartTime(other.getStartTime());
+ }
+ if (other.hasEndTime()) {
+ setEndTime(other.getEndTime());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return mergeFrom(input,
+ com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ setStartRow(input.readBytes());
+ break;
+ }
+ case 18: {
+ setEndRow(input.readBytes());
+ break;
+ }
+ case 26: {
+ addColumns(input.readBytes());
+ break;
+ }
+ case 32: {
+ setBatch(input.readInt32());
+ break;
+ }
+ case 40: {
+ setStartTime(input.readInt64());
+ break;
+ }
+ case 48: {
+ setEndTime(input.readInt64());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // optional bytes startRow = 1;
+ public boolean hasStartRow() {
+ return result.hasStartRow();
+ }
+ public com.google.protobuf.ByteString getStartRow() {
+ return result.getStartRow();
+ }
+ public Builder setStartRow(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasStartRow = true;
+ result.startRow_ = value;
+ return this;
+ }
+ public Builder clearStartRow() {
+ result.hasStartRow = false;
+ result.startRow_ = com.google.protobuf.ByteString.EMPTY;
+ return this;
+ }
+
+ // optional bytes endRow = 2;
+ public boolean hasEndRow() {
+ return result.hasEndRow();
+ }
+ public com.google.protobuf.ByteString getEndRow() {
+ return result.getEndRow();
+ }
+ public Builder setEndRow(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasEndRow = true;
+ result.endRow_ = value;
+ return this;
+ }
+ public Builder clearEndRow() {
+ result.hasEndRow = false;
+ result.endRow_ = com.google.protobuf.ByteString.EMPTY;
+ return this;
+ }
+
+ // repeated bytes columns = 3;
+ public java.util.List<com.google.protobuf.ByteString> getColumnsList() {
+ return java.util.Collections.unmodifiableList(result.columns_);
+ }
+ public int getColumnsCount() {
+ return result.getColumnsCount();
+ }
+ public com.google.protobuf.ByteString getColumns(int index) {
+ return result.getColumns(index);
+ }
+ public Builder setColumns(int index, com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.columns_.set(index, value);
+ return this;
+ }
+ public Builder addColumns(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ if (result.columns_.isEmpty()) {
+ result.columns_ = new java.util.ArrayList<com.google.protobuf.ByteString>();
+ }
+ result.columns_.add(value);
+ return this;
+ }
+ public Builder addAllColumns(
+ java.lang.Iterable<? extends com.google.protobuf.ByteString> values) {
+ if (result.columns_.isEmpty()) {
+ result.columns_ = new java.util.ArrayList<com.google.protobuf.ByteString>();
+ }
+ super.addAll(values, result.columns_);
+ return this;
+ }
+ public Builder clearColumns() {
+ result.columns_ = java.util.Collections.emptyList();
+ return this;
+ }
+
+ // optional int32 batch = 4;
+ public boolean hasBatch() {
+ return result.hasBatch();
+ }
+ public int getBatch() {
+ return result.getBatch();
+ }
+ public Builder setBatch(int value) {
+ result.hasBatch = true;
+ result.batch_ = value;
+ return this;
+ }
+ public Builder clearBatch() {
+ result.hasBatch = false;
+ result.batch_ = 0;
+ return this;
+ }
+
+ // optional int64 startTime = 5;
+ public boolean hasStartTime() {
+ return result.hasStartTime();
+ }
+ public long getStartTime() {
+ return result.getStartTime();
+ }
+ public Builder setStartTime(long value) {
+ result.hasStartTime = true;
+ result.startTime_ = value;
+ return this;
+ }
+ public Builder clearStartTime() {
+ result.hasStartTime = false;
+ result.startTime_ = 0L;
+ return this;
+ }
+
+ // optional int64 endTime = 6;
+ public boolean hasEndTime() {
+ return result.hasEndTime();
+ }
+ public long getEndTime() {
+ return result.getEndTime();
+ }
+ public Builder setEndTime(long value) {
+ result.hasEndTime = true;
+ result.endTime_ = value;
+ return this;
+ }
+ public Builder clearEndTime() {
+ result.hasEndTime = false;
+ result.endTime_ = 0L;
+ return this;
+ }
+ }
+
+ static {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.getDescriptor();
+ }
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Scanner_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Scanner_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String descriptorData =
+ "\n\024ScannerMessage.proto\0223org.apache.hadoo" +
+ "p.hbase.stargate.protobuf.generated\"o\n\007S" +
+ "canner\022\020\n\010startRow\030\001 \001(\014\022\016\n\006endRow\030\002 \001(\014" +
+ "\022\017\n\007columns\030\003 \003(\014\022\r\n\005batch\030\004 \001(\005\022\021\n\tstar" +
+ "tTime\030\005 \001(\003\022\017\n\007endTime\030\006 \001(\003";
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Scanner_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Scanner_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Scanner_descriptor,
+ new java.lang.String[] { "StartRow", "EndRow", "Columns", "Batch", "StartTime", "EndTime", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+}
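Another brief sketch (again an aside; row keys and column names are made up): the Scanner message mirrors a scan specification, so a client populates it with the generated builder and sends the serialized bytes to the Stargate scanner resource. Note the byte-oriented fields take com.google.protobuf.ByteString rather than String.

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner;

    public class ScannerExample {
      public static void main(String[] args) throws Exception {
        Scanner scan = Scanner.newBuilder()
            .setStartRow(ByteString.copyFromUtf8("row-000"))   // scan start key
            .setEndRow(ByteString.copyFromUtf8("row-999"))     // scan end key
            .addColumns(ByteString.copyFromUtf8("info:name"))  // family:qualifier
            .setBatch(100)                                     // rows per fetch
            .build();
        // The serialized form is what a client would post to create a scanner.
        byte[] wire = scan.toByteArray();
        Scanner copy = Scanner.parseFrom(wire);
        System.out.println("batch=" + copy.getBatch());
      }
    }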
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/StorageClusterStatusMessage.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/StorageClusterStatusMessage.java
new file mode 100644
index 00000000000..d6dcea1aa81
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/StorageClusterStatusMessage.java
@@ -0,0 +1,1009 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+public final class StorageClusterStatusMessage {
+ private StorageClusterStatusMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public static final class StorageClusterStatus extends
+ com.google.protobuf.GeneratedMessage {
+ // Use StorageClusterStatus.newBuilder() to construct.
+ private StorageClusterStatus() {}
+
+ private static final StorageClusterStatus defaultInstance = new StorageClusterStatus();
+ public static StorageClusterStatus getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public StorageClusterStatus getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_descriptor;
+ }
+
+ @Override
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_fieldAccessorTable;
+ }
+
+ public static final class Node extends
+ com.google.protobuf.GeneratedMessage {
+ // Use Node.newBuilder() to construct.
+ private Node() {}
+
+ private static final Node defaultInstance = new Node();
+ public static Node getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Node getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Node_descriptor;
+ }
+
+ @Override
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Node_fieldAccessorTable;
+ }
+
+ // required string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private boolean hasName;
+ private java.lang.String name_ = "";
+ public boolean hasName() { return hasName; }
+ public java.lang.String getName() { return name_; }
+
+ // optional int64 startCode = 4;
+ public static final int STARTCODE_FIELD_NUMBER = 4;
+ private boolean hasStartCode;
+ private long startCode_ = 0L;
+ public boolean hasStartCode() { return hasStartCode; }
+ public long getStartCode() { return startCode_; }
+
+ // optional int32 requests = 2;
+ public static final int REQUESTS_FIELD_NUMBER = 2;
+ private boolean hasRequests;
+ private int requests_ = 0;
+ public boolean hasRequests() { return hasRequests; }
+ public int getRequests() { return requests_; }
+
+ // repeated bytes regions = 3;
+ public static final int REGIONS_FIELD_NUMBER = 3;
+ private java.util.List<com.google.protobuf.ByteString> regions_ =
+ java.util.Collections.emptyList();
+ public java.util.List<com.google.protobuf.ByteString> getRegionsList() {
+ return regions_;
+ }
+ public int getRegionsCount() { return regions_.size(); }
+ public com.google.protobuf.ByteString getRegions(int index) {
+ return regions_.get(index);
+ }
+
+ @Override
+ public final boolean isInitialized() {
+ if (!hasName) return false;
+ return true;
+ }
+
+ @Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (hasName()) {
+ output.writeString(1, getName());
+ }
+ if (hasRequests()) {
+ output.writeInt32(2, getRequests());
+ }
+ for (com.google.protobuf.ByteString element : getRegionsList()) {
+ output.writeBytes(3, element);
+ }
+ if (hasStartCode()) {
+ output.writeInt64(4, getStartCode());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ @Override
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (hasName()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(1, getName());
+ }
+ if (hasRequests()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(2, getRequests());
+ }
+ {
+ int dataSize = 0;
+ for (com.google.protobuf.ByteString element : getRegionsList()) {
+ dataSize += com.google.protobuf.CodedOutputStream
+ .computeBytesSizeNoTag(element);
+ }
+ size += dataSize;
+ size += 1 * getRegionsList().size();
+ }
+ if (hasStartCode()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(4, getStartCode());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return new Builder(); }
+ public Builder newBuilderForType() { return new Builder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node prototype) {
+ return new Builder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.newBuilder()
+ private Builder() {}
+
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node result = new org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node();
+
+ @Override
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node internalGetResult() {
+ return result;
+ }
+
+ @Override
+ public Builder clear() {
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node();
+ return this;
+ }
+
+ @Override
+ public Builder clone() {
+ return new Builder().mergeFrom(result);
+ }
+
+ @Override
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node build() {
+ if (result != null && !isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder."); }
+ if (result.regions_ != java.util.Collections.EMPTY_LIST) {
+ result.regions_ =
+ java.util.Collections.unmodifiableList(result.regions_);
+ }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ @Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ setName(other.getName());
+ }
+ if (other.hasStartCode()) {
+ setStartCode(other.getStartCode());
+ }
+ if (other.hasRequests()) {
+ setRequests(other.getRequests());
+ }
+ if (!other.regions_.isEmpty()) {
+ if (result.regions_.isEmpty()) {
+ result.regions_ = new java.util.ArrayList<com.google.protobuf.ByteString>();
+ }
+ result.regions_.addAll(other.regions_);
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return mergeFrom(input,
+ com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ setName(input.readString());
+ break;
+ }
+ case 16: {
+ setRequests(input.readInt32());
+ break;
+ }
+ case 26: {
+ addRegions(input.readBytes());
+ break;
+ }
+ case 32: {
+ setStartCode(input.readInt64());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // required string name = 1;
+ public boolean hasName() {
+ return result.hasName();
+ }
+ public java.lang.String getName() {
+ return result.getName();
+ }
+ public Builder setName(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasName = true;
+ result.name_ = value;
+ return this;
+ }
+ public Builder clearName() {
+ result.hasName = false;
+ result.name_ = "";
+ return this;
+ }
+
+ // optional int64 startCode = 4;
+ public boolean hasStartCode() {
+ return result.hasStartCode();
+ }
+ public long getStartCode() {
+ return result.getStartCode();
+ }
+ public Builder setStartCode(long value) {
+ result.hasStartCode = true;
+ result.startCode_ = value;
+ return this;
+ }
+ public Builder clearStartCode() {
+ result.hasStartCode = false;
+ result.startCode_ = 0L;
+ return this;
+ }
+
+ // optional int32 requests = 2;
+ public boolean hasRequests() {
+ return result.hasRequests();
+ }
+ public int getRequests() {
+ return result.getRequests();
+ }
+ public Builder setRequests(int value) {
+ result.hasRequests = true;
+ result.requests_ = value;
+ return this;
+ }
+ public Builder clearRequests() {
+ result.hasRequests = false;
+ result.requests_ = 0;
+ return this;
+ }
+
+ // repeated bytes regions = 3;
+ public java.util.List<com.google.protobuf.ByteString> getRegionsList() {
+ return java.util.Collections.unmodifiableList(result.regions_);
+ }
+ public int getRegionsCount() {
+ return result.getRegionsCount();
+ }
+ public com.google.protobuf.ByteString getRegions(int index) {
+ return result.getRegions(index);
+ }
+ public Builder setRegions(int index, com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.regions_.set(index, value);
+ return this;
+ }
+ public Builder addRegions(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ if (result.regions_.isEmpty()) {
+ result.regions_ = new java.util.ArrayList<com.google.protobuf.ByteString>();
+ }
+ result.regions_.add(value);
+ return this;
+ }
+ public Builder addAllRegions(
+ java.lang.Iterable<? extends com.google.protobuf.ByteString> values) {
+ if (result.regions_.isEmpty()) {
+ result.regions_ = new java.util.ArrayList<com.google.protobuf.ByteString>();
+ }
+ super.addAll(values, result.regions_);
+ return this;
+ }
+ public Builder clearRegions() {
+ result.regions_ = java.util.Collections.emptyList();
+ return this;
+ }
+ }
+
+ static {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.getDescriptor();
+ }
+ }
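+
+ // Editorial aside (not protoc output): Node declares startCode (field 4)
+ // ahead of requests (field 2), presumably following the .proto declaration
+ // order, while writeTo() above serializes strictly in ascending
+ // field-number order: name (1), requests (2), regions (3), startCode (4).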
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
+ public static final int LIVENODES_FIELD_NUMBER = 1;
+ private java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node> liveNodes_ =
+ java.util.Collections.emptyList();
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node> getLiveNodesList() {
+ return liveNodes_;
+ }
+ public int getLiveNodesCount() { return liveNodes_.size(); }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node getLiveNodes(int index) {
+ return liveNodes_.get(index);
+ }
+
+ // repeated string deadNodes = 2;
+ public static final int DEADNODES_FIELD_NUMBER = 2;
+ private java.util.List<java.lang.String> deadNodes_ =
+ java.util.Collections.emptyList();
+ public java.util.List<java.lang.String> getDeadNodesList() {
+ return deadNodes_;
+ }
+ public int getDeadNodesCount() { return deadNodes_.size(); }
+ public java.lang.String getDeadNodes(int index) {
+ return deadNodes_.get(index);
+ }
+
+ // optional int32 regions = 3;
+ public static final int REGIONS_FIELD_NUMBER = 3;
+ private boolean hasRegions;
+ private int regions_ = 0;
+ public boolean hasRegions() { return hasRegions; }
+ public int getRegions() { return regions_; }
+
+ // optional int32 requests = 4;
+ public static final int REQUESTS_FIELD_NUMBER = 4;
+ private boolean hasRequests;
+ private int requests_ = 0;
+ public boolean hasRequests() { return hasRequests; }
+ public int getRequests() { return requests_; }
+
+ // optional double averageLoad = 5;
+ public static final int AVERAGELOAD_FIELD_NUMBER = 5;
+ private boolean hasAverageLoad;
+ private double averageLoad_ = 0D;
+ public boolean hasAverageLoad() { return hasAverageLoad; }
+ public double getAverageLoad() { return averageLoad_; }
+
+ @Override
+ public final boolean isInitialized() {
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node element : getLiveNodesList()) {
+ if (!element.isInitialized()) return false;
+ }
+ return true;
+ }
+
+ @Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node element : getLiveNodesList()) {
+ output.writeMessage(1, element);
+ }
+ for (java.lang.String element : getDeadNodesList()) {
+ output.writeString(2, element);
+ }
+ if (hasRegions()) {
+ output.writeInt32(3, getRegions());
+ }
+ if (hasRequests()) {
+ output.writeInt32(4, getRequests());
+ }
+ if (hasAverageLoad()) {
+ output.writeDouble(5, getAverageLoad());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ @Override
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node element : getLiveNodesList()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, element);
+ }
+ {
+ int dataSize = 0;
+ for (java.lang.String element : getDeadNodesList()) {
+ dataSize += com.google.protobuf.CodedOutputStream
+ .computeStringSizeNoTag(element);
+ }
+ size += dataSize;
+ size += 1 * getDeadNodesList().size();
+ }
+ if (hasRegions()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(3, getRegions());
+ }
+ if (hasRequests()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(4, getRequests());
+ }
+ if (hasAverageLoad()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeDoubleSize(5, getAverageLoad());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return new Builder(); }
+ public Builder newBuilderForType() { return new Builder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus prototype) {
+ return new Builder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.newBuilder()
+ private Builder() {}
+
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus result = new org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus();
+
+ @Override
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus internalGetResult() {
+ return result;
+ }
+
+ @Override
+ public Builder clear() {
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus();
+ return this;
+ }
+
+ @Override
+ public Builder clone() {
+ return new Builder().mergeFrom(result);
+ }
+
+ @Override
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus build() {
+ if (result != null && !isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder."); }
+ if (result.liveNodes_ != java.util.Collections.EMPTY_LIST) {
+ result.liveNodes_ =
+ java.util.Collections.unmodifiableList(result.liveNodes_);
+ }
+ if (result.deadNodes_ != java.util.Collections.EMPTY_LIST) {
+ result.deadNodes_ =
+ java.util.Collections.unmodifiableList(result.deadNodes_);
+ }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ @Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.getDefaultInstance()) return this;
+ if (!other.liveNodes_.isEmpty()) {
+ if (result.liveNodes_.isEmpty()) {
+ result.liveNodes_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node>();
+ }
+ result.liveNodes_.addAll(other.liveNodes_);
+ }
+ if (!other.deadNodes_.isEmpty()) {
+ if (result.deadNodes_.isEmpty()) {
+ result.deadNodes_ = new java.util.ArrayList<java.lang.String>();
+ }
+ result.deadNodes_.addAll(other.deadNodes_);
+ }
+ if (other.hasRegions()) {
+ setRegions(other.getRegions());
+ }
+ if (other.hasRequests()) {
+ setRequests(other.getRequests());
+ }
+ if (other.hasAverageLoad()) {
+ setAverageLoad(other.getAverageLoad());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return mergeFrom(input,
+ com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder subBuilder = org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.newBuilder();
+ input.readMessage(subBuilder, extensionRegistry);
+ addLiveNodes(subBuilder.buildPartial());
+ break;
+ }
+ case 18: {
+ addDeadNodes(input.readString());
+ break;
+ }
+ case 24: {
+ setRegions(input.readInt32());
+ break;
+ }
+ case 32: {
+ setRequests(input.readInt32());
+ break;
+ }
+ case 41: {
+ setAverageLoad(input.readDouble());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node> getLiveNodesList() {
+ return java.util.Collections.unmodifiableList(result.liveNodes_);
+ }
+ public int getLiveNodesCount() {
+ return result.getLiveNodesCount();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node getLiveNodes(int index) {
+ return result.getLiveNodes(index);
+ }
+ public Builder setLiveNodes(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.liveNodes_.set(index, value);
+ return this;
+ }
+ public Builder setLiveNodes(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder builderForValue) {
+ result.liveNodes_.set(index, builderForValue.build());
+ return this;
+ }
+ public Builder addLiveNodes(org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ if (result.liveNodes_.isEmpty()) {
+ result.liveNodes_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node>();
+ }
+ result.liveNodes_.add(value);
+ return this;
+ }
+ public Builder addLiveNodes(org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder builderForValue) {
+ if (result.liveNodes_.isEmpty()) {
+ result.liveNodes_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node>();
+ }
+ result.liveNodes_.add(builderForValue.build());
+ return this;
+ }
+ public Builder addAllLiveNodes(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node> values) {
+ if (result.liveNodes_.isEmpty()) {
+ result.liveNodes_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node>();
+ }
+ super.addAll(values, result.liveNodes_);
+ return this;
+ }
+ public Builder clearLiveNodes() {
+ result.liveNodes_ = java.util.Collections.emptyList();
+ return this;
+ }
+
+ // repeated string deadNodes = 2;
+ public java.util.List<java.lang.String> getDeadNodesList() {
+ return java.util.Collections.unmodifiableList(result.deadNodes_);
+ }
+ public int getDeadNodesCount() {
+ return result.getDeadNodesCount();
+ }
+ public java.lang.String getDeadNodes(int index) {
+ return result.getDeadNodes(index);
+ }
+ public Builder setDeadNodes(int index, java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.deadNodes_.set(index, value);
+ return this;
+ }
+ public Builder addDeadNodes(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ if (result.deadNodes_.isEmpty()) {
+ result.deadNodes_ = new java.util.ArrayList<java.lang.String>();
+ }
+ result.deadNodes_.add(value);
+ return this;
+ }
+ public Builder addAllDeadNodes(
+ java.lang.Iterable<? extends java.lang.String> values) {
+ if (result.deadNodes_.isEmpty()) {
+ result.deadNodes_ = new java.util.ArrayList<java.lang.String>();
+ }
+ super.addAll(values, result.deadNodes_);
+ return this;
+ }
+ public Builder clearDeadNodes() {
+ result.deadNodes_ = java.util.Collections.emptyList();
+ return this;
+ }
+
+ // optional int32 regions = 3;
+ public boolean hasRegions() {
+ return result.hasRegions();
+ }
+ public int getRegions() {
+ return result.getRegions();
+ }
+ public Builder setRegions(int value) {
+ result.hasRegions = true;
+ result.regions_ = value;
+ return this;
+ }
+ public Builder clearRegions() {
+ result.hasRegions = false;
+ result.regions_ = 0;
+ return this;
+ }
+
+ // optional int32 requests = 4;
+ public boolean hasRequests() {
+ return result.hasRequests();
+ }
+ public int getRequests() {
+ return result.getRequests();
+ }
+ public Builder setRequests(int value) {
+ result.hasRequests = true;
+ result.requests_ = value;
+ return this;
+ }
+ public Builder clearRequests() {
+ result.hasRequests = false;
+ result.requests_ = 0;
+ return this;
+ }
+
+ // optional double averageLoad = 5;
+ public boolean hasAverageLoad() {
+ return result.hasAverageLoad();
+ }
+ public double getAverageLoad() {
+ return result.getAverageLoad();
+ }
+ public Builder setAverageLoad(double value) {
+ result.hasAverageLoad = true;
+ result.averageLoad_ = value;
+ return this;
+ }
+ public Builder clearAverageLoad() {
+ result.hasAverageLoad = false;
+ result.averageLoad_ = 0D;
+ return this;
+ }
+ }
+
+ static {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.getDescriptor();
+ }
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Node_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Node_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
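+ // Note (editor's comment): descriptorData is the serialized
+ // FileDescriptorProto for StorageClusterStatusMessage.proto, embedded as
+ // an escaped string and parsed at class load to populate the descriptor
+ // and field accessor tables above.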
+ java.lang.String descriptorData =
+ "\n!StorageClusterStatusMessage.proto\0223org" +
+ ".apache.hadoop.hbase.stargate.protobuf.g" +
+ "enerated\"\220\002\n\024StorageClusterStatus\022a\n\tliv" +
+ "eNodes\030\001 \003(\0132N.org.apache.hadoop.hbase.s" +
+ "targate.protobuf.generated.StorageCluste" +
+ "rStatus.Node\022\021\n\tdeadNodes\030\002 \003(\t\022\017\n\007regio" +
+ "ns\030\003 \001(\005\022\020\n\010requests\030\004 \001(\005\022\023\n\013averageLoa" +
+ "d\030\005 \001(\001\032J\n\004Node\022\014\n\004name\030\001 \002(\t\022\021\n\tstartCo" +
+ "de\030\004 \001(\003\022\020\n\010requests\030\002 \001(\005\022\017\n\007regions\030\003 " +
+ "\003(\014";
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_descriptor,
+ new java.lang.String[] { "LiveNodes", "DeadNodes", "Regions", "Requests", "AverageLoad", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Builder.class);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Node_descriptor =
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_descriptor.getNestedTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Node_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Node_descriptor,
+ new java.lang.String[] { "Name", "StartCode", "Requests", "Regions", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+}
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/TableInfoMessage.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/TableInfoMessage.java
new file mode 100644
index 00000000000..b48dc0643d6
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/TableInfoMessage.java
@@ -0,0 +1,864 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+public final class TableInfoMessage {
+ private TableInfoMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public static final class TableInfo extends
+ com.google.protobuf.GeneratedMessage {
+ // Use TableInfo.newBuilder() to construct.
+ private TableInfo() {}
+
+ private static final TableInfo defaultInstance = new TableInfo();
+ public static TableInfo getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TableInfo getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_descriptor;
+ }
+
+ @Override
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_fieldAccessorTable;
+ }
+
+ public static final class Region extends
+ com.google.protobuf.GeneratedMessage {
+ // Use Region.newBuilder() to construct.
+ private Region() {}
+
+ private static final Region defaultInstance = new Region();
+ public static Region getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Region getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_Region_descriptor;
+ }
+
+ @Override
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_Region_fieldAccessorTable;
+ }
+
+ // required string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private boolean hasName;
+ private java.lang.String name_ = "";
+ public boolean hasName() { return hasName; }
+ public java.lang.String getName() { return name_; }
+
+ // optional bytes startKey = 2;
+ public static final int STARTKEY_FIELD_NUMBER = 2;
+ private boolean hasStartKey;
+ private com.google.protobuf.ByteString startKey_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasStartKey() { return hasStartKey; }
+ public com.google.protobuf.ByteString getStartKey() { return startKey_; }
+
+ // optional bytes endKey = 3;
+ public static final int ENDKEY_FIELD_NUMBER = 3;
+ private boolean hasEndKey;
+ private com.google.protobuf.ByteString endKey_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasEndKey() { return hasEndKey; }
+ public com.google.protobuf.ByteString getEndKey() { return endKey_; }
+
+ // optional int64 id = 4;
+ public static final int ID_FIELD_NUMBER = 4;
+ private boolean hasId;
+ private long id_ = 0L;
+ public boolean hasId() { return hasId; }
+ public long getId() { return id_; }
+
+ // optional string location = 5;
+ public static final int LOCATION_FIELD_NUMBER = 5;
+ private boolean hasLocation;
+ private java.lang.String location_ = "";
+ public boolean hasLocation() { return hasLocation; }
+ public java.lang.String getLocation() { return location_; }
+
+ @Override
+ public final boolean isInitialized() {
+ if (!hasName) return false;
+ return true;
+ }
+
+ @Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (hasName()) {
+ output.writeString(1, getName());
+ }
+ if (hasStartKey()) {
+ output.writeBytes(2, getStartKey());
+ }
+ if (hasEndKey()) {
+ output.writeBytes(3, getEndKey());
+ }
+ if (hasId()) {
+ output.writeInt64(4, getId());
+ }
+ if (hasLocation()) {
+ output.writeString(5, getLocation());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
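+ // Note (editor's comment): -1 marks the size as not yet computed;
+ // getSerializedSize() caches its result here since a built message is
+ // immutable.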
+ private int memoizedSerializedSize = -1;
+ @Override
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (hasName()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(1, getName());
+ }
+ if (hasStartKey()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getStartKey());
+ }
+ if (hasEndKey()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(3, getEndKey());
+ }
+ if (hasId()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(4, getId());
+ }
+ if (hasLocation()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(5, getLocation());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return new Builder(); }
+ public Builder newBuilderForType() { return new Builder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region prototype) {
+ return new Builder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.newBuilder()
+ private Builder() {}
+
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region();
+
+ @Override
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region internalGetResult() {
+ return result;
+ }
+
+ @Override
+ public Builder clear() {
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region();
+ return this;
+ }
+
+ @Override
+ public Builder clone() {
+ return new Builder().mergeFrom(result);
+ }
+
+ @Override
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region build() {
+ if (result != null && !isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder."); }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ @Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ setName(other.getName());
+ }
+ if (other.hasStartKey()) {
+ setStartKey(other.getStartKey());
+ }
+ if (other.hasEndKey()) {
+ setEndKey(other.getEndKey());
+ }
+ if (other.hasId()) {
+ setId(other.getId());
+ }
+ if (other.hasLocation()) {
+ setLocation(other.getLocation());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return mergeFrom(input,
+ com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ setName(input.readString());
+ break;
+ }
+ case 18: {
+ setStartKey(input.readBytes());
+ break;
+ }
+ case 26: {
+ setEndKey(input.readBytes());
+ break;
+ }
+ case 32: {
+ setId(input.readInt64());
+ break;
+ }
+ case 42: {
+ setLocation(input.readString());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // required string name = 1;
+ public boolean hasName() {
+ return result.hasName();
+ }
+ public java.lang.String getName() {
+ return result.getName();
+ }
+ public Builder setName(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasName = true;
+ result.name_ = value;
+ return this;
+ }
+ public Builder clearName() {
+ result.hasName = false;
+ result.name_ = "";
+ return this;
+ }
+
+ // optional bytes startKey = 2;
+ public boolean hasStartKey() {
+ return result.hasStartKey();
+ }
+ public com.google.protobuf.ByteString getStartKey() {
+ return result.getStartKey();
+ }
+ public Builder setStartKey(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasStartKey = true;
+ result.startKey_ = value;
+ return this;
+ }
+ public Builder clearStartKey() {
+ result.hasStartKey = false;
+ result.startKey_ = com.google.protobuf.ByteString.EMPTY;
+ return this;
+ }
+
+ // optional bytes endKey = 3;
+ public boolean hasEndKey() {
+ return result.hasEndKey();
+ }
+ public com.google.protobuf.ByteString getEndKey() {
+ return result.getEndKey();
+ }
+ public Builder setEndKey(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasEndKey = true;
+ result.endKey_ = value;
+ return this;
+ }
+ public Builder clearEndKey() {
+ result.hasEndKey = false;
+ result.endKey_ = com.google.protobuf.ByteString.EMPTY;
+ return this;
+ }
+
+ // optional int64 id = 4;
+ public boolean hasId() {
+ return result.hasId();
+ }
+ public long getId() {
+ return result.getId();
+ }
+ public Builder setId(long value) {
+ result.hasId = true;
+ result.id_ = value;
+ return this;
+ }
+ public Builder clearId() {
+ result.hasId = false;
+ result.id_ = 0L;
+ return this;
+ }
+
+ // optional string location = 5;
+ public boolean hasLocation() {
+ return result.hasLocation();
+ }
+ public java.lang.String getLocation() {
+ return result.getLocation();
+ }
+ public Builder setLocation(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasLocation = true;
+ result.location_ = value;
+ return this;
+ }
+ public Builder clearLocation() {
+ result.hasLocation = false;
+ result.location_ = "";
+ return this;
+ }
+ }
+
+ static {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.getDescriptor();
+ }
+ }
+
+ // required string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private boolean hasName;
+ private java.lang.String name_ = "";
+ public boolean hasName() { return hasName; }
+ public java.lang.String getName() { return name_; }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfo.Region regions = 2;
+ public static final int REGIONS_FIELD_NUMBER = 2;
+ private java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region> regions_ =
+ java.util.Collections.emptyList();
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region> getRegionsList() {
+ return regions_;
+ }
+ public int getRegionsCount() { return regions_.size(); }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region getRegions(int index) {
+ return regions_.get(index);
+ }
+
+ @Override
+ public final boolean isInitialized() {
+ if (!hasName) return false;
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region element : getRegionsList()) {
+ if (!element.isInitialized()) return false;
+ }
+ return true;
+ }
+
+ @Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (hasName()) {
+ output.writeString(1, getName());
+ }
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region element : getRegionsList()) {
+ output.writeMessage(2, element);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ @Override
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (hasName()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(1, getName());
+ }
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region element : getRegionsList()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, element);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return new Builder(); }
+ public Builder newBuilderForType() { return new Builder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo prototype) {
+ return new Builder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
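+ // Editor's sketch (not part of the generated output): typical assembly
+ // through the Builder, with hypothetical table and region names --
+ //   TableInfo info = TableInfo.newBuilder()
+ //     .setName("mytable")
+ //     .addRegions(TableInfo.Region.newBuilder()
+ //       .setName("mytable,,1262332800000").build())
+ //     .build();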
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.newBuilder()
+ private Builder() {}
+
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo();
+
+ @Override
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo internalGetResult() {
+ return result;
+ }
+
+ @Override
+ public Builder clear() {
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo();
+ return this;
+ }
+
+ @Override
+ public Builder clone() {
+ return new Builder().mergeFrom(result);
+ }
+
+ @Override
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo build() {
+ if (result != null && !isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder."); }
+ if (result.regions_ != java.util.Collections.EMPTY_LIST) {
+ result.regions_ =
+ java.util.Collections.unmodifiableList(result.regions_);
+ }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ @Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ setName(other.getName());
+ }
+ if (!other.regions_.isEmpty()) {
+ if (result.regions_.isEmpty()) {
+ result.regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region>();
+ }
+ result.regions_.addAll(other.regions_);
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return mergeFrom(input,
+ com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ setName(input.readString());
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder subBuilder = org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.newBuilder();
+ input.readMessage(subBuilder, extensionRegistry);
+ addRegions(subBuilder.buildPartial());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // required string name = 1;
+ public boolean hasName() {
+ return result.hasName();
+ }
+ public java.lang.String getName() {
+ return result.getName();
+ }
+ public Builder setName(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasName = true;
+ result.name_ = value;
+ return this;
+ }
+ public Builder clearName() {
+ result.hasName = false;
+ result.name_ = "";
+ return this;
+ }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfo.Region regions = 2;
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region> getRegionsList() {
+ return java.util.Collections.unmodifiableList(result.regions_);
+ }
+ public int getRegionsCount() {
+ return result.getRegionsCount();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region getRegions(int index) {
+ return result.getRegions(index);
+ }
+ public Builder setRegions(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.regions_.set(index, value);
+ return this;
+ }
+ public Builder setRegions(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder builderForValue) {
+ result.regions_.set(index, builderForValue.build());
+ return this;
+ }
+ public Builder addRegions(org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ if (result.regions_.isEmpty()) {
+ result.regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region>();
+ }
+ result.regions_.add(value);
+ return this;
+ }
+ public Builder addRegions(org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder builderForValue) {
+ if (result.regions_.isEmpty()) {
+ result.regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region>();
+ }
+ result.regions_.add(builderForValue.build());
+ return this;
+ }
+ public Builder addAllRegions(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region> values) {
+ if (result.regions_.isEmpty()) {
+ result.regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region>();
+ }
+ super.addAll(values, result.regions_);
+ return this;
+ }
+ public Builder clearRegions() {
+ result.regions_ = java.util.Collections.emptyList();
+ return this;
+ }
+ }
+
+ static {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.getDescriptor();
+ }
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_Region_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_Region_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String descriptorData =
+ "\n\026TableInfoMessage.proto\0223org.apache.had" +
+ "oop.hbase.stargate.protobuf.generated\"\311\001" +
+ "\n\tTableInfo\022\014\n\004name\030\001 \002(\t\022V\n\007regions\030\002 \003" +
+ "(\0132E.org.apache.hadoop.hbase.stargate.pr" +
+ "otobuf.generated.TableInfo.Region\032V\n\006Reg" +
+ "ion\022\014\n\004name\030\001 \002(\t\022\020\n\010startKey\030\002 \001(\014\022\016\n\006e" +
+ "ndKey\030\003 \001(\014\022\n\n\002id\030\004 \001(\003\022\020\n\010location\030\005 \001(" +
+ "\t";
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_descriptor,
+ new java.lang.String[] { "Name", "Regions", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Builder.class);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_Region_descriptor =
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_descriptor.getNestedTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_Region_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_Region_descriptor,
+ new java.lang.String[] { "Name", "StartKey", "EndKey", "Id", "Location", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+}
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/TableListMessage.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/TableListMessage.java
new file mode 100644
index 00000000000..85a1690c6fa
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/TableListMessage.java
@@ -0,0 +1,355 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+public final class TableListMessage {
+ private TableListMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public static final class TableList extends
+ com.google.protobuf.GeneratedMessage {
+ // Use TableList.newBuilder() to construct.
+ private TableList() {}
+
+ private static final TableList defaultInstance = new TableList();
+ public static TableList getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TableList getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableList_descriptor;
+ }
+
+ @Override
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableList_fieldAccessorTable;
+ }
+
+ // repeated string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private java.util.List<java.lang.String> name_ =
+ java.util.Collections.emptyList();
+ public java.util.List<java.lang.String> getNameList() {
+ return name_;
+ }
+ public int getNameCount() { return name_.size(); }
+ public java.lang.String getName(int index) {
+ return name_.get(index);
+ }
+
+ @Override
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ @Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ for (java.lang.String element : getNameList()) {
+ output.writeString(1, element);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ @Override
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
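+ // Note (editor's comment): each name contributes its length-delimited
+ // payload plus one byte for the field-1 tag, hence the "1 *" term below.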
+ {
+ int dataSize = 0;
+ for (java.lang.String element : getNameList()) {
+ dataSize += com.google.protobuf.CodedOutputStream
+ .computeStringSizeNoTag(element);
+ }
+ size += dataSize;
+ size += 1 * getNameList().size();
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return new Builder(); }
+ public Builder newBuilderForType() { return new Builder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList prototype) {
+ return new Builder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
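+ // Editor's sketch (not part of the generated output): round-tripping a
+ // TableList with a hypothetical table name --
+ //   byte[] data = TableList.newBuilder().addName("t1").build().toByteArray();
+ //   TableList copy = TableList.parseFrom(data);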
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList.newBuilder()
+ private Builder() {}
+
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList();
+
+ @Override
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList internalGetResult() {
+ return result;
+ }
+
+ @Override
+ public Builder clear() {
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList();
+ return this;
+ }
+
+ @Override
+ public Builder clone() {
+ return new Builder().mergeFrom(result);
+ }
+
+ @Override
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList build() {
+ if (result != null && !isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder."); }
+ if (result.name_ != java.util.Collections.EMPTY_LIST) {
+ result.name_ =
+ java.util.Collections.unmodifiableList(result.name_);
+ }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ @Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList.getDefaultInstance()) return this;
+ if (!other.name_.isEmpty()) {
+ if (result.name_.isEmpty()) {
+ result.name_ = new java.util.ArrayList<java.lang.String>();
+ }
+ result.name_.addAll(other.name_);
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return mergeFrom(input,
+ com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ addName(input.readString());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // repeated string name = 1;
+ public java.util.List<java.lang.String> getNameList() {
+ return java.util.Collections.unmodifiableList(result.name_);
+ }
+ public int getNameCount() {
+ return result.getNameCount();
+ }
+ public java.lang.String getName(int index) {
+ return result.getName(index);
+ }
+ public Builder setName(int index, java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.name_.set(index, value);
+ return this;
+ }
+ public Builder addName(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ if (result.name_.isEmpty()) {
+ result.name_ = new java.util.ArrayList<java.lang.String>();
+ }
+ result.name_.add(value);
+ return this;
+ }
+ public Builder addAllName(
+ java.lang.Iterable<? extends java.lang.String> values) {
+ if (result.name_.isEmpty()) {
+ result.name_ = new java.util.ArrayList<java.lang.String>();
+ }
+ super.addAll(values, result.name_);
+ return this;
+ }
+ public Builder clearName() {
+ result.name_ = java.util.Collections.emptyList();
+ return this;
+ }
+ }
+
+ static {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.getDescriptor();
+ }
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableList_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableList_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String descriptorData =
+ "\n\026TableListMessage.proto\0223org.apache.had" +
+ "oop.hbase.stargate.protobuf.generated\"\031\n" +
+ "\tTableList\022\014\n\004name\030\001 \003(\t";
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableList_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableList_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableList_descriptor,
+ new java.lang.String[] { "Name", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+}
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/TableSchemaMessage.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/TableSchemaMessage.java
new file mode 100644
index 00000000000..b14068c5294
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/TableSchemaMessage.java
@@ -0,0 +1,911 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+public final class TableSchemaMessage {
+ private TableSchemaMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public static final class TableSchema extends
+ com.google.protobuf.GeneratedMessage {
+ // Use TableSchema.newBuilder() to construct.
+ private TableSchema() {}
+
+ private static final TableSchema defaultInstance = new TableSchema();
+ public static TableSchema getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TableSchema getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_descriptor;
+ }
+
+ @Override
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_fieldAccessorTable;
+ }
+
+ public static final class Attribute extends
+ com.google.protobuf.GeneratedMessage {
+ // Use Attribute.newBuilder() to construct.
+ private Attribute() {}
+
+ private static final Attribute defaultInstance = new Attribute();
+ public static Attribute getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Attribute getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_descriptor;
+ }
+
+ @Override
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_fieldAccessorTable;
+ }
+
+ // required string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private boolean hasName;
+ private java.lang.String name_ = "";
+ public boolean hasName() { return hasName; }
+ public java.lang.String getName() { return name_; }
+
+ // required string value = 2;
+ public static final int VALUE_FIELD_NUMBER = 2;
+ private boolean hasValue;
+ private java.lang.String value_ = "";
+ public boolean hasValue() { return hasValue; }
+ public java.lang.String getValue() { return value_; }
+
+ @Override
+ public final boolean isInitialized() {
+ if (!hasName) return false;
+ if (!hasValue) return false;
+ return true;
+ }
+
+ @Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (hasName()) {
+ output.writeString(1, getName());
+ }
+ if (hasValue()) {
+ output.writeString(2, getValue());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ @Override
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (hasName()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(1, getName());
+ }
+ if (hasValue()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(2, getValue());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return new Builder(); }
+ public Builder newBuilderForType() { return new Builder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute prototype) {
+ return new Builder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.newBuilder()
+ private Builder() {}
+
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute();
+
+ @Override
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute internalGetResult() {
+ return result;
+ }
+
+ @Override
+ public Builder clear() {
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute();
+ return this;
+ }
+
+ @Override
+ public Builder clone() {
+ return new Builder().mergeFrom(result);
+ }
+
+ @Override
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute build() {
+ if (result != null && !isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder."); }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ @Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ setName(other.getName());
+ }
+ if (other.hasValue()) {
+ setValue(other.getValue());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return mergeFrom(input,
+ com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ setName(input.readString());
+ break;
+ }
+ case 18: {
+ setValue(input.readString());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // required string name = 1;
+ public boolean hasName() {
+ return result.hasName();
+ }
+ public java.lang.String getName() {
+ return result.getName();
+ }
+ public Builder setName(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasName = true;
+ result.name_ = value;
+ return this;
+ }
+ public Builder clearName() {
+ result.hasName = false;
+ result.name_ = "";
+ return this;
+ }
+
+ // required string value = 2;
+ public boolean hasValue() {
+ return result.hasValue();
+ }
+ public java.lang.String getValue() {
+ return result.getValue();
+ }
+ public Builder setValue(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasValue = true;
+ result.value_ = value;
+ return this;
+ }
+ public Builder clearValue() {
+ result.hasValue = false;
+ result.value_ = "";
+ return this;
+ }
+ }
+
+ static {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.getDescriptor();
+ }
+ }
+
+ // optional string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private boolean hasName;
+ private java.lang.String name_ = "";
+ public boolean hasName() { return hasName; }
+ public java.lang.String getName() { return name_; }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchema.Attribute attrs = 2;
+ public static final int ATTRS_FIELD_NUMBER = 2;
+ private java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> attrs_ =
+ java.util.Collections.emptyList();
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> getAttrsList() {
+ return attrs_;
+ }
+ public int getAttrsCount() { return attrs_.size(); }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute getAttrs(int index) {
+ return attrs_.get(index);
+ }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchema columns = 3;
+ public static final int COLUMNS_FIELD_NUMBER = 3;
+ private java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema> columns_ =
+ java.util.Collections.emptyList();
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema> getColumnsList() {
+ return columns_;
+ }
+ public int getColumnsCount() { return columns_.size(); }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema getColumns(int index) {
+ return columns_.get(index);
+ }
+
+ // optional bool inMemory = 4;
+ public static final int INMEMORY_FIELD_NUMBER = 4;
+ private boolean hasInMemory;
+ private boolean inMemory_ = false;
+ public boolean hasInMemory() { return hasInMemory; }
+ public boolean getInMemory() { return inMemory_; }
+
+ // optional bool readOnly = 5;
+ public static final int READONLY_FIELD_NUMBER = 5;
+ private boolean hasReadOnly;
+ private boolean readOnly_ = false;
+ public boolean hasReadOnly() { return hasReadOnly; }
+ public boolean getReadOnly() { return readOnly_; }
+
+ @Override
+ public final boolean isInitialized() {
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute element : getAttrsList()) {
+ if (!element.isInitialized()) return false;
+ }
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema element : getColumnsList()) {
+ if (!element.isInitialized()) return false;
+ }
+ return true;
+ }
+
+ @Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (hasName()) {
+ output.writeString(1, getName());
+ }
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute element : getAttrsList()) {
+ output.writeMessage(2, element);
+ }
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema element : getColumnsList()) {
+ output.writeMessage(3, element);
+ }
+ if (hasInMemory()) {
+ output.writeBool(4, getInMemory());
+ }
+ if (hasReadOnly()) {
+ output.writeBool(5, getReadOnly());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ @Override
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (hasName()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(1, getName());
+ }
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute element : getAttrsList()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, element);
+ }
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema element : getColumnsList()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, element);
+ }
+ if (hasInMemory()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(4, getInMemory());
+ }
+ if (hasReadOnly()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(5, getReadOnly());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return new Builder(); }
+ public Builder newBuilderForType() { return new Builder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema prototype) {
+ return new Builder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.newBuilder()
+ private Builder() {}
+
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema();
+
+ @Override
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema internalGetResult() {
+ return result;
+ }
+
+ @Override
+ public Builder clear() {
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema();
+ return this;
+ }
+
+ @Override
+ public Builder clone() {
+ return new Builder().mergeFrom(result);
+ }
+
+ @Override
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema build() {
+ if (result != null && !isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder."); }
+ if (result.attrs_ != java.util.Collections.EMPTY_LIST) {
+ result.attrs_ =
+ java.util.Collections.unmodifiableList(result.attrs_);
+ }
+ if (result.columns_ != java.util.Collections.EMPTY_LIST) {
+ result.columns_ =
+ java.util.Collections.unmodifiableList(result.columns_);
+ }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ @Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ setName(other.getName());
+ }
+ if (!other.attrs_.isEmpty()) {
+ if (result.attrs_.isEmpty()) {
+ result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute>();
+ }
+ result.attrs_.addAll(other.attrs_);
+ }
+ if (!other.columns_.isEmpty()) {
+ if (result.columns_.isEmpty()) {
+ result.columns_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema>();
+ }
+ result.columns_.addAll(other.columns_);
+ }
+ if (other.hasInMemory()) {
+ setInMemory(other.getInMemory());
+ }
+ if (other.hasReadOnly()) {
+ setReadOnly(other.getReadOnly());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return mergeFrom(input,
+ com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ setName(input.readString());
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder subBuilder = org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.newBuilder();
+ input.readMessage(subBuilder, extensionRegistry);
+ addAttrs(subBuilder.buildPartial());
+ break;
+ }
+ case 26: {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder subBuilder = org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.newBuilder();
+ input.readMessage(subBuilder, extensionRegistry);
+ addColumns(subBuilder.buildPartial());
+ break;
+ }
+ case 32: {
+ setInMemory(input.readBool());
+ break;
+ }
+ case 40: {
+ setReadOnly(input.readBool());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // optional string name = 1;
+ public boolean hasName() {
+ return result.hasName();
+ }
+ public java.lang.String getName() {
+ return result.getName();
+ }
+ public Builder setName(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasName = true;
+ result.name_ = value;
+ return this;
+ }
+ public Builder clearName() {
+ result.hasName = false;
+ result.name_ = "";
+ return this;
+ }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchema.Attribute attrs = 2;
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> getAttrsList() {
+ return java.util.Collections.unmodifiableList(result.attrs_);
+ }
+ public int getAttrsCount() {
+ return result.getAttrsCount();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute getAttrs(int index) {
+ return result.getAttrs(index);
+ }
+ public Builder setAttrs(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.attrs_.set(index, value);
+ return this;
+ }
+ public Builder setAttrs(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder builderForValue) {
+ result.attrs_.set(index, builderForValue.build());
+ return this;
+ }
+ public Builder addAttrs(org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ if (result.attrs_.isEmpty()) {
+ result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute>();
+ }
+ result.attrs_.add(value);
+ return this;
+ }
+ public Builder addAttrs(org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder builderForValue) {
+ if (result.attrs_.isEmpty()) {
+ result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute>();
+ }
+ result.attrs_.add(builderForValue.build());
+ return this;
+ }
+ public Builder addAllAttrs(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> values) {
+ if (result.attrs_.isEmpty()) {
+ result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute>();
+ }
+ super.addAll(values, result.attrs_);
+ return this;
+ }
+ public Builder clearAttrs() {
+ result.attrs_ = java.util.Collections.emptyList();
+ return this;
+ }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchema columns = 3;
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema> getColumnsList() {
+ return java.util.Collections.unmodifiableList(result.columns_);
+ }
+ public int getColumnsCount() {
+ return result.getColumnsCount();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema getColumns(int index) {
+ return result.getColumns(index);
+ }
+ public Builder setColumns(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.columns_.set(index, value);
+ return this;
+ }
+ public Builder setColumns(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder builderForValue) {
+ result.columns_.set(index, builderForValue.build());
+ return this;
+ }
+ public Builder addColumns(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ if (result.columns_.isEmpty()) {
+ result.columns_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema>();
+ }
+ result.columns_.add(value);
+ return this;
+ }
+ public Builder addColumns(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder builderForValue) {
+ if (result.columns_.isEmpty()) {
+ result.columns_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema>();
+ }
+ result.columns_.add(builderForValue.build());
+ return this;
+ }
+ public Builder addAllColumns(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema> values) {
+ if (result.columns_.isEmpty()) {
+ result.columns_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema>();
+ }
+ super.addAll(values, result.columns_);
+ return this;
+ }
+ public Builder clearColumns() {
+ result.columns_ = java.util.Collections.emptyList();
+ return this;
+ }
+
+ // optional bool inMemory = 4;
+ public boolean hasInMemory() {
+ return result.hasInMemory();
+ }
+ public boolean getInMemory() {
+ return result.getInMemory();
+ }
+ public Builder setInMemory(boolean value) {
+ result.hasInMemory = true;
+ result.inMemory_ = value;
+ return this;
+ }
+ public Builder clearInMemory() {
+ result.hasInMemory = false;
+ result.inMemory_ = false;
+ return this;
+ }
+
+ // optional bool readOnly = 5;
+ public boolean hasReadOnly() {
+ return result.hasReadOnly();
+ }
+ public boolean getReadOnly() {
+ return result.getReadOnly();
+ }
+ public Builder setReadOnly(boolean value) {
+ result.hasReadOnly = true;
+ result.readOnly_ = value;
+ return this;
+ }
+ public Builder clearReadOnly() {
+ result.hasReadOnly = false;
+ result.readOnly_ = false;
+ return this;
+ }
+ }
+
+ static {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.getDescriptor();
+ }
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String descriptorData =
+ "\n\030TableSchemaMessage.proto\0223org.apache.h" +
+ "adoop.hbase.stargate.protobuf.generated\032" +
+ "\031ColumnSchemaMessage.proto\"\230\002\n\013TableSche" +
+ "ma\022\014\n\004name\030\001 \001(\t\022Y\n\005attrs\030\002 \003(\0132J.org.ap" +
+ "ache.hadoop.hbase.stargate.protobuf.gene" +
+ "rated.TableSchema.Attribute\022R\n\007columns\030\003" +
+ " \003(\0132A.org.apache.hadoop.hbase.stargate." +
+ "protobuf.generated.ColumnSchema\022\020\n\010inMem" +
+ "ory\030\004 \001(\010\022\020\n\010readOnly\030\005 \001(\010\032(\n\tAttribute" +
+ "\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t";
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_descriptor,
+ new java.lang.String[] { "Name", "Attrs", "Columns", "InMemory", "ReadOnly", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Builder.class);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_descriptor =
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_descriptor.getNestedTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_descriptor,
+ new java.lang.String[] { "Name", "Value", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.getDescriptor(),
+ }, assigner);
+ }
+}
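For context, a minimal round-trip through the generated TableSchema API might look like the sketch below; the class name, table name, and attribute values are illustrative only and not part of the patch. toByteArray() is inherited from the protobuf runtime base class, while parseFrom(byte[]) is generated above.

    import org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema;

    public class TableSchemaRoundTrip {
      public static void main(String[] args) throws Exception {
        // Assemble a schema with one attribute via the generated builders.
        TableSchema schema = TableSchema.newBuilder()
          .setName("example")
          .setInMemory(false)
          .setReadOnly(false)
          .addAttrs(TableSchema.Attribute.newBuilder()
            .setName("IS_META")
            .setValue("false")
            .build())
          .build();
        // Serialize and parse back to verify the wire format.
        TableSchema copy = TableSchema.parseFrom(schema.toByteArray());
        System.out.println(copy.getName() + ": " + copy.getAttrsCount() + " attr(s)");
      }
    }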
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/VersionMessage.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/VersionMessage.java
new file mode 100644
index 00000000000..8dcfcb9c4a3
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/protobuf/generated/VersionMessage.java
@@ -0,0 +1,489 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+public final class VersionMessage {
+ private VersionMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public static final class Version extends
+ com.google.protobuf.GeneratedMessage {
+ // Use Version.newBuilder() to construct.
+ private Version() {}
+
+ private static final Version defaultInstance = new Version();
+ public static Version getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Version getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_descriptor;
+ }
+
+ @Override
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_fieldAccessorTable;
+ }
+
+ // optional string stargateVersion = 1;
+ public static final int STARGATEVERSION_FIELD_NUMBER = 1;
+ private boolean hasStargateVersion;
+ private java.lang.String stargateVersion_ = "";
+ public boolean hasStargateVersion() { return hasStargateVersion; }
+ public java.lang.String getStargateVersion() { return stargateVersion_; }
+
+ // optional string jvmVersion = 2;
+ public static final int JVMVERSION_FIELD_NUMBER = 2;
+ private boolean hasJvmVersion;
+ private java.lang.String jvmVersion_ = "";
+ public boolean hasJvmVersion() { return hasJvmVersion; }
+ public java.lang.String getJvmVersion() { return jvmVersion_; }
+
+ // optional string osVersion = 3;
+ public static final int OSVERSION_FIELD_NUMBER = 3;
+ private boolean hasOsVersion;
+ private java.lang.String osVersion_ = "";
+ public boolean hasOsVersion() { return hasOsVersion; }
+ public java.lang.String getOsVersion() { return osVersion_; }
+
+ // optional string serverVersion = 4;
+ public static final int SERVERVERSION_FIELD_NUMBER = 4;
+ private boolean hasServerVersion;
+ private java.lang.String serverVersion_ = "";
+ public boolean hasServerVersion() { return hasServerVersion; }
+ public java.lang.String getServerVersion() { return serverVersion_; }
+
+ // optional string jerseyVersion = 5;
+ public static final int JERSEYVERSION_FIELD_NUMBER = 5;
+ private boolean hasJerseyVersion;
+ private java.lang.String jerseyVersion_ = "";
+ public boolean hasJerseyVersion() { return hasJerseyVersion; }
+ public java.lang.String getJerseyVersion() { return jerseyVersion_; }
+
+ @Override
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ @Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (hasStargateVersion()) {
+ output.writeString(1, getStargateVersion());
+ }
+ if (hasJvmVersion()) {
+ output.writeString(2, getJvmVersion());
+ }
+ if (hasOsVersion()) {
+ output.writeString(3, getOsVersion());
+ }
+ if (hasServerVersion()) {
+ output.writeString(4, getServerVersion());
+ }
+ if (hasJerseyVersion()) {
+ output.writeString(5, getJerseyVersion());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ @Override
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (hasStargateVersion()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(1, getStargateVersion());
+ }
+ if (hasJvmVersion()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(2, getJvmVersion());
+ }
+ if (hasOsVersion()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(3, getOsVersion());
+ }
+ if (hasServerVersion()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(4, getServerVersion());
+ }
+ if (hasJerseyVersion()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(5, getJerseyVersion());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return new Builder(); }
+ public Builder newBuilderForType() { return new Builder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version prototype) {
+ return new Builder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version.newBuilder()
+ private Builder() {}
+
+ org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version result = new org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version();
+
+ @Override
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version internalGetResult() {
+ return result;
+ }
+
+ @Override
+ public Builder clear() {
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version();
+ return this;
+ }
+
+ @Override
+ public Builder clone() {
+ return new Builder().mergeFrom(result);
+ }
+
+ @Override
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version build() {
+ if (result != null && !isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw new com.google.protobuf.UninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder."); }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ @Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version.getDefaultInstance()) return this;
+ if (other.hasStargateVersion()) {
+ setStargateVersion(other.getStargateVersion());
+ }
+ if (other.hasJvmVersion()) {
+ setJvmVersion(other.getJvmVersion());
+ }
+ if (other.hasOsVersion()) {
+ setOsVersion(other.getOsVersion());
+ }
+ if (other.hasServerVersion()) {
+ setServerVersion(other.getServerVersion());
+ }
+ if (other.hasJerseyVersion()) {
+ setJerseyVersion(other.getJerseyVersion());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return mergeFrom(input,
+ com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
+ }
+
+ @Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistry extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ setStargateVersion(input.readString());
+ break;
+ }
+ case 18: {
+ setJvmVersion(input.readString());
+ break;
+ }
+ case 26: {
+ setOsVersion(input.readString());
+ break;
+ }
+ case 34: {
+ setServerVersion(input.readString());
+ break;
+ }
+ case 42: {
+ setJerseyVersion(input.readString());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // optional string stargateVersion = 1;
+ public boolean hasStargateVersion() {
+ return result.hasStargateVersion();
+ }
+ public java.lang.String getStargateVersion() {
+ return result.getStargateVersion();
+ }
+ public Builder setStargateVersion(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasStargateVersion = true;
+ result.stargateVersion_ = value;
+ return this;
+ }
+ public Builder clearStargateVersion() {
+ result.hasStargateVersion = false;
+ result.stargateVersion_ = "";
+ return this;
+ }
+
+ // optional string jvmVersion = 2;
+ public boolean hasJvmVersion() {
+ return result.hasJvmVersion();
+ }
+ public java.lang.String getJvmVersion() {
+ return result.getJvmVersion();
+ }
+ public Builder setJvmVersion(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasJvmVersion = true;
+ result.jvmVersion_ = value;
+ return this;
+ }
+ public Builder clearJvmVersion() {
+ result.hasJvmVersion = false;
+ result.jvmVersion_ = "";
+ return this;
+ }
+
+ // optional string osVersion = 3;
+ public boolean hasOsVersion() {
+ return result.hasOsVersion();
+ }
+ public java.lang.String getOsVersion() {
+ return result.getOsVersion();
+ }
+ public Builder setOsVersion(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasOsVersion = true;
+ result.osVersion_ = value;
+ return this;
+ }
+ public Builder clearOsVersion() {
+ result.hasOsVersion = false;
+ result.osVersion_ = "";
+ return this;
+ }
+
+ // optional string serverVersion = 4;
+ public boolean hasServerVersion() {
+ return result.hasServerVersion();
+ }
+ public java.lang.String getServerVersion() {
+ return result.getServerVersion();
+ }
+ public Builder setServerVersion(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasServerVersion = true;
+ result.serverVersion_ = value;
+ return this;
+ }
+ public Builder clearServerVersion() {
+ result.hasServerVersion = false;
+ result.serverVersion_ = "";
+ return this;
+ }
+
+ // optional string jerseyVersion = 5;
+ public boolean hasJerseyVersion() {
+ return result.hasJerseyVersion();
+ }
+ public java.lang.String getJerseyVersion() {
+ return result.getJerseyVersion();
+ }
+ public Builder setJerseyVersion(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasJerseyVersion = true;
+ result.jerseyVersion_ = value;
+ return this;
+ }
+ public Builder clearJerseyVersion() {
+ result.hasJerseyVersion = false;
+ result.jerseyVersion_ = "";
+ return this;
+ }
+ }
+
+ static {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.getDescriptor();
+ }
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String descriptorData =
+ "\n\024VersionMessage.proto\0223org.apache.hadoo" +
+ "p.hbase.stargate.protobuf.generated\"w\n\007V" +
+ "ersion\022\027\n\017stargateVersion\030\001 \001(\t\022\022\n\njvmVe" +
+ "rsion\030\002 \001(\t\022\021\n\tosVersion\030\003 \001(\t\022\025\n\rserver" +
+ "Version\030\004 \001(\t\022\025\n\rjerseyVersion\030\005 \001(\t";
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_descriptor,
+ new java.lang.String[] { "StargateVersion", "JvmVersion", "OsVersion", "ServerVersion", "JerseyVersion", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+}
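The Version message is built and re-parsed the same way; since every field is optional, a partially populated message is still valid. A small sketch with illustrative values:

    import org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version;

    public class VersionRoundTrip {
      public static void main(String[] args) throws Exception {
        // Populate a few of the optional fields from the running JVM.
        Version version = Version.newBuilder()
          .setStargateVersion("0.0.1")
          .setJvmVersion(System.getProperty("java.version"))
          .setOsVersion(System.getProperty("os.name"))
          .build();
        Version parsed = Version.parseFrom(version.toByteArray());
        // Unset fields report absent via the generated has* accessors.
        System.out.println(parsed.hasServerVersion()); // false
        System.out.println(parsed.getJvmVersion());
      }
    }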
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/provider/JAXBContextResolver.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/provider/JAXBContextResolver.java
new file mode 100644
index 00000000000..c4946380012
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/provider/JAXBContextResolver.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.provider;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+
+import javax.ws.rs.ext.ContextResolver;
+import javax.ws.rs.ext.Provider;
+import javax.xml.bind.JAXBContext;
+
+import org.apache.hadoop.hbase.stargate.model.CellModel;
+import org.apache.hadoop.hbase.stargate.model.CellSetModel;
+import org.apache.hadoop.hbase.stargate.model.ColumnSchemaModel;
+import org.apache.hadoop.hbase.stargate.model.RowModel;
+import org.apache.hadoop.hbase.stargate.model.ScannerModel;
+import org.apache.hadoop.hbase.stargate.model.StorageClusterStatusModel;
+import org.apache.hadoop.hbase.stargate.model.StorageClusterVersionModel;
+import org.apache.hadoop.hbase.stargate.model.TableInfoModel;
+import org.apache.hadoop.hbase.stargate.model.TableListModel;
+import org.apache.hadoop.hbase.stargate.model.TableModel;
+import org.apache.hadoop.hbase.stargate.model.TableRegionModel;
+import org.apache.hadoop.hbase.stargate.model.TableSchemaModel;
+import org.apache.hadoop.hbase.stargate.model.VersionModel;
+
+import com.sun.jersey.api.json.JSONConfiguration;
+import com.sun.jersey.api.json.JSONJAXBContext;
+
+@Provider
+public class JAXBContextResolver implements ContextResolver<JAXBContext> {
+
+ private final JAXBContext context;
+
+ private final Set<Class<?>> types;
+
+ private final Class<?>[] cTypes = {
+ CellModel.class,
+ CellSetModel.class,
+ ColumnSchemaModel.class,
+ RowModel.class,
+ ScannerModel.class,
+ StorageClusterStatusModel.class,
+ StorageClusterVersionModel.class,
+ TableInfoModel.class,
+ TableListModel.class,
+ TableModel.class,
+ TableRegionModel.class,
+ TableSchemaModel.class,
+ VersionModel.class
+ };
+
+ @SuppressWarnings("unchecked")
+ public JAXBContextResolver() throws Exception {
+ this.types = new HashSet<Class<?>>(Arrays.asList(cTypes));
+ this.context = new JSONJAXBContext(JSONConfiguration.natural().build(),
+ cTypes);
+ }
+
+ @Override
+ public JAXBContext getContext(Class<?> objectType) {
+ System.out.println("Executed getContext");
+ return (types.contains(objectType)) ? context : null;
+ }
+}
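Jersey discovers the @Provider-annotated resolver on the classpath and consults it whenever one of the listed model classes is marshalled. Exercised directly, its contract would look like this sketch (the test class name is hypothetical):

    import javax.xml.bind.JAXBContext;

    import org.apache.hadoop.hbase.stargate.model.TableListModel;
    import org.apache.hadoop.hbase.stargate.provider.JAXBContextResolver;

    public class ResolverCheck {
      public static void main(String[] args) throws Exception {
        JAXBContextResolver resolver = new JAXBContextResolver();
        // Registered model classes share the JSON natural-notation context.
        JAXBContext context = resolver.getContext(TableListModel.class);
        System.out.println(context != null);                    // true
        // Unknown types return null, so Jersey falls back to its default.
        System.out.println(resolver.getContext(String.class));  // null
      }
    }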
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/provider/consumer/ProtobufMessageBodyConsumer.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/provider/consumer/ProtobufMessageBodyConsumer.java
new file mode 100644
index 00000000000..44be5813bad
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/provider/consumer/ProtobufMessageBodyConsumer.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.provider.consumer;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Type;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.ext.MessageBodyReader;
+import javax.ws.rs.ext.Provider;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.stargate.Constants;
+import org.apache.hadoop.hbase.stargate.model.IProtobufWrapper;
+
+@Provider
+@Consumes(Constants.MIMETYPE_PROTOBUF)
+public class ProtobufMessageBodyConsumer implements MessageBodyReader<IProtobufWrapper> {
+ private static final Log LOG =
+ LogFactory.getLog(ProtobufMessageBodyConsumer.class);
+
+ @Override
+ public boolean isReadable(Class<?> type, Type genericType,
+ Annotation[] annotations, MediaType mediaType) {
+ return IProtobufWrapper.class.isAssignableFrom(type);
+ }
+
+ @Override
+ public IProtobufWrapper readFrom(Class<IProtobufWrapper> type, Type genericType,
+ Annotation[] annotations, MediaType mediaType,
+ MultivaluedMap<String, String> httpHeaders, InputStream inputStream)
+ throws IOException, WebApplicationException {
+ IProtobufWrapper obj = null;
+ try {
+ obj = type.newInstance();
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ byte[] buffer = new byte[4096];
+ int read;
+ do {
+ read = inputStream.read(buffer, 0, buffer.length);
+ if (read > 0) {
+ baos.write(buffer, 0, read);
+ }
+ } while (read > 0);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(getClass() + ": read " + baos.size() + " bytes from " +
+ inputStream);
+ }
+ obj = obj.getObjectFromMessage(baos.toByteArray());
+ } catch (InstantiationException e) {
+ throw new WebApplicationException(e);
+ } catch (IllegalAccessException e) {
+ throw new WebApplicationException(e);
+ }
+ return obj;
+ }
+
+}
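The readFrom loop above drains the request entity completely before handing the bytes to the wrapper's getObjectFromMessage. In isolation the buffering idiom is the following sketch (standalone, not part of the patch):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.InputStream;

    public class DrainStream {
      // Mirrors the 4 KB read loop used by ProtobufMessageBodyConsumer:
      // read until end-of-stream, accumulating into a byte array.
      static byte[] drain(InputStream in) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        byte[] buffer = new byte[4096];
        int read;
        while ((read = in.read(buffer, 0, buffer.length)) > 0) {
          baos.write(buffer, 0, read);
        }
        return baos.toByteArray();
      }

      public static void main(String[] args) throws IOException {
        byte[] out = drain(new ByteArrayInputStream(new byte[] {1, 2, 3}));
        System.out.println(out.length); // 3
      }
    }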
diff --git a/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/provider/producer/PlainTextMessageBodyProducer.java b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/provider/producer/PlainTextMessageBodyProducer.java
new file mode 100644
index 00000000000..e0dd6478b7c
--- /dev/null
+++ b/src/contrib/stargate/src/java/org/apache/hadoop/hbase/stargate/provider/producer/PlainTextMessageBodyProducer.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.provider.producer;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Type;
+
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.ext.MessageBodyWriter;
+import javax.ws.rs.ext.Provider;
+
+import org.apache.hadoop.hbase.stargate.Constants;
+
+@Provider
+@Produces(Constants.MIMETYPE_TEXT)
+public class PlainTextMessageBodyProducer implements MessageBodyWriter<Object>