added stargate contrib

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@789136 13f79535-47bb-0310-9956-ffa450edef68

parent 184c5c2903
commit 0ff28bdd32

@@ -70,13 +70,12 @@
  <!-- the unit test classpath -->
  <path id="test.classpath">
    <path refid="classpath"/>
    <pathelement location="${build.test}" />
    <pathelement location="${hadoop.root}/build/test/classes"/>
    <pathelement location="${hadoop.root}/build/test"/>
    <pathelement location="${hadoop.root}/build"/>
    <pathelement location="${hadoop.root}/src/contrib/test"/>
    <pathelement location="${conf.dir}"/>
    <pathelement location="${hadoop.root}/build"/>
    <pathelement location="${build.examples}"/>
    <path refid="classpath"/>
  </path>

@@ -31,6 +31,12 @@
      <fileset dir="." includes="*/build.xml"/>
    </subant>
  </target>

  <target name="compile-test">
    <subant target="compile-test">
      <fileset dir="." includes="*/build.xml"/>
    </subant>
  </target>

  <!-- ====================================================== -->
  <!-- Package contrib jars. -->
@@ -46,13 +52,10 @@
  <!-- ====================================================== -->
  <target name="test">
    <subant target="test">
      <fileset dir="." includes="streaming/build.xml"/>
      <fileset dir="." includes="fairscheduler/build.xml"/>
      <fileset dir="." includes="capacity-scheduler/build.xml"/>
      <fileset dir="." includes="*/build.xml"/>
    </subant>
  </target>

  <!-- ====================================================== -->
  <!-- Clean all the contribs. -->
  <!-- ====================================================== -->

@@ -0,0 +1,120 @@
<project name="stargate" default="archives" basedir=".">

  <import file="../build-contrib.xml"/>

  <property name="version" value="0.0.1"/>
  <property name="build.war.classes" location="${build.dir}/warClasses"/>

  <property name="war.file" value="stargate-${version}.war"/>
  <property name="web.xml.file" value="${conf.dir}/web.xml"/>

  <property name="jar.file" value="stargate-${version}.jar"/>
  <property name="test.jar.file" value="stargate-${version}-test.jar"/>

  <property name="javac.debug" value="on"/>
  <property name="javac.source" value="1.6"/>

  <target name="init">
    <tstamp/>
    <mkdir dir="${build.dir}"/>
    <mkdir dir="${build.classes}"/>
    <mkdir dir="${build.war.classes}"/>
    <mkdir dir="${build.test}"/>
  </target>

  <target name="javadoc">
    <javadoc access="public" destdir="${docs.dir}" source="${javac.source}" sourcepath="${src.dir}" splitindex="true" use="true" version="true">
      <classpath refid="classpath"/>
    </javadoc>
  </target>

  <target name="compile" depends="compile-jar, compile-war"/>

  <target name="compile-jar" depends="init">
    <javac srcdir="${src.dir}" destdir="${build.classes}" debug="${javac.debug}" source="${javac.source}">
      <classpath refid="classpath"/>
    </javac>
  </target>

  <target name="compile-war" depends="init">
    <javac srcdir="${src.dir}" destdir="${build.war.classes}" debug="${javac.debug}" source="${javac.source}">
      <classpath refid="classpath"/>
      <exclude name="**/Main.java"/>
    </javac>
  </target>

  <target name="compile-test" depends="compile-war">
    <javac srcdir="${src.test}" includes="**/*.java" destdir="${build.test}" debug="${javac.debug}" source="1.6">
      <classpath refid="test.classpath"/>
    </javac>
  </target>

  <target name="test" depends="compile-test" description="Build test jar and run tests">
    <delete dir="${test.log.dir}"/>
    <mkdir dir="${test.log.dir}"/>
    <junit printsummary="yes" showoutput="${test.output}" haltonfailure="no" fork="yes" forkmode="once" maxmemory="1000m" errorProperty="tests.failed" failureProperty="tests.failed">
      <sysproperty key="test.build.data" value="${build.test}/data"/>
      <sysproperty key="build.test" value="${build.test}"/>
      <sysproperty key="user.dir" value="${build.test}/data"/>
      <sysproperty key="test.log.dir" value="${hadoop.log.dir}"/>
      <classpath refid="test.classpath"/>
      <formatter type="${test.junit.output.format}"/>
      <batchtest todir="${build.test}" unless="testcase">
        <fileset dir="${src.test}" includes="**/Test*.java"/>
      </batchtest>
      <batchtest todir="${build.test}" if="testcase">
        <fileset dir="${src.test}" includes="**/${testcase}.java"/>
      </batchtest>
    </junit>
    <fail if="tests.failed">
      Tests failed!
    </fail>
  </target>

  <target name="war" depends="compile-war">
    <copy todir="${build.dir}/lib" overwrite="true">
      <fileset dir="${lib.dir}"/>
      <mapper type="flatten"/>
    </copy>
    <copy todir="${build.war.classes}" overwrite="true">
      <fileset dir="${conf.dir}">
        <include name="zoo.cfg"/>
      </fileset>
    </copy>

    <war destfile="${build.dir}/${war.file}" webxml="${web.xml.file}">
      <lib dir="${build.dir}/lib"/>
      <classes dir="${build.war.classes}"/>
    </war>
  </target>

  <target name="jar" depends="compile-jar">
    <jar jarfile="${build.dir}/${jar.file}" basedir="${build.classes}"/>
  </target>

  <target name="createDist" depends="jar, war">
    <mkdir dir="${dist.dir}"/>
    <mkdir dir="${dist.dir}/webapps"/>
    <mkdir dir="${dist.dir}/logs"/>
    <copy todir="${dist.dir}/lib" overwrite="true">
      <fileset dir="${lib.dir}/jetty"/>
      <fileset dir="${lib.dir}/general"/>
      <mapper type="flatten"/>
    </copy>
    <copy todir="${dist.dir}/bin" overwrite="true">
      <fileset dir="${basedir}/bin"/>
      <mapper type="flatten"/>
    </copy>
    <chmod perm="ugo+x" type="file">
      <fileset dir="${dist.dir}/bin"/>
    </chmod>
    <copy todir="${dist.dir}" overwrite="true" file="${build.dir}/${jar.file}"/>
    <copy todir="${dist.dir}/webapps" overwrite="true" file="${build.dir}/${war.file}"/>
  </target>

  <target name="clean">
    <delete dir="build"/>
    <delete dir="dist"/>
  </target>

</project>

@@ -0,0 +1,34 @@
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate;

public interface Constants {
  public static final String MIMETYPE_TEXT = "text/plain";
  public static final String MIMETYPE_XML = "text/xml";
  public static final String MIMETYPE_BINARY = "application/octet-stream";
  public static final String MIMETYPE_PROTOBUF = "application/x-protobuf";
  public static final String MIMETYPE_JSON = "application/json";
  public static final String MIMETYPE_JAVASCRIPT = "application/x-javascript";

  public static final String PATH_STATUS_CLUSTER = "/status/cluster";
  public static final String PATH_VERSION = "/version";
  public static final String PATH_VERSION_CLUSTER = "/version/cluster";
}

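(Editor's illustration, not part of this commit: once a Stargate instance is listening, the version path declared above can be fetched with plain JDK HTTP code. This is only a sketch; the localhost:8080 address is an assumption matching the default port used by Main.java later in this diff, and VersionCheck is a made-up class name.)

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class VersionCheck {
  public static void main(String[] args) throws Exception {
    // Hypothetical endpoint: host and port are assumptions; "/version" is PATH_VERSION above.
    URL url = new URL("http://localhost:8080/version");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "text/plain");  // MIMETYPE_TEXT
    BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream()));
    for (String line; (line = in.readLine()) != null; ) {
      System.out.println(line);  // prints whatever the server reports as its version
    }
    in.close();
    conn.disconnect();
  }
}
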
@@ -0,0 +1,106 @@
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.PosixParser;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Handler;
import org.mortbay.jetty.NCSARequestLog;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.deployer.WebAppDeployer;
import org.mortbay.jetty.handler.ContextHandlerCollection;
import org.mortbay.jetty.handler.DefaultHandler;
import org.mortbay.jetty.handler.HandlerCollection;
import org.mortbay.jetty.handler.RequestLogHandler;
import org.mortbay.jetty.nio.SelectChannelConnector;
import org.mortbay.thread.QueuedThreadPool;

public class Main {

  public static void main(String[] args) throws Exception {
    // process command line
    Options options = new Options();
    options.addOption("p", "port", true, "service port");
    CommandLineParser parser = new PosixParser();
    CommandLine cmd = parser.parse(options, args);
    int port = 8080;
    if (cmd.hasOption("p")) {
      port = Integer.valueOf(cmd.getOptionValue("p"));
    }

    HBaseConfiguration conf = new HBaseConfiguration();
    if (cmd.hasOption("m")) {
      conf.set("hbase.master", cmd.getOptionValue("m"));
    }

    /*
     * RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean(); if (runtime
     * != null) { LOG.info("vmName=" + runtime.getVmName() + ", vmVendor=" +
     * runtime.getVmVendor() + ", vmVersion=" + runtime.getVmVersion());
     * LOG.info("vmInputArguments=" + runtime.getInputArguments()); }
     */
    /*
     * poached from:
     * http://jetty.mortbay.org/xref/org/mortbay/jetty/example/LikeJettyXml.html
     */
    String jetty_home = ".";
    Server server = new Server();

    QueuedThreadPool threadPool = new QueuedThreadPool();
    threadPool.setMaxThreads(100);
    server.setThreadPool(threadPool);

    Connector connector = new SelectChannelConnector();
    connector.setPort(port);
    connector.setMaxIdleTime(30000);
    server.setConnectors(new Connector[] { connector });

    HandlerCollection handlers = new HandlerCollection();
    ContextHandlerCollection contexts = new ContextHandlerCollection();
    RequestLogHandler requestLogHandler = new RequestLogHandler();
    handlers.setHandlers(new Handler[] { contexts, new DefaultHandler(),
      requestLogHandler });
    server.setHandler(handlers);

    WebAppDeployer deployer1 = new WebAppDeployer();
    deployer1.setContexts(contexts);
    deployer1.setWebAppDir(jetty_home + "/webapps");
    deployer1.setParentLoaderPriority(false);
    deployer1.setExtract(true);
    deployer1.setAllowDuplicates(false);
    // deployer1.setDefaultsDescriptor(jetty_home + "/etc/webdefault.xml");
    server.addLifeCycle(deployer1);

    NCSARequestLog requestLog = new NCSARequestLog(jetty_home
      + "/logs/jetty-yyyy_mm_dd.log");
    requestLog.setExtended(false);
    requestLogHandler.setRequestLog(requestLog);

    server.setStopAtShutdown(true);
    server.setSendServerVersion(true);
    server.start();
    server.join();
  }
}

@@ -0,0 +1,126 @@
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate;

import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.util.Bytes;

import com.sun.jersey.server.impl.container.servlet.ServletAdaptor;
import com.sun.jersey.spi.container.servlet.ServletContainer;

public class RESTServlet extends ServletAdaptor {

  private static final long serialVersionUID = 1L;
  public static final int DEFAULT_MAX_AGE = 60 * 60 * 4;  // 4 hours
  public static final String VERSION_STRING = "0.0.1";

  private static RESTServlet instance;

  private final HBaseConfiguration conf;
  protected Map<String,Integer> maxAgeMap =
    Collections.synchronizedMap(new HashMap<String,Integer>());

  public synchronized static RESTServlet getInstance() throws IOException {
    if (instance == null) {
      instance = new RESTServlet();
    }
    return instance;
  }

  public RESTServlet() throws IOException {
    this.conf = new HBaseConfiguration();
  }

  protected HTablePool getTablePool(String name) {
    return HTablePool.getPool(conf, Bytes.toBytes(name));
  }

  protected HBaseConfiguration getConfiguration() {
    return conf;
  }

  /**
   * @param tableName
   * @return the maximum cache age suitable for use with this table, in
   * seconds
   * @throws IOException
   */
  public int getMaxAge(String tableName) throws IOException {
    Integer i = maxAgeMap.get(tableName);
    if (i != null) {
      return i.intValue();
    }
    HTablePool pool = this.getTablePool(tableName);
    HTable table = pool.get();
    if (table != null) {
      int maxAge = DEFAULT_MAX_AGE;
      for (HColumnDescriptor family:
          table.getTableDescriptor().getFamilies()) {
        int ttl = family.getTimeToLive();
        if (ttl < 0) {
          continue;
        }
        if (ttl < maxAge) {
          maxAge = ttl;
        }
      }
      maxAgeMap.put(tableName, maxAge);
      return maxAge;
    }
    return DEFAULT_MAX_AGE;
  }

  public void invalidateMaxAge(String tableName) {
    maxAgeMap.remove(tableName);
  }

  public static final String getVersion() {
    StringBuilder version = new StringBuilder();
    version.append("Stargate ");
    version.append(VERSION_STRING);
    version.append(" [JVM: ");
    version.append(System.getProperty("java.vm.vendor"));
    version.append(' ');
    version.append(System.getProperty("java.version"));
    version.append('-');
    version.append(System.getProperty("java.vm.version"));
    version.append("] [OS: ");
    version.append(System.getProperty("os.name"));
    version.append(' ');
    version.append(System.getProperty("os.version"));
    version.append(' ');
    version.append(System.getProperty("os.arch"));
    version.append("] [Jersey: ");
    version.append(ServletContainer.class.getPackage()
      .getImplementationVersion());
    version.append(']');
    return version.toString();
  }
}

@@ -0,0 +1,97 @@
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Map;

import javax.ws.rs.GET;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.CacheControl;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.ResponseBuilder;
import javax.ws.rs.core.UriInfo;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.stargate.model.TableInfoModel;
import org.apache.hadoop.hbase.stargate.model.TableRegionModel;

public class RegionsResource implements Constants {
  private static final Log LOG = LogFactory.getLog(RegionsResource.class);

  private String table;
  private CacheControl cacheControl;

  public RegionsResource(String table) {
    this.table = table;
    cacheControl = new CacheControl();
    cacheControl.setNoCache(true);
    cacheControl.setNoTransform(false);
  }

  private Map<HRegionInfo,HServerAddress> getTableRegions()
      throws IOException {
    HTablePool pool = RESTServlet.getInstance().getTablePool(this.table);
    HTable table = pool.get();
    try {
      return table.getRegionsInfo();
    } finally {
      pool.put(table);
    }
  }

  @GET
  @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
    MIMETYPE_PROTOBUF})
  public Response get(@Context UriInfo uriInfo) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("GET " + uriInfo.getAbsolutePath());
    }
    try {
      TableInfoModel model = new TableInfoModel(table);
      Map<HRegionInfo,HServerAddress> regions = getTableRegions();
      for (Map.Entry<HRegionInfo,HServerAddress> e: regions.entrySet()) {
        HRegionInfo hri = e.getKey();
        HServerAddress addr = e.getValue();
        InetSocketAddress sa = addr.getInetSocketAddress();
        model.add(
          new TableRegionModel(table, hri.getRegionId(), hri.getStartKey(),
            hri.getEndKey(),
            sa.getHostName() + ":" + Integer.valueOf(sa.getPort())));
      }
      ResponseBuilder response = Response.ok(model);
      response.cacheControl(cacheControl);
      return response.build();
    } catch (TableNotFoundException e) {
      throw new WebApplicationException(Response.Status.NOT_FOUND);
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.SERVICE_UNAVAILABLE);
    }
  }
}

@@ -0,0 +1,29 @@
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate;

import com.sun.jersey.api.core.PackagesResourceConfig;

public class ResourceConfig extends PackagesResourceConfig {
  public ResourceConfig() {
    super("org.apache.hadoop.hbase.stargate");
  }
}

@@ -0,0 +1,39 @@
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate;

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.hbase.KeyValue;

public abstract class ResultGenerator implements Iterator<KeyValue> {
  public static ResultGenerator fromRowSpec(String table, RowSpec rowspec)
      throws IOException {
    if (rowspec.isSingleRow()) {
      return new RowResultGenerator(table, rowspec);
    } else {
      return new ScannerResultGenerator(table, rowspec);
    }
  }

  public abstract void close();
}

@@ -0,0 +1,342 @@
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate;

import java.io.IOException;
import java.util.List;

import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Consumes;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.CacheControl;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.ResponseBuilder;
import javax.ws.rs.core.UriInfo;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.stargate.model.CellModel;
import org.apache.hadoop.hbase.stargate.model.CellSetModel;
import org.apache.hadoop.hbase.stargate.model.RowModel;
import org.apache.hadoop.hbase.util.Bytes;

public class RowResource implements Constants {
  private static final Log LOG = LogFactory.getLog(RowResource.class);

  private String table;
  private RowSpec rowspec;
  private CacheControl cacheControl;

  public RowResource(String table, String rowspec, String versions)
      throws IOException {
    this.table = table;
    this.rowspec = new RowSpec(rowspec);
    if (versions != null) {
      this.rowspec.setMaxVersions(Integer.valueOf(versions));
    }
    cacheControl = new CacheControl();
    cacheControl.setMaxAge(RESTServlet.getInstance().getMaxAge(table));
    cacheControl.setNoTransform(false);
  }

  @GET
  @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
    MIMETYPE_PROTOBUF})
  public Response get(@Context UriInfo uriInfo) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("GET " + uriInfo.getAbsolutePath());
    }
    try {
      ResultGenerator generator = ResultGenerator.fromRowSpec(table, rowspec);
      if (!generator.hasNext()) {
        throw new WebApplicationException(Response.Status.NOT_FOUND);
      }
      CellSetModel model = new CellSetModel();
      KeyValue value = generator.next();
      byte[] rowKey = value.getRow();
      RowModel rowModel = new RowModel(rowKey);
      do {
        if (!Bytes.equals(value.getRow(), rowKey)) {
          model.addRow(rowModel);
          rowKey = value.getRow();
          rowModel = new RowModel(rowKey);
        }
        rowModel.addCell(
          new CellModel(value.getColumn(), value.getTimestamp(),
            value.getValue()));
        value = generator.next();
      } while (value != null);
      model.addRow(rowModel);
      ResponseBuilder response = Response.ok(model);
      response.cacheControl(cacheControl);
      return response.build();
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.SERVICE_UNAVAILABLE);
    }
  }

  @GET
  @Produces(MIMETYPE_BINARY)
  public Response getBinary(@Context UriInfo uriInfo) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("GET " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY);
    }
    // doesn't make sense to use a non specific coordinate as this can only
    // return a single cell
    if (!rowspec.hasColumns() || rowspec.getColumns().length > 1) {
      throw new WebApplicationException(Response.Status.BAD_REQUEST);
    }
    try {
      ResultGenerator generator = ResultGenerator.fromRowSpec(table, rowspec);
      if (!generator.hasNext()) {
        throw new WebApplicationException(Response.Status.NOT_FOUND);
      }
      KeyValue value = generator.next();
      ResponseBuilder response = Response.ok(value.getValue());
      response.cacheControl(cacheControl);
      response.header("X-Timestamp", value.getTimestamp());
      return response.build();
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.SERVICE_UNAVAILABLE);
    }
  }

  private void deleteRow() {
    HTablePool pool;
    try {
      pool = RESTServlet.getInstance().getTablePool(this.table);
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.INTERNAL_SERVER_ERROR);
    }
    HTable table = null;
    try {
      table = pool.get();
      table.delete(new Delete(rowspec.getRow()));
      table.flushCommits();
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.SERVICE_UNAVAILABLE);
    } finally {
      if (table != null) {
        pool.put(table);
      }
    }
  }

  private Response update(CellSetModel model, boolean replace) {
    if (replace) {
      deleteRow();
    }
    HTablePool pool;
    try {
      pool = RESTServlet.getInstance().getTablePool(this.table);
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.INTERNAL_SERVER_ERROR);
    }
    HTable table = null;
    try {
      table = pool.get();
      for (RowModel row: model.getRows()) {
        Put put = new Put(row.getKey());
        for (CellModel cell: row.getCells()) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("update cell '" +
              Bytes.toStringBinary(cell.getColumn()) + "' @" +
              cell.getTimestamp() + " length " + cell.getValue().length);
          }
          byte [][] parts = KeyValue.parseColumn(cell.getColumn());
          if (cell.hasUserTimestamp()) {
            put.add(parts[0], parts[1], cell.getTimestamp(), cell.getValue());
          } else {
            put.add(parts[0], parts[1], cell.getValue());
          }
        }
        table.put(put);
      }
      table.flushCommits();
      ResponseBuilder response = Response.ok();
      return response.build();
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.SERVICE_UNAVAILABLE);
    } finally {
      if (table != null) {
        pool.put(table);
      }
    }
  }

  private Response updateBinary(byte[] message, HttpHeaders headers,
      boolean replace) {
    if (replace) {
      deleteRow();
    }
    HTablePool pool;
    try {
      pool = RESTServlet.getInstance().getTablePool(this.table);
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.INTERNAL_SERVER_ERROR);
    }
    HTable table = null;
    try {
      byte[] row = rowspec.getRow();
      byte[][] columns = rowspec.getColumns();
      byte[] column = null;
      if (columns != null) {
        column = columns[0];
      }
      long timestamp = -1;
      List<String> vals = headers.getRequestHeader("X-Row");
      if (vals != null && !vals.isEmpty()) {
        row = Bytes.toBytes(vals.get(0));
      }
      vals = headers.getRequestHeader("X-Column");
      if (vals != null && !vals.isEmpty()) {
        column = Bytes.toBytes(vals.get(0));
      }
      vals = headers.getRequestHeader("X-Timestamp");
      if (vals != null && !vals.isEmpty()) {
        timestamp = Long.valueOf(vals.get(0));
      }
      if (column == null) {
        throw new WebApplicationException(Response.Status.BAD_REQUEST);
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("update cell '" + Bytes.toStringBinary(column) + "' @" +
          timestamp + " length " + message.length);
      }
      Put put = new Put(row);
      byte parts[][] = KeyValue.parseColumn(column);
      if (timestamp >= 0) {
        put.add(parts[0], parts[1], timestamp, message);
      } else {
        put.add(parts[0], parts[1], message);
      }
      table = pool.get();
      table.put(put);
      table.flushCommits();
      return Response.ok().build();
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.SERVICE_UNAVAILABLE);
    } finally {
      if (table != null) {
        pool.put(table);
      }
    }
  }

  @PUT
  @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
    MIMETYPE_PROTOBUF})
  public Response put(CellSetModel model, @Context UriInfo uriInfo) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("PUT " + uriInfo.getAbsolutePath());
    }
    return update(model, true);
  }

  @PUT
  @Consumes(MIMETYPE_BINARY)
  public Response putBinary(byte[] message, @Context UriInfo uriInfo,
      @Context HttpHeaders headers)
  {
    if (LOG.isDebugEnabled()) {
      LOG.debug("PUT " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY);
    }
    return updateBinary(message, headers, true);
  }

  @POST
  @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
    MIMETYPE_PROTOBUF})
  public Response post(CellSetModel model, @Context UriInfo uriInfo) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("POST " + uriInfo.getAbsolutePath());
    }
    return update(model, false);
  }

  @POST
  @Consumes(MIMETYPE_BINARY)
  public Response postBinary(byte[] message, @Context UriInfo uriInfo,
      @Context HttpHeaders headers)
  {
    if (LOG.isDebugEnabled()) {
      LOG.debug("POST " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY);
    }
    return updateBinary(message, headers, false);
  }

  @DELETE
  public Response delete(@Context UriInfo uriInfo) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("DELETE " + uriInfo.getAbsolutePath());
    }
    Delete delete = new Delete(rowspec.getRow());
    for (byte[] column: rowspec.getColumns()) {
      byte[][] split = KeyValue.parseColumn(column);
      if (rowspec.hasTimestamp()) {
        delete.deleteColumns(split[0], split[1], rowspec.getTimestamp());
      } else {
        delete.deleteColumns(split[0], split[1]);
      }
    }
    HTablePool pool;
    try {
      pool = RESTServlet.getInstance().getTablePool(this.table);
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.INTERNAL_SERVER_ERROR);
    }
    HTable table = null;
    try {
      table = pool.get();
      table.delete(delete);
      table.flushCommits();
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.SERVICE_UNAVAILABLE);
    } finally {
      if (table != null) {
        pool.put(table);
      }
    }
    return Response.ok().build();
  }
}

@@ -0,0 +1,87 @@
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate;

import java.io.IOException;
import java.util.Iterator;
import java.util.NoSuchElementException;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.client.Result;

public class RowResultGenerator extends ResultGenerator {
  private Iterator<KeyValue> valuesI;

  public RowResultGenerator(String tableName, RowSpec rowspec)
      throws IllegalArgumentException, IOException {
    HTablePool pool = RESTServlet.getInstance().getTablePool(tableName);
    HTable table = pool.get();
    try {
      Get get = new Get(rowspec.getRow());
      if (rowspec.hasColumns()) {
        get.addColumns(rowspec.getColumns());
      } else {
        // rowspec does not explicitly specify columns, return them all
        for (HColumnDescriptor family:
            table.getTableDescriptor().getFamilies()) {
          get.addFamily(family.getName());
        }
      }
      get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
      get.setMaxVersions(rowspec.getMaxVersions());
      Result result = table.get(get);
      if (result != null && !result.isEmpty()) {
        valuesI = result.list().iterator();
      }
    } finally {
      pool.put(table);
    }
  }

  public void close() {
  }

  public boolean hasNext() {
    if (valuesI == null) {
      return false;
    }
    return valuesI.hasNext();
  }

  public KeyValue next() {
    if (valuesI == null) {
      return null;
    }
    try {
      return valuesI.next();
    } catch (NoSuchElementException e) {
      return null;
    }
  }

  public void remove() {
    throw new UnsupportedOperationException("remove not supported");
  }
}

@@ -0,0 +1,310 @@
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate;

import java.util.Collection;
import java.util.TreeSet;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.Bytes;

public class RowSpec {
  public static final long DEFAULT_START_TIMESTAMP = 0;
  public static final long DEFAULT_END_TIMESTAMP = Long.MAX_VALUE;

  private byte[] row = HConstants.EMPTY_START_ROW;
  private byte[] endRow = null;
  private TreeSet<byte[]> columns =
    new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
  private long startTime = DEFAULT_START_TIMESTAMP;
  private long endTime = DEFAULT_END_TIMESTAMP;
  private int maxVersions = HColumnDescriptor.DEFAULT_VERSIONS;

  public RowSpec(String path) throws IllegalArgumentException {
    int i = 0;
    while (path.charAt(i) == '/') {
      i++;
    }
    i = parseRowKeys(path, i);
    i = parseColumns(path, i);
    i = parseTimestamp(path, i);
  }

  private int parseRowKeys(String path, int i)
      throws IllegalArgumentException {
    StringBuilder startRow = new StringBuilder();
    StringBuilder endRow = null;
    try {
      char c;
      boolean doEndRow = false;
      while (i < path.length() && (c = path.charAt(i)) != '/') {
        if (c == ',') {
          doEndRow = true;
          i++;
          break;
        }
        startRow.append(c);
        i++;
      }
      i++;
      this.row = Bytes.toBytes(startRow.toString());
      if (doEndRow) {
        endRow = new StringBuilder();
        while ((c = path.charAt(i)) != '/') {
          endRow.append(c);
          i++;
        }
        i++;
      }
    } catch (IndexOutOfBoundsException e) {
      throw new IllegalArgumentException(e);
    }
    // HBase does not support wildcards on row keys so we will emulate a
    // suffix glob by synthesizing appropriate start and end row keys for
    // table scanning
    if (startRow.charAt(startRow.length() - 1) == '*') {
      if (endRow != null)
        throw new IllegalArgumentException("invalid path: start row "+
          "specified with wildcard");
      this.row = Bytes.toBytes(startRow.substring(0,
        startRow.lastIndexOf("*")));
      this.endRow = new byte[this.row.length + 1];
      System.arraycopy(this.row, 0, this.endRow, 0, this.row.length);
      this.endRow[this.row.length] = (byte)255;
    } else {
      this.row = Bytes.toBytes(startRow.toString());
      if (endRow != null) {
        this.endRow = Bytes.toBytes(endRow.toString());
      }
    }
    return i;
  }

  private int parseColumns(String path, int i)
      throws IllegalArgumentException {
    if (i >= path.length()) {
      return i;
    }
    try {
      char c;
      StringBuilder column = new StringBuilder();
      boolean hasColon = false;
      while (i < path.length() && (c = path.charAt(i)) != '/') {
        if (c == ',') {
          if (column.length() < 1) {
            throw new IllegalArgumentException("invalid path");
          }
          if (!hasColon) {
            column.append(':');
          }
          this.columns.add(Bytes.toBytes(column.toString()));
          column = new StringBuilder();
          hasColon = false;
          i++;
          continue;
        }
        if (c == ':') {
          hasColon = true;
        }
        column.append(c);
        i++;
      }
      i++;
      // trailing list entry
      if (column.length() > 1) {
        if (!hasColon) {
          column.append(':');
        }
        this.columns.add(Bytes.toBytes(column.toString()));
      }
    } catch (IndexOutOfBoundsException e) {
      throw new IllegalArgumentException(e);
    }
    return i;
  }

  private int parseTimestamp(String path, int i)
      throws IllegalArgumentException {
    if (i >= path.length()) {
      return i;
    }
    long time0 = 0, time1 = 0;
    try {
      char c = 0;
      StringBuilder stamp = new StringBuilder();
      while (i < path.length()) {
        c = path.charAt(i);
        if (c == '/' || c == ',') {
          break;
        }
        stamp.append(c);
        i++;
      }
      try {
        time0 = Long.valueOf(stamp.toString());
      } catch (NumberFormatException e) {
        throw new IllegalArgumentException(e);
      }
      if (c == ',') {
        stamp = new StringBuilder();
        i++;
        while (i < path.length() && ((c = path.charAt(i)) != '/')) {
          stamp.append(c);
          i++;
        }
        try {
          time1 = Long.valueOf(stamp.toString());
        } catch (NumberFormatException e) {
          throw new IllegalArgumentException(e);
        }
      }
      if (c == '/') {
        i++;
      }
    } catch (IndexOutOfBoundsException e) {
      throw new IllegalArgumentException(e);
    }
    if (time1 != 0) {
      startTime = time0;
      endTime = time1;
    } else {
      endTime = time0;
    }
    return i;
  }

  public RowSpec(byte[] startRow, byte[] endRow, byte[][] columns,
      long startTime, long endTime, int maxVersions) {
    this.row = startRow;
    this.endRow = endRow;
    if (columns != null) {
      for (byte[] col: columns) {
        this.columns.add(col);
      }
    }
    this.startTime = startTime;
    this.endTime = endTime;
    this.maxVersions = maxVersions;
  }

  public RowSpec(byte[] startRow, byte[] endRow, Collection<byte[]> columns,
      long startTime, long endTime, int maxVersions) {
    this.row = startRow;
    this.endRow = endRow;
    if (columns != null) {
      this.columns.addAll(columns);
    }
    this.startTime = startTime;
    this.endTime = endTime;
    this.maxVersions = maxVersions;
  }

  public boolean isSingleRow() {
    return endRow == null;
  }

  public int getMaxVersions() {
    return maxVersions;
  }

  public void setMaxVersions(int maxVersions) {
    this.maxVersions = maxVersions;
  }

  public boolean hasColumns() {
    return !columns.isEmpty();
  }

  public byte[] getRow() {
    return row;
  }

  public byte[] getStartRow() {
    return row;
  }

  public boolean hasEndRow() {
    return endRow != null;
  }

  public byte[] getEndRow() {
    return endRow;
  }

  public void addColumn(byte[] column) {
    columns.add(column);
  }

  public byte[][] getColumns() {
    return columns.toArray(new byte[columns.size()][]);
  }

  public boolean hasTimestamp() {
    return (startTime == 0) && (endTime != Long.MAX_VALUE);
  }

  public long getTimestamp() {
    return endTime;
  }

  public long getStartTime() {
    return startTime;
  }

  public void setStartTime(long startTime) {
    this.startTime = startTime;
  }

  public long getEndTime() {
    return endTime;
  }

  public void setEndTime(long endTime) {
    this.endTime = endTime;
  }

  public String toString() {
    StringBuilder result = new StringBuilder();
    result.append("{startRow => '");
    if (row != null) {
      result.append(Bytes.toString(row));
    }
    result.append("', endRow => '");
    if (endRow != null) {
      result.append(Bytes.toString(endRow));
    }
    result.append("', columns => [");
    for (byte[] col: columns) {
      result.append(" '");
      result.append(Bytes.toString(col));
      result.append("'");
    }
    result.append(" ], startTime => ");
    result.append(Long.toString(startTime));
    result.append(", endTime => ");
    result.append(Long.toString(endTime));
    result.append(", maxVersions => ");
    result.append(Integer.toString(maxVersions));
    result.append("}");
    return result.toString();
  }
}

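(Editor's illustration, not part of this commit: a minimal sketch of how the path grammar parsed above could be exercised. The row and column names below are made up, and RowSpecExample is a hypothetical class.)

import org.apache.hadoop.hbase.stargate.RowSpec;

public class RowSpecExample {
  public static void main(String[] args) {
    // Per parseRowKeys/parseColumns/parseTimestamp above, the grammar is roughly
    // /<start-row>[,<end-row>]/<column>[,<column>...][/<time>[,<end-time>]]
    // Here: one start row, one column, no timestamp range.
    RowSpec spec = new RowSpec("/row1/info:name/");
    System.out.println(spec.isSingleRow());  // true -- no end row was given
    System.out.println(spec.hasColumns());   // true -- the single column "info:name"
    System.out.println(spec);                // {startRow => 'row1', ...}
  }
}
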
@@ -0,0 +1,145 @@
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate;

import java.io.IOException;

import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.CacheControl;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.ResponseBuilder;
import javax.ws.rs.core.UriInfo;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.stargate.model.CellModel;
import org.apache.hadoop.hbase.stargate.model.CellSetModel;
import org.apache.hadoop.hbase.stargate.model.RowModel;
import org.apache.hadoop.hbase.util.Bytes;

import com.sun.jersey.core.util.Base64;

public class ScannerInstanceResource implements Constants {
  private static final Log LOG =
    LogFactory.getLog(ScannerInstanceResource.class);

  protected ResultGenerator generator;
  private String id;
  private int batch;
  private CacheControl cacheControl;

  public ScannerInstanceResource(String table, String id,
      ResultGenerator generator, int batch) throws IOException {
    this.id = id;
    this.generator = generator;
    this.batch = batch;
    cacheControl = new CacheControl();
    cacheControl.setNoCache(true);
    cacheControl.setNoTransform(false);
  }

  @GET
  @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
    MIMETYPE_PROTOBUF})
  public Response get(@Context UriInfo uriInfo) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("GET " + uriInfo.getAbsolutePath());
    }
    CellSetModel model = new CellSetModel();
    RowModel rowModel = null;
    byte[] rowKey = null;
    int count = batch;
    do {
      KeyValue value = null;
      try {
        value = generator.next();
      } catch (IllegalStateException e) {
        ScannerResource.delete(id);
        throw new WebApplicationException(Response.Status.GONE);
      }
      if (value == null) {
        LOG.info("generator exhausted");
        // respond with 204 (No Content) if an empty cell set would be
        // returned
        if (count == batch) {
          return Response.noContent().build();
        }
        break;
      }
      if (rowKey == null) {
        rowKey = value.getRow();
        rowModel = new RowModel(rowKey);
      }
      if (!Bytes.equals(value.getRow(), rowKey)) {
        model.addRow(rowModel);
        rowKey = value.getRow();
        rowModel = new RowModel(rowKey);
      }
      rowModel.addCell(
        new CellModel(value.getColumn(), value.getTimestamp(),
          value.getValue()));
    } while (--count > 0);
    model.addRow(rowModel);
    ResponseBuilder response = Response.ok(model);
    response.cacheControl(cacheControl);
    return response.build();
  }

  @GET
  @Produces(MIMETYPE_BINARY)
  public Response getBinary(@Context UriInfo uriInfo) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("GET " + uriInfo.getAbsolutePath() + " as " +
        MIMETYPE_BINARY);
    }
    try {
      KeyValue value = generator.next();
      if (value == null) {
        LOG.info("generator exhausted");
        return Response.noContent().build();
      }
      ResponseBuilder response = Response.ok(value.getValue());
      response.cacheControl(cacheControl);
      response.header("X-Row", Base64.encode(value.getRow()));
      response.header("X-Column", Base64.encode(value.getColumn()));
      response.header("X-Timestamp", value.getTimestamp());
      return response.build();
    } catch (IllegalStateException e) {
      ScannerResource.delete(id);
      throw new WebApplicationException(Response.Status.GONE);
    }
  }

  @DELETE
  public Response delete(@Context UriInfo uriInfo) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("DELETE " + uriInfo.getAbsolutePath());
    }
    ScannerResource.delete(id);
    return Response.ok().build();
  }
}

@ -0,0 +1,125 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate;
|
||||
|
||||
import com.google.protobuf.InvalidProtocolBufferException;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.URI;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.ws.rs.Consumes;
|
||||
import javax.ws.rs.POST;
|
||||
import javax.ws.rs.PUT;
|
||||
import javax.ws.rs.Path;
|
||||
import javax.ws.rs.PathParam;
|
||||
import javax.ws.rs.WebApplicationException;
|
||||
import javax.ws.rs.core.Context;
|
||||
import javax.ws.rs.core.Response;
|
||||
import javax.ws.rs.core.UriBuilder;
|
||||
import javax.ws.rs.core.UriInfo;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
|
||||
import org.apache.hadoop.hbase.stargate.model.ScannerModel;
|
||||
|
||||
public class ScannerResource implements Constants {
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(ScannerResource.class);
|
||||
protected static final Map<String,ScannerInstanceResource> scanners =
|
||||
new HashMap<String,ScannerInstanceResource>();
|
||||
|
||||
private String table;
|
||||
|
||||
public ScannerResource(String table) {
|
||||
this.table = table;
|
||||
}
|
||||
|
||||
private Response update(ScannerModel model, boolean replace,
|
||||
UriInfo uriInfo) {
|
||||
try {
|
||||
byte[] endRow = model.hasEndRow() ? model.getEndRow() : null;
|
||||
RowSpec spec = new RowSpec(model.getStartRow(), endRow,
|
||||
model.getColumns(), model.getStartTime(), model.getEndTime(), 1);
|
||||
ScannerResultGenerator gen = new ScannerResultGenerator(table, spec);
|
||||
String id = gen.getID();
|
||||
ScannerInstanceResource instance =
|
||||
new ScannerInstanceResource(table, id, gen, model.getBatch());
|
||||
synchronized (scanners) {
|
||||
scanners.put(id, instance);
|
||||
}
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("new scanner: " + id);
|
||||
}
|
||||
UriBuilder builder = uriInfo.getAbsolutePathBuilder();
|
||||
URI uri = builder.path(id).build();
|
||||
return Response.created(uri).build();
|
||||
} catch (InvalidProtocolBufferException e) {
|
||||
throw new WebApplicationException(e, Response.Status.BAD_REQUEST);
|
||||
} catch (IOException e) {
|
||||
throw new WebApplicationException(e,
|
||||
Response.Status.SERVICE_UNAVAILABLE);
|
||||
}
|
||||
}
|
||||
|
||||
@PUT
|
||||
@Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
|
||||
MIMETYPE_PROTOBUF})
|
||||
public Response put(ScannerModel model, @Context UriInfo uriInfo) {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("PUT " + uriInfo.getAbsolutePath());
|
||||
}
|
||||
return update(model, true, uriInfo);
|
||||
}
|
||||
|
||||
@POST
|
||||
@Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
|
||||
MIMETYPE_PROTOBUF})
|
||||
public Response post(ScannerModel model, @Context UriInfo uriInfo) {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("POST " + uriInfo.getAbsolutePath());
|
||||
}
|
||||
return update(model, false, uriInfo);
|
||||
}
|
||||
|
||||
@Path("{scanner: .+}")
|
||||
public ScannerInstanceResource getScannerInstanceResource(
|
||||
@PathParam("scanner") String id) {
|
||||
synchronized (scanners) {
|
||||
ScannerInstanceResource instance = scanners.get(id);
|
||||
if (instance == null) {
|
||||
throw new WebApplicationException(Response.Status.NOT_FOUND);
|
||||
}
|
||||
return instance;
|
||||
}
|
||||
}
|
||||
|
||||
static void delete(String id) {
|
||||
synchronized (scanners) {
|
||||
ScannerInstanceResource instance = scanners.remove(id);
|
||||
if (instance != null) {
|
||||
instance.generator.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
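
Taken together with ScannerInstanceResource above, the stateful scanner flow is: PUT or POST a scanner specification to /<table>/scanner, which answers 201 Created with the new scanner's URI in the Location header; GET that URI to page through cells; DELETE it to release the scanner. A minimal client-side sketch using the Client class added later in this commit; the table name and the XML payload are illustrative assumptions, since ScannerModel's wire format is not part of this diff:

  Cluster cluster = new Cluster().add("localhost", 8080);
  Client client = new Client(cluster);
  // assumed scanner specification; the actual ScannerModel XML schema is defined elsewhere
  byte[] spec = "<Scanner batch=\"10\"/>".getBytes();
  Response created = client.post("/mytable/scanner", "text/xml", spec);
  String scannerUri = created.getLocation();     // e.g. http://localhost:8080/mytable/scanner/<id>
  Response cells = client.get(scannerUri, "text/xml");   // one batch of cells
  client.delete(scannerUri);                     // release the server-side scanner
  client.shutdown();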
|
|
@ -0,0 +1,140 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Iterator;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.hbase.HColumnDescriptor;
|
||||
import org.apache.hadoop.hbase.KeyValue;
|
||||
import org.apache.hadoop.hbase.UnknownScannerException;
|
||||
import org.apache.hadoop.hbase.client.HTable;
|
||||
import org.apache.hadoop.hbase.client.HTablePool;
|
||||
import org.apache.hadoop.hbase.client.Result;
|
||||
import org.apache.hadoop.hbase.client.ResultScanner;
|
||||
import org.apache.hadoop.hbase.client.Scan;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
|
||||
public class ScannerResultGenerator extends ResultGenerator {
|
||||
private static final Log LOG =
|
||||
LogFactory.getLog(ScannerResultGenerator.class);
|
||||
|
||||
private String id;
|
||||
private Iterator<KeyValue> rowI;
|
||||
private ResultScanner scanner;
|
||||
private Result cached;
|
||||
|
||||
public ScannerResultGenerator(String tableName, RowSpec rowspec)
|
||||
throws IllegalArgumentException, IOException {
|
||||
HTablePool pool = RESTServlet.getInstance().getTablePool(tableName);
|
||||
HTable table = pool.get();
|
||||
try {
|
||||
Scan scan;
|
||||
if (rowspec.hasEndRow()) {
|
||||
scan = new Scan(rowspec.getStartRow(), rowspec.getEndRow());
|
||||
} else {
|
||||
scan = new Scan(rowspec.getStartRow());
|
||||
}
|
||||
if (rowspec.hasColumns()) {
|
||||
scan.addColumns(rowspec.getColumns());
|
||||
} else {
|
||||
for (HColumnDescriptor family:
|
||||
table.getTableDescriptor().getFamilies()) {
|
||||
scan.addFamily(family.getName());
|
||||
}
|
||||
}
|
||||
scan.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
|
||||
scan.setMaxVersions(rowspec.getMaxVersions());
|
||||
scanner = table.getScanner(scan);
|
||||
cached = null;
|
||||
id = Long.toString(System.currentTimeMillis()) +
|
||||
Integer.toHexString(scanner.hashCode());
|
||||
} finally {
|
||||
pool.put(table);
|
||||
}
|
||||
}
|
||||
|
||||
public String getID() {
|
||||
return id;
|
||||
}
|
||||
|
||||
public void close() {
// release the underlying region server scanner
scanner.close();
}
|
||||
|
||||
public boolean hasNext() {
|
||||
if (rowI != null && rowI.hasNext()) {
|
||||
return true;
|
||||
}
|
||||
if (cached != null) {
|
||||
return true;
|
||||
}
|
||||
try {
|
||||
Result result = scanner.next();
|
||||
if (result != null && !result.isEmpty()) {
|
||||
cached = result;
|
||||
}
|
||||
} catch (UnknownScannerException e) {
|
||||
throw new IllegalArgumentException(e);
|
||||
} catch (IOException e) {
|
||||
LOG.error(StringUtils.stringifyException(e));
|
||||
}
|
||||
return cached != null;
|
||||
}
|
||||
|
||||
public KeyValue next() {
|
||||
boolean loop;
|
||||
do {
|
||||
loop = false;
|
||||
if (rowI != null) {
|
||||
if (rowI.hasNext()) {
|
||||
return rowI.next();
|
||||
} else {
|
||||
rowI = null;
|
||||
}
|
||||
}
|
||||
if (cached != null) {
|
||||
rowI = cached.list().iterator();
|
||||
loop = true;
|
||||
cached = null;
|
||||
} else {
|
||||
Result result = null;
|
||||
try {
|
||||
result = scanner.next();
|
||||
} catch (UnknownScannerException e) {
|
||||
throw new IllegalArgumentException(e);
|
||||
} catch (IOException e) {
|
||||
LOG.error(StringUtils.stringifyException(e));
|
||||
}
|
||||
if (result != null && !result.isEmpty()) {
|
||||
rowI = result.list().iterator();
|
||||
loop = true;
|
||||
}
|
||||
}
|
||||
} while (loop);
|
||||
return null;
|
||||
}
|
||||
|
||||
public void remove() {
|
||||
throw new UnsupportedOperationException("remove not supported");
|
||||
}
|
||||
}
|
|
@ -0,0 +1,189 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.ws.rs.DELETE;
|
||||
import javax.ws.rs.GET;
|
||||
import javax.ws.rs.POST;
|
||||
import javax.ws.rs.PUT;
|
||||
import javax.ws.rs.Consumes;
|
||||
import javax.ws.rs.Produces;
|
||||
import javax.ws.rs.WebApplicationException;
|
||||
import javax.ws.rs.core.CacheControl;
|
||||
import javax.ws.rs.core.Context;
|
||||
import javax.ws.rs.core.Response;
|
||||
import javax.ws.rs.core.Response.ResponseBuilder;
|
||||
import javax.ws.rs.core.UriInfo;
|
||||
import javax.xml.namespace.QName;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.hbase.HColumnDescriptor;
|
||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
import org.apache.hadoop.hbase.TableExistsException;
|
||||
import org.apache.hadoop.hbase.TableNotFoundException;
|
||||
import org.apache.hadoop.hbase.client.HBaseAdmin;
|
||||
import org.apache.hadoop.hbase.client.HTable;
|
||||
import org.apache.hadoop.hbase.client.HTablePool;
|
||||
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
|
||||
import org.apache.hadoop.hbase.stargate.model.ColumnSchemaModel;
|
||||
import org.apache.hadoop.hbase.stargate.model.TableSchemaModel;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
public class SchemaResource implements Constants {
|
||||
private static final Log LOG = LogFactory.getLog(SchemaResource.class);
|
||||
|
||||
private String table;
|
||||
private CacheControl cacheControl;
|
||||
|
||||
public SchemaResource(String table) {
|
||||
this.table = table;
|
||||
cacheControl = new CacheControl();
|
||||
cacheControl.setNoCache(true);
|
||||
cacheControl.setNoTransform(false);
|
||||
}
|
||||
|
||||
private HTableDescriptor getTableSchema() throws IOException,
|
||||
TableNotFoundException {
|
||||
HTablePool pool = RESTServlet.getInstance().getTablePool(this.table);
|
||||
HTable table = pool.get();
|
||||
try {
|
||||
return table.getTableDescriptor();
|
||||
} finally {
|
||||
pool.put(table);
|
||||
}
|
||||
}
|
||||
|
||||
@GET
|
||||
@Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
|
||||
MIMETYPE_PROTOBUF})
|
||||
public Response get(@Context UriInfo uriInfo) {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("GET " + uriInfo.getAbsolutePath());
|
||||
}
|
||||
try {
|
||||
HTableDescriptor htd = getTableSchema();
|
||||
TableSchemaModel model = new TableSchemaModel();
|
||||
model.setName(htd.getNameAsString());
|
||||
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
|
||||
htd.getValues().entrySet()) {
|
||||
model.addAttribute(Bytes.toString(e.getKey().get()),
|
||||
Bytes.toString(e.getValue().get()));
|
||||
}
|
||||
for (HColumnDescriptor hcd: htd.getFamilies()) {
|
||||
ColumnSchemaModel columnModel = new ColumnSchemaModel();
|
||||
columnModel.setName(hcd.getNameAsString());
|
||||
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
|
||||
hcd.getValues().entrySet()) {
|
||||
columnModel.addAttribute(Bytes.toString(e.getKey().get()),
|
||||
Bytes.toString(e.getValue().get()));
|
||||
}
|
||||
model.addColumnFamily(columnModel);
|
||||
}
|
||||
ResponseBuilder response = Response.ok(model);
|
||||
response.cacheControl(cacheControl);
|
||||
return response.build();
|
||||
} catch (TableNotFoundException e) {
|
||||
throw new WebApplicationException(Response.Status.NOT_FOUND);
|
||||
} catch (IOException e) {
|
||||
throw new WebApplicationException(e,
|
||||
Response.Status.SERVICE_UNAVAILABLE);
|
||||
}
|
||||
}
|
||||
|
||||
private Response update(TableSchemaModel model, boolean replace,
|
||||
UriInfo uriInfo) {
|
||||
// NOTE: 'replace' is currently ignored... we always replace the schema
|
||||
try {
|
||||
HTableDescriptor htd = new HTableDescriptor(table);
|
||||
for (Map.Entry<QName,Object> e: model.getAny().entrySet()) {
|
||||
htd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
|
||||
}
|
||||
for (ColumnSchemaModel family: model.getColumns()) {
|
||||
HColumnDescriptor hcd = new HColumnDescriptor(family.getName());
|
||||
for (Map.Entry<QName,Object> e: family.getAny().entrySet()) {
|
||||
hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
|
||||
}
|
||||
htd.addFamily(hcd);
|
||||
}
|
||||
RESTServlet server = RESTServlet.getInstance();
|
||||
HBaseAdmin admin = new HBaseAdmin(server.getConfiguration());
|
||||
if (admin.tableExists(table)) {
|
||||
admin.disableTable(table);
|
||||
admin.modifyTable(Bytes.toBytes(table), htd);
|
||||
server.invalidateMaxAge(table);
|
||||
admin.enableTable(table);
|
||||
return Response.ok().build();
|
||||
} else {
|
||||
admin.createTable(htd);
|
||||
return Response.created(uriInfo.getAbsolutePath()).build();
|
||||
}
|
||||
} catch (TableExistsException e) {
|
||||
// race, someone else created a table with the same name
|
||||
throw new WebApplicationException(e, Response.Status.NOT_MODIFIED);
|
||||
} catch (IOException e) {
|
||||
throw new WebApplicationException(e,
|
||||
Response.Status.SERVICE_UNAVAILABLE);
|
||||
}
|
||||
}
|
||||
|
||||
@PUT
|
||||
@Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
|
||||
MIMETYPE_PROTOBUF})
|
||||
public Response put(TableSchemaModel model, @Context UriInfo uriInfo) {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("PUT " + uriInfo.getAbsolutePath());
|
||||
}
|
||||
return update(model, true, uriInfo);
|
||||
}
|
||||
|
||||
@POST
|
||||
@Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
|
||||
MIMETYPE_PROTOBUF})
|
||||
public Response post(TableSchemaModel model, @Context UriInfo uriInfo) {
|
||||
if (LOG.isDebugEnabled()) {
  LOG.debug("POST " + uriInfo.getAbsolutePath());
}
|
||||
return update(model, false, uriInfo);
|
||||
}
|
||||
|
||||
@DELETE
|
||||
public Response delete(@Context UriInfo uriInfo) {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("DELETE " + uriInfo.getAbsolutePath());
|
||||
}
|
||||
try {
|
||||
HBaseAdmin admin =
|
||||
new HBaseAdmin(RESTServlet.getInstance().getConfiguration());
|
||||
admin.disableTable(table);
|
||||
admin.deleteTable(table);
|
||||
return Response.ok().build();
|
||||
} catch (TableNotFoundException e) {
|
||||
throw new WebApplicationException(Response.Status.NOT_FOUND);
|
||||
} catch (IOException e) {
|
||||
throw new WebApplicationException(e,
|
||||
Response.Status.SERVICE_UNAVAILABLE);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,95 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import javax.ws.rs.GET;
|
||||
import javax.ws.rs.Path;
|
||||
import javax.ws.rs.Produces;
|
||||
import javax.ws.rs.WebApplicationException;
|
||||
import javax.ws.rs.core.CacheControl;
|
||||
import javax.ws.rs.core.Context;
|
||||
import javax.ws.rs.core.Response;
|
||||
import javax.ws.rs.core.Response.ResponseBuilder;
|
||||
import javax.ws.rs.core.UriInfo;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
|
||||
import org.apache.hadoop.hbase.ClusterStatus;
|
||||
import org.apache.hadoop.hbase.HServerInfo;
|
||||
import org.apache.hadoop.hbase.HServerLoad;
|
||||
import org.apache.hadoop.hbase.client.HBaseAdmin;
|
||||
import org.apache.hadoop.hbase.stargate.model.StorageClusterStatusModel;
|
||||
|
||||
@Path(Constants.PATH_STATUS_CLUSTER)
|
||||
public class StorageClusterStatusResource implements Constants {
|
||||
private static final Log LOG =
|
||||
LogFactory.getLog(StorageClusterStatusResource.class);
|
||||
|
||||
private CacheControl cacheControl;
|
||||
|
||||
public StorageClusterStatusResource() {
|
||||
cacheControl = new CacheControl();
|
||||
cacheControl.setNoCache(true);
|
||||
cacheControl.setNoTransform(false);
|
||||
}
|
||||
|
||||
@GET
|
||||
@Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
|
||||
MIMETYPE_PROTOBUF})
|
||||
public Response get(@Context UriInfo uriInfo) {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("GET " + uriInfo.getAbsolutePath());
|
||||
}
|
||||
try {
|
||||
RESTServlet server = RESTServlet.getInstance();
|
||||
HBaseAdmin admin = new HBaseAdmin(server.getConfiguration());
|
||||
ClusterStatus status = admin.getClusterStatus();
|
||||
StorageClusterStatusModel model = new StorageClusterStatusModel();
|
||||
model.setRegions(status.getRegionsCount());
|
||||
model.setRequests(status.getRequestsCount());
|
||||
model.setAverageLoad(status.getAverageLoad());
|
||||
for (HServerInfo info: status.getServerInfo()) {
|
||||
StorageClusterStatusModel.Node node =
|
||||
model.addLiveNode(
|
||||
info.getServerAddress().getHostname() + ":" +
|
||||
Integer.toString(info.getServerAddress().getPort()),
|
||||
info.getStartCode());
|
||||
HServerLoad load = info.getLoad();
|
||||
node.setRequests(load.getNumberOfRequests());
|
||||
for (HServerLoad.RegionLoad region: load.getRegionsLoad()) {
|
||||
node.addRegion(region.getName());
|
||||
}
|
||||
}
|
||||
for (String name: status.getDeadServerNames()) {
|
||||
model.addDeadNode(name);
|
||||
}
|
||||
ResponseBuilder response = Response.ok(model);
|
||||
response.cacheControl(cacheControl);
|
||||
return response.build();
|
||||
} catch (IOException e) {
|
||||
throw new WebApplicationException(e,
|
||||
Response.Status.SERVICE_UNAVAILABLE);
|
||||
}
|
||||
}
|
||||
}

@@ -0,0 +1,73 @@
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate;

import java.io.IOException;

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.CacheControl;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import javax.ws.rs.core.Response.ResponseBuilder;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.stargate.model.StorageClusterVersionModel;

@Path(Constants.PATH_VERSION_CLUSTER)
public class StorageClusterVersionResource implements Constants {
  private static final Log LOG =
    LogFactory.getLog(StorageClusterVersionResource.class);

  private CacheControl cacheControl;

  public StorageClusterVersionResource() {
    cacheControl = new CacheControl();
    cacheControl.setNoCache(true);
    cacheControl.setNoTransform(false);
  }

  @GET
  @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
    MIMETYPE_PROTOBUF})
  public Response get(@Context UriInfo uriInfo) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("GET " + uriInfo.getAbsolutePath());
    }
    try {
      RESTServlet server = RESTServlet.getInstance();
      HBaseAdmin admin = new HBaseAdmin(server.getConfiguration());
      StorageClusterVersionModel model = new StorageClusterVersionModel();
      model.setVersion(admin.getClusterStatus().getHBaseVersion());
      ResponseBuilder response = Response.ok(model);
      response.cacheControl(cacheControl);
      return response.build();
    } catch (IOException e) {
      throw new WebApplicationException(e,
        Response.Status.SERVICE_UNAVAILABLE);
    }
  }
}
|
|
@ -0,0 +1,123 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import javax.ws.rs.GET;
|
||||
import javax.ws.rs.Path;
|
||||
import javax.ws.rs.PathParam;
|
||||
import javax.ws.rs.Produces;
|
||||
import javax.ws.rs.QueryParam;
|
||||
import javax.ws.rs.WebApplicationException;
|
||||
import javax.ws.rs.core.CacheControl;
|
||||
import javax.ws.rs.core.Context;
|
||||
import javax.ws.rs.core.Response;
|
||||
import javax.ws.rs.core.UriInfo;
|
||||
import javax.ws.rs.core.Response.ResponseBuilder;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
import org.apache.hadoop.hbase.client.HBaseAdmin;
|
||||
import org.apache.hadoop.hbase.stargate.model.TableListModel;
|
||||
import org.apache.hadoop.hbase.stargate.model.TableModel;
|
||||
|
||||
@Path("/")
|
||||
public class TableResource implements Constants {
|
||||
private static final Log LOG = LogFactory.getLog(TableResource.class);
|
||||
|
||||
private CacheControl cacheControl;
|
||||
|
||||
public TableResource() {
|
||||
cacheControl = new CacheControl();
|
||||
cacheControl.setNoCache(true);
|
||||
cacheControl.setNoTransform(false);
|
||||
}
|
||||
|
||||
private HTableDescriptor[] getTableList() throws IOException {
|
||||
HBaseAdmin admin =
|
||||
new HBaseAdmin(RESTServlet.getInstance().getConfiguration());
|
||||
HTableDescriptor[] list = admin.listTables();
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("getTableList:");
|
||||
for (HTableDescriptor htd: list) {
|
||||
LOG.debug(htd.toString());
|
||||
}
|
||||
}
|
||||
return list;
|
||||
}
|
||||
|
||||
@GET
|
||||
@Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
|
||||
MIMETYPE_PROTOBUF})
|
||||
public Response get(@Context UriInfo uriInfo) {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("GET " + uriInfo.getAbsolutePath());
|
||||
}
|
||||
try {
|
||||
TableListModel tableList = new TableListModel();
|
||||
for (HTableDescriptor htd: getTableList()) {
|
||||
if (htd.isMetaRegion()) {
|
||||
continue;
|
||||
}
|
||||
tableList.add(new TableModel(htd.getNameAsString()));
|
||||
}
|
||||
ResponseBuilder response = Response.ok(tableList);
|
||||
response.cacheControl(cacheControl);
|
||||
return response.build();
|
||||
} catch (IOException e) {
|
||||
throw new WebApplicationException(e,
|
||||
Response.Status.SERVICE_UNAVAILABLE);
|
||||
}
|
||||
}
|
||||
|
||||
@Path("{table}/regions")
|
||||
public RegionsResource getRegionsResource(
|
||||
@PathParam("table") String table) {
|
||||
return new RegionsResource(table);
|
||||
}
|
||||
|
||||
@Path("{table}/scanner")
|
||||
public ScannerResource getScannerResource(
|
||||
@PathParam("table") String table) {
|
||||
return new ScannerResource(table);
|
||||
}
|
||||
|
||||
@Path("{table}/schema")
|
||||
public SchemaResource getSchemaResource(
|
||||
@PathParam("table") String table) {
|
||||
return new SchemaResource(table);
|
||||
}
|
||||
|
||||
@Path("{table}/{rowspec: .+}")
|
||||
public RowResource getRowResource(
|
||||
@PathParam("table") String table,
|
||||
@PathParam("rowspec") String rowspec,
|
||||
@QueryParam("v") String versions) {
|
||||
try {
|
||||
return new RowResource(table, rowspec, versions);
|
||||
} catch (IOException e) {
|
||||
throw new WebApplicationException(e,
|
||||
Response.Status.INTERNAL_SERVER_ERROR);
|
||||
}
|
||||
}
|
||||
}

@@ -0,0 +1,66 @@
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate;

import javax.servlet.ServletContext;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.CacheControl;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriInfo;
import javax.ws.rs.core.Response.ResponseBuilder;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.stargate.model.VersionModel;

@Path(Constants.PATH_VERSION)
public class VersionResource implements Constants {
  private static final Log LOG = LogFactory.getLog(VersionResource.class);

  private CacheControl cacheControl;

  public VersionResource() {
    cacheControl = new CacheControl();
    cacheControl.setNoCache(true);
    cacheControl.setNoTransform(false);
  }

  @GET
  @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_JAVASCRIPT,
    MIMETYPE_PROTOBUF})
  public Response get(@Context ServletContext context, @Context UriInfo uriInfo) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("GET " + uriInfo.getAbsolutePath());
    }
    ResponseBuilder response = Response.ok(new VersionModel(context));
    response.cacheControl(cacheControl);
    return response.build();
  }

  // "/version/stargate" is an alias for "/version"
  @Path("stargate")
  public VersionResource getVersionResource() {
    return this;
  }
}

@@ -0,0 +1,242 @@
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate.client;

import java.io.IOException;

import org.apache.commons.httpclient.Header;
import org.apache.commons.httpclient.HttpClient;
import org.apache.commons.httpclient.HttpMethod;
import org.apache.commons.httpclient.HttpVersion;
import org.apache.commons.httpclient.MultiThreadedHttpConnectionManager;
import org.apache.commons.httpclient.URI;
import org.apache.commons.httpclient.methods.ByteArrayRequestEntity;
import org.apache.commons.httpclient.methods.DeleteMethod;
import org.apache.commons.httpclient.methods.GetMethod;
import org.apache.commons.httpclient.methods.HeadMethod;
import org.apache.commons.httpclient.methods.PostMethod;
import org.apache.commons.httpclient.methods.PutMethod;
import org.apache.commons.httpclient.params.HttpClientParams;
import org.apache.commons.httpclient.params.HttpConnectionManagerParams;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class Client {
  public static final Header[] EMPTY_HEADER_ARRAY = new Header[0];

  private static final Log LOG = LogFactory.getLog(Client.class);

  private HttpClient httpClient;
  private Cluster cluster;

  public Client() {
    this(null);
  }

  public Client(Cluster cluster) {
    this.cluster = cluster;
    httpClient = new HttpClient(new MultiThreadedHttpConnectionManager());
    HttpConnectionManagerParams managerParams =
      httpClient.getHttpConnectionManager().getParams();
    managerParams.setConnectionTimeout(2000); // 2 s
    HttpClientParams clientParams = httpClient.getParams();
    clientParams.setVersion(HttpVersion.HTTP_1_1);
  }

  public void shutdown() {
    MultiThreadedHttpConnectionManager manager =
      (MultiThreadedHttpConnectionManager) httpClient.getHttpConnectionManager();
    manager.shutdown();
  }

  @SuppressWarnings("deprecation")
  public int executePathOnly(Cluster c, HttpMethod method, Header[] headers,
      String path) throws IOException {
    IOException lastException;
    if (c.nodes.size() < 1) {
      throw new IOException("Cluster is empty");
    }
    int start = (int)Math.round((c.nodes.size() - 1) * Math.random());
    int i = start;
    do {
      c.lastHost = c.nodes.get(i);
      try {
        StringBuffer sb = new StringBuffer();
        sb.append("http://");
        sb.append(c.lastHost);
        sb.append(path);
        URI uri = new URI(sb.toString());
        return executeURI(method, headers, uri.toString());
      } catch (IOException e) {
        lastException = e;
      }
      // advance to the next node, wrapping around so every node gets one try
      i = (i + 1) % c.nodes.size();
    } while (i != start);
    throw lastException;
  }

  @SuppressWarnings("deprecation")
  public int executeURI(HttpMethod method, Header[] headers, String uri)
      throws IOException {
    method.setURI(new URI(uri));
    if (headers != null) {
      for (Header header: headers) {
        method.addRequestHeader(header);
      }
    }
    long startTime = System.currentTimeMillis();
    int code = httpClient.executeMethod(method);
    long endTime = System.currentTimeMillis();
    if (LOG.isDebugEnabled()) {
      LOG.debug(method.getName() + " " + uri + ": " + code + " " +
        method.getStatusText() + " in " + (endTime - startTime) + " ms");
    }
    return code;
  }

  public int execute(Cluster c, HttpMethod method, Header[] headers,
      String path) throws IOException {
    if (path.startsWith("/")) {
      return executePathOnly(c, method, headers, path);
    }
    return executeURI(method, headers, path);
  }

  public Cluster getCluster() {
    return cluster;
  }

  public void setCluster(Cluster cluster) {
    this.cluster = cluster;
  }

  public Response head(String path) throws IOException {
    return head(cluster, path);
  }

  public Response head(Cluster c, String path) throws IOException {
    HeadMethod method = new HeadMethod();
    int code = execute(c, method, null, path);
    Header[] headers = method.getResponseHeaders();
    method.releaseConnection();
    return new Response(code, headers, null);
  }

  public Response get(String path) throws IOException {
    return get(cluster, path);
  }

  public Response get(Cluster c, String path) throws IOException {
    return get(c, path, EMPTY_HEADER_ARRAY);
  }

  public Response get(String path, String accept) throws IOException {
    return get(cluster, path, accept);
  }

  public Response get(Cluster c, String path, String accept)
      throws IOException {
    Header[] headers = new Header[1];
    headers[0] = new Header("Accept", accept);
    return get(c, path, headers);
  }

  public Response get(String path, Header[] headers) throws IOException {
    return get(cluster, path, headers);
  }

  public Response get(Cluster c, String path, Header[] headers)
      throws IOException {
    GetMethod method = new GetMethod();
    int code = execute(c, method, headers, path);
    headers = method.getResponseHeaders();
    byte[] body = method.getResponseBody();
    method.releaseConnection();
    return new Response(code, headers, body);
  }

  public Response put(String path, String contentType, byte[] content)
      throws IOException {
    return put(cluster, path, contentType, content);
  }

  public Response put(Cluster c, String path, String contentType,
      byte[] content) throws IOException {
    Header[] headers = new Header[1];
    headers[0] = new Header("Content-Type", contentType);
    return put(c, path, headers, content);
  }

  public Response put(String path, Header[] headers, byte[] body)
      throws IOException {
    return put(cluster, path, headers, body);
  }

  public Response put(Cluster c, String path, Header[] headers,
      byte[] body) throws IOException {
    PutMethod method = new PutMethod();
    method.setRequestEntity(new ByteArrayRequestEntity(body));
    int code = execute(c, method, headers, path);
    headers = method.getResponseHeaders();
    body = method.getResponseBody();
    method.releaseConnection();
    return new Response(code, headers, body);
  }

  public Response post(String path, String contentType, byte[] content)
      throws IOException {
    return post(cluster, path, contentType, content);
  }

  public Response post(Cluster c, String path, String contentType,
      byte[] content) throws IOException {
    Header[] headers = new Header[1];
    headers[0] = new Header("Content-Type", contentType);
    return post(c, path, headers, content);
  }

  public Response post(String path, Header[] headers, byte[] content)
      throws IOException {
    return post(cluster, path, headers, content);
  }

  public Response post(Cluster c, String path, Header[] headers,
      byte[] content) throws IOException {
    PostMethod method = new PostMethod();
    method.setRequestEntity(new ByteArrayRequestEntity(content));
    int code = execute(c, method, headers, path);
    headers = method.getResponseHeaders();
    content = method.getResponseBody();
    method.releaseConnection();
    return new Response(code, headers, content);
  }

  public Response delete(String path) throws IOException {
    return delete(cluster, path);
  }

  public Response delete(Cluster c, String path) throws IOException {
    DeleteMethod method = new DeleteMethod();
    int code = execute(c, method, null, path);
    Header[] headers = method.getResponseHeaders();
    method.releaseConnection();
    return new Response(code, headers);
  }
}
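
For reference, a minimal usage sketch of the Client and Cluster classes above; the host, port and path are made-up examples, not taken from this commit, and "text/xml" is assumed to match the MIMETYPE_XML constant used by the resources:

  Cluster cluster = new Cluster().add("localhost", 8080);   // Stargate connector address (assumed)
  Client client = new Client(cluster);
  try {
    // ask the root resource for the table list
    Response response = client.get("/", "text/xml");
    System.out.println(response.getCode());
    if (response.hasBody()) {
      System.out.println(new String(response.getBody()));
    }
  } finally {
    client.shutdown();
  }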

@@ -0,0 +1,88 @@
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate.client;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class Cluster {
  protected List<String> nodes =
    Collections.synchronizedList(new ArrayList<String>());
  protected String lastHost;

  /**
   * Constructor
   */
  public Cluster() {}

  /**
   * Constructor
   * @param nodes a list of service locations, in 'host:port' format
   */
  public Cluster(List<String> nodes) {
    this.nodes.addAll(nodes);
  }

  /**
   * Add a node to the cluster
   * @param node the service location in 'host:port' format
   */
  public Cluster add(String node) {
    nodes.add(node);
    return this;
  }

  /**
   * Add a node to the cluster
   * @param name host name
   * @param port service port
   */
  public Cluster add(String name, int port) {
    StringBuilder sb = new StringBuilder();
    sb.append(name);
    sb.append(':');
    sb.append(port);
    return add(sb.toString());
  }

  /**
   * Remove a node from the cluster
   * @param node the service location in 'host:port' format
   */
  public Cluster remove(String node) {
    nodes.remove(node);
    return this;
  }

  /**
   * Remove a node from the cluster
   * @param name host name
   * @param port service port
   */
  public Cluster remove(String name, int port) {
    StringBuilder sb = new StringBuilder();
    sb.append(name);
    sb.append(':');
    sb.append(port);
    return remove(sb.toString());
  }
}

@@ -0,0 +1,119 @@
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate.client;

import org.apache.commons.httpclient.Header;

public class Response {
  private int code;
  private Header[] headers;
  private byte[] body;

  /**
   * Constructor
   * @param code the HTTP response code
   */
  public Response(int code) {
    this(code, null, null);
  }

  /**
   * Constructor
   * @param code the HTTP response code
   * @param headers the HTTP response headers
   */
  public Response(int code, Header[] headers) {
    this(code, headers, null);
  }

  /**
   * Constructor
   * @param code the HTTP response code
   * @param headers the HTTP response headers
   * @param body the response body, can be null
   */
  public Response(int code, Header[] headers, byte[] body) {
    this.code = code;
    this.headers = headers;
    this.body = body;
  }

  /**
   * @return the HTTP response code
   */
  public int getCode() {
    return code;
  }

  /**
   * @return the HTTP response headers
   */
  public Header[] getHeaders() {
    return headers;
  }

  /**
   * @return the value of the Location header
   */
  public String getLocation() {
    for (Header header: headers) {
      if (header.getName().equals("Location")) {
        return header.getValue();
      }
    }
    return null;
  }

  /**
   * @return true if a response body was sent
   */
  public boolean hasBody() {
    return body != null;
  }

  /**
   * @return the HTTP response body
   */
  public byte[] getBody() {
    return body;
  }

  /**
   * @param code the HTTP response code
   */
  public void setCode(int code) {
    this.code = code;
  }

  /**
   * @param headers the HTTP response headers
   */
  public void setHeaders(Header[] headers) {
    this.headers = headers;
  }

  /**
   * @param body the response body
   */
  public void setBody(byte[] body) {
    this.body = body;
  }
}
|
|
@ -0,0 +1,145 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.model;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.Serializable;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAttribute;
|
||||
import javax.xml.bind.annotation.XmlRootElement;
|
||||
import javax.xml.bind.annotation.XmlType;
|
||||
import javax.xml.bind.annotation.XmlValue;
|
||||
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell;
|
||||
|
||||
import com.google.protobuf.ByteString;
|
||||
|
||||
@XmlRootElement(name="Cell")
|
||||
@XmlType(propOrder={"column","timestamp"})
|
||||
public class CellModel implements IProtobufWrapper, Serializable {
|
||||
private static final long serialVersionUID = 1L;
|
||||
|
||||
private long timestamp = HConstants.LATEST_TIMESTAMP;
|
||||
private byte[] column;
|
||||
private byte[] value;
|
||||
|
||||
public CellModel() {}
|
||||
|
||||
/**
|
||||
* @param column
|
||||
* @param value
|
||||
*/
|
||||
public CellModel(byte[] column, byte[] value) {
|
||||
super();
|
||||
this.column = column;
|
||||
this.value = value;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param column
|
||||
* @param timestamp
|
||||
* @param value
|
||||
*/
|
||||
public CellModel(byte[] column, long timestamp, byte[] value) {
|
||||
super();
|
||||
this.column = column;
|
||||
this.timestamp = timestamp;
|
||||
this.value = value;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the column
|
||||
*/
|
||||
@XmlAttribute
|
||||
public byte[] getColumn() {
|
||||
return column;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param column the column to set
|
||||
*/
|
||||
public void setColumn(byte[] column) {
|
||||
this.column = column;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if the timestamp property has been specified by the
|
||||
* user
|
||||
*/
|
||||
public boolean hasUserTimestamp() {
|
||||
return timestamp != HConstants.LATEST_TIMESTAMP;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the timestamp
|
||||
*/
|
||||
@XmlAttribute
|
||||
public long getTimestamp() {
|
||||
return timestamp;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param timestamp the timestamp to set
|
||||
*/
|
||||
public void setTimestamp(long timestamp) {
|
||||
this.timestamp = timestamp;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the value
|
||||
*/
|
||||
@XmlValue
|
||||
public byte[] getValue() {
|
||||
return value;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param value the value to set
|
||||
*/
|
||||
public void setValue(byte[] value) {
|
||||
this.value = value;
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] createProtobufOutput() {
|
||||
Cell.Builder builder = Cell.newBuilder();
|
||||
builder.setColumn(ByteString.copyFrom(getColumn()));
|
||||
builder.setData(ByteString.copyFrom(getValue()));
|
||||
if (hasUserTimestamp()) {
|
||||
builder.setTimestamp(getTimestamp());
|
||||
}
|
||||
return builder.build().toByteArray();
|
||||
}
|
||||
|
||||
@Override
|
||||
public IProtobufWrapper getObjectFromMessage(byte[] message)
|
||||
throws IOException {
|
||||
Cell.Builder builder = Cell.newBuilder();
|
||||
builder.mergeFrom(message);
|
||||
setColumn(builder.getColumn().toByteArray());
|
||||
setValue(builder.getData().toByteArray());
|
||||
if (builder.hasTimestamp()) {
|
||||
setTimestamp(builder.getTimestamp());
|
||||
}
|
||||
return this;
|
||||
}
|
||||
}

@@ -0,0 +1,115 @@
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate.model;

import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;

import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlElement;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell;
import org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet;

import com.google.protobuf.ByteString;

@XmlRootElement(name="CellSet")
public class CellSetModel implements Serializable, IProtobufWrapper {

  private static final long serialVersionUID = 1L;

  private List<RowModel> rows;

  public CellSetModel() {
    this.rows = new ArrayList<RowModel>();
  }

  /**
   * @param rows
   */
  public CellSetModel(List<RowModel> rows) {
    super();
    this.rows = rows;
  }

  /**
   * Add a row to this cell set
   *
   * @param row
   */
  public void addRow(RowModel row) {
    rows.add(row);
  }

  /**
   * @return the rows
   */
  @XmlElement(name="Row")
  public List<RowModel> getRows() {
    return rows;
  }

  @Override
  public byte[] createProtobufOutput() {
    CellSet.Builder builder = CellSet.newBuilder();
    for (RowModel row: getRows()) {
      CellSet.Row.Builder rowBuilder = CellSet.Row.newBuilder();
      rowBuilder.setKey(ByteString.copyFrom(row.getKey()));
      for (CellModel cell: row.getCells()) {
        Cell.Builder cellBuilder = Cell.newBuilder();
        cellBuilder.setColumn(ByteString.copyFrom(cell.getColumn()));
        cellBuilder.setData(ByteString.copyFrom(cell.getValue()));
        if (cell.hasUserTimestamp()) {
          cellBuilder.setTimestamp(cell.getTimestamp());
        }
        rowBuilder.addValues(cellBuilder);
      }
      builder.addRows(rowBuilder);
    }
    return builder.build().toByteArray();
  }

  @Override
  public IProtobufWrapper getObjectFromMessage(byte[] message)
      throws IOException {
    CellSet.Builder builder = CellSet.newBuilder();
    builder.mergeFrom(message);
    for (CellSet.Row row: builder.getRowsList()) {
      RowModel rowModel = new RowModel(row.getKey().toByteArray());
      for (Cell cell: row.getValuesList()) {
        long timestamp = HConstants.LATEST_TIMESTAMP;
        if (cell.hasTimestamp()) {
          timestamp = cell.getTimestamp();
        }
        rowModel.addCell(
          new CellModel(cell.getColumn().toByteArray(), timestamp,
            cell.getData().toByteArray()));
      }
      addRow(rowModel);
    }
    return this;
  }
}
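
A short round trip through the model classes above, assuming the CellModel and RowModel types shown elsewhere in this commit; the row key and column name are illustrative only:

  CellSetModel cellSet = new CellSetModel();
  RowModel row = new RowModel("row1");
  row.addCell(new CellModel("info:name".getBytes(), "value1".getBytes()));
  cellSet.addRow(row);
  byte[] wire = cellSet.createProtobufOutput();     // serialize to the CellSet protobuf message
  CellSetModel copy = new CellSetModel();
  copy.getObjectFromMessage(wire);                  // rebuild the model from the raw bytes
  System.out.println(copy.getRows().get(0).getCells().size());   // prints 1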
|
|
@ -0,0 +1,177 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.model;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAnyAttribute;
|
||||
import javax.xml.bind.annotation.XmlAttribute;
|
||||
import javax.xml.bind.annotation.XmlRootElement;
|
||||
import javax.xml.bind.annotation.XmlType;
|
||||
import javax.xml.namespace.QName;
|
||||
|
||||
import org.apache.hadoop.hbase.HColumnDescriptor;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
|
||||
@XmlRootElement(name="ColumnSchema")
|
||||
@XmlType(propOrder = {"name"})
|
||||
public class ColumnSchemaModel implements Serializable {
|
||||
private static final long serialVersionUID = 1L;
|
||||
private static QName BLOCKCACHE = new QName(HColumnDescriptor.BLOCKCACHE);
|
||||
private static QName BLOCKSIZE = new QName(HColumnDescriptor.BLOCKSIZE);
|
||||
private static QName BLOOMFILTER = new QName(HColumnDescriptor.BLOOMFILTER);
|
||||
private static QName COMPRESSION = new QName(HColumnDescriptor.COMPRESSION);
|
||||
private static QName IN_MEMORY = new QName(HConstants.IN_MEMORY);
|
||||
private static QName TTL = new QName(HColumnDescriptor.TTL);
|
||||
private static QName VERSIONS = new QName(HConstants.VERSIONS);
|
||||
|
||||
private String name;
|
||||
private Map<QName,Object> attrs = new HashMap<QName,Object>();
|
||||
|
||||
public ColumnSchemaModel() {}
|
||||
|
||||
public void addAttribute(String name, Object value) {
|
||||
attrs.put(new QName(name), value);
|
||||
}
|
||||
|
||||
public String getAttribute(String name) {
|
||||
return attrs.get(new QName(name)).toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the column name
|
||||
*/
|
||||
@XmlAttribute
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the map for holding unspecified (user) attributes
|
||||
*/
|
||||
@XmlAnyAttribute
|
||||
public Map<QName,Object> getAny() {
|
||||
return attrs;
|
||||
}
|
||||
|
||||
/**
 * @param name the column name
 */
|
||||
public void setName(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
/* (non-Javadoc)
|
||||
* @see java.lang.Object#toString()
|
||||
*/
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append("{ NAME => '");
|
||||
sb.append(name);
|
||||
sb.append('\'');
|
||||
for (Map.Entry<QName,Object> e: attrs.entrySet()) {
|
||||
sb.append(", ");
|
||||
sb.append(e.getKey().getLocalPart());
|
||||
sb.append(" => '");
|
||||
sb.append(e.getValue().toString());
|
||||
sb.append('\'');
|
||||
}
|
||||
sb.append(" }");
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
// getters and setters for common schema attributes
|
||||
|
||||
// cannot be standard bean type getters and setters, otherwise this would
|
||||
// confuse JAXB
|
||||
|
||||
public boolean __getBlockcache() {
|
||||
Object o = attrs.get(BLOCKCACHE);
|
||||
return o != null ?
|
||||
Boolean.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKCACHE;
|
||||
}
|
||||
|
||||
public int __getBlocksize() {
|
||||
Object o = attrs.get(BLOCKSIZE);
|
||||
return o != null ?
|
||||
Integer.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKSIZE;
|
||||
}
|
||||
|
||||
public boolean __getBloomfilter() {
|
||||
Object o = attrs.get(BLOOMFILTER);
|
||||
return o != null ?
|
||||
Boolean.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_BLOOMFILTER;
|
||||
}
|
||||
|
||||
public String __getCompression() {
|
||||
Object o = attrs.get(COMPRESSION);
|
||||
return o != null ? o.toString() : HColumnDescriptor.DEFAULT_COMPRESSION;
|
||||
}
|
||||
|
||||
public boolean __getInMemory() {
|
||||
Object o = attrs.get(IN_MEMORY);
|
||||
return o != null ?
|
||||
Boolean.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_IN_MEMORY;
|
||||
}
|
||||
|
||||
public int __getTTL() {
|
||||
Object o = attrs.get(TTL);
|
||||
return o != null ?
|
||||
Integer.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_TTL;
|
||||
}
|
||||
|
||||
public int __getVersions() {
|
||||
Object o = attrs.get(VERSIONS);
|
||||
return o != null ?
|
||||
Integer.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_VERSIONS;
|
||||
}
|
||||
|
||||
public void __setBlocksize(int value) {
|
||||
attrs.put(BLOCKSIZE, Integer.toString(value));
|
||||
}
|
||||
|
||||
public void __setBlockcache(boolean value) {
|
||||
attrs.put(BLOCKCACHE, Boolean.toString(value));
|
||||
}
|
||||
|
||||
public void __setBloomfilter(boolean value) {
|
||||
attrs.put(BLOOMFILTER, Boolean.toString(value));
|
||||
}
|
||||
|
||||
public void __setCompression(String value) {
|
||||
attrs.put(COMPRESSION, value);
|
||||
}
|
||||
|
||||
public void __setInMemory(boolean value) {
|
||||
attrs.put(IN_MEMORY, Boolean.toString(value));
|
||||
}
|
||||
|
||||
public void __setTTL(int value) {
|
||||
attrs.put(TTL, Integer.toString(value));
|
||||
}
|
||||
|
||||
public void __setVersions(int value) {
|
||||
attrs.put(VERSIONS, Integer.toString(value));
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,29 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.model;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
public interface IProtobufWrapper {
|
||||
public byte[] createProtobufOutput();
|
||||
public IProtobufWrapper getObjectFromMessage(byte[] message)
|
||||
throws IOException;
|
||||
}
|
|
@@ -0,0 +1,122 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.model;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.Serializable;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAttribute;
|
||||
import javax.xml.bind.annotation.XmlElement;
|
||||
import javax.xml.bind.annotation.XmlRootElement;
|
||||
|
||||
@XmlRootElement(name="Row")
|
||||
public class RowModel implements IProtobufWrapper, Serializable {
|
||||
private static final long serialVersionUID = 1L;
|
||||
|
||||
private byte[] key;
|
||||
private List<CellModel> cells = new ArrayList<CellModel>();
|
||||
|
||||
public RowModel() { }
|
||||
|
||||
/**
|
||||
* @param key
|
||||
*/
|
||||
public RowModel(final String key) {
|
||||
this(key.getBytes());
|
||||
}
|
||||
|
||||
/**
|
||||
* @param key
|
||||
*/
|
||||
public RowModel(final byte[] key) {
|
||||
super();
|
||||
this.key = key;
|
||||
cells = new ArrayList<CellModel>();
|
||||
}
|
||||
|
||||
/**
|
||||
* @param key
|
||||
* @param cells
|
||||
*/
|
||||
public RowModel(final String key, final List<CellModel> cells) {
|
||||
this(key.getBytes(), cells);
|
||||
}
|
||||
|
||||
/**
|
||||
* @param key
|
||||
* @param cells
|
||||
*/
|
||||
public RowModel(final byte[] key, final List<CellModel> cells) {
|
||||
super();
|
||||
this.key = key;
|
||||
this.cells = cells;
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a cell to the list of cells for this row
|
||||
*
|
||||
* @param cell
|
||||
*/
|
||||
public void addCell(CellModel cell) {
|
||||
cells.add(cell);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the key
|
||||
*/
|
||||
@XmlAttribute
|
||||
public byte[] getKey() {
|
||||
return key;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param key the key to set
|
||||
*/
|
||||
public void setKey(byte[] key) {
|
||||
this.key = key;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the cells
|
||||
*/
|
||||
@XmlElement(name="Cell")
|
||||
public List<CellModel> getCells() {
|
||||
return cells;
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] createProtobufOutput() {
|
||||
// there is no standalone row protobuf message
|
||||
throw new UnsupportedOperationException(
|
||||
"no protobuf equivalent to RowModel");
|
||||
}
|
||||
|
||||
@Override
|
||||
public IProtobufWrapper getObjectFromMessage(byte[] message)
|
||||
throws IOException {
|
||||
// there is no standalone row protobuf message
|
||||
throw new UnsupportedOperationException(
|
||||
"no protobuf equivalent to RowModel");
|
||||
}
|
||||
|
||||
}
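Because RowModel is a JAXB-annotated bean, it can be marshalled straight to the XML representation served over REST. A minimal sketch, assuming only a standalone JAXBContext; the row key "testrow" is an illustrative value, not part of the patch:

  import javax.xml.bind.JAXBContext;
  import javax.xml.bind.Marshaller;

  // build a row with just a key; JAXB base64-encodes the byte[] attribute
  RowModel row = new RowModel("testrow");
  JAXBContext ctx = JAXBContext.newInstance(RowModel.class, CellModel.class);
  Marshaller marshaller = ctx.createMarshaller();
  marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
  marshaller.marshal(row, System.out);   // roughly: <Row key="dGVzdHJvdw=="/>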
|
|
@@ -0,0 +1,220 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.model;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.Serializable;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAttribute;
|
||||
import javax.xml.bind.annotation.XmlRootElement;
|
||||
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
import com.google.protobuf.ByteString;
|
||||
|
||||
@XmlRootElement(name="Scanner")
|
||||
public class ScannerModel implements IProtobufWrapper, Serializable {
|
||||
private static final long serialVersionUID = 1L;
|
||||
|
||||
private byte[] startRow = HConstants.EMPTY_START_ROW;
|
||||
private byte[] endRow = HConstants.EMPTY_END_ROW;
|
||||
private List<byte[]> columns = new ArrayList<byte[]>();
|
||||
private int batch = 1;
|
||||
private long startTime = 0;
|
||||
private long endTime = Long.MAX_VALUE;
|
||||
|
||||
public ScannerModel() {}
|
||||
|
||||
public ScannerModel(byte[] startRow, byte[] endRow, List<byte[]> columns,
|
||||
int batch, long endTime) {
|
||||
super();
|
||||
this.startRow = startRow;
|
||||
this.endRow = endRow;
|
||||
this.columns = columns;
|
||||
this.batch = batch;
|
||||
this.endTime = endTime;
|
||||
}
|
||||
|
||||
public ScannerModel(byte[] startRow, byte[] endRow, List<byte[]> columns,
|
||||
int batch, long startTime, long endTime) {
|
||||
super();
|
||||
this.startRow = startRow;
|
||||
this.endRow = endRow;
|
||||
this.columns = columns;
|
||||
this.batch = batch;
|
||||
this.startTime = startTime;
|
||||
this.endTime = endTime;
|
||||
}
|
||||
|
||||
public void addColumn(byte[] column) {
|
||||
columns.add(column);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if a start row was specified
|
||||
*/
|
||||
public boolean hasStartRow() {
|
||||
return !Bytes.equals(startRow, HConstants.EMPTY_START_ROW);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return start row
|
||||
*/
|
||||
@XmlAttribute
|
||||
public byte[] getStartRow() {
|
||||
return startRow;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if an end row was specified
|
||||
*/
|
||||
public boolean hasEndRow() {
|
||||
return !Bytes.equals(endRow, HConstants.EMPTY_END_ROW);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return end row
|
||||
*/
|
||||
@XmlAttribute
|
||||
public byte[] getEndRow() {
|
||||
return endRow;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return list of columns of interest, or empty for all
|
||||
*/
|
||||
@XmlAttribute(name="column")
|
||||
public List<byte[]> getColumns() {
|
||||
return columns;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the number of cells to return in batch
|
||||
*/
|
||||
@XmlAttribute
|
||||
public int getBatch() {
|
||||
return batch;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the lower bound on timestamps of items of interest
|
||||
*/
|
||||
@XmlAttribute
|
||||
public long getStartTime() {
|
||||
return startTime;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the upper bound on timestamps of items of interest
|
||||
*/
|
||||
@XmlAttribute
|
||||
public long getEndTime() {
|
||||
return endTime;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param startRow start row
|
||||
*/
|
||||
public void setStartRow(byte[] startRow) {
|
||||
this.startRow = startRow;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param endRow end row
|
||||
*/
|
||||
public void setEndRow(byte[] endRow) {
|
||||
this.endRow = endRow;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param batch the number of cells to return in batch
|
||||
*/
|
||||
public void setBatch(int batch) {
|
||||
this.batch = batch;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param startTime the lower bound on timestamps of items of interest
|
||||
*/
|
||||
public void setStartTime(long startTime) {
|
||||
this.startTime = startTime;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param endTime the upper bound on timestamps of items of interest
|
||||
*/
|
||||
public void setEndTime(long endTime) {
|
||||
this.endTime = endTime;
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] createProtobufOutput() {
|
||||
Scanner.Builder builder = Scanner.newBuilder();
|
||||
if (!Bytes.equals(startRow, HConstants.EMPTY_START_ROW)) {
|
||||
builder.setStartRow(ByteString.copyFrom(startRow));
|
||||
}
|
||||
if (!Bytes.equals(endRow, HConstants.EMPTY_END_ROW)) {
|
||||
builder.setEndRow(ByteString.copyFrom(endRow));
|
||||
}
|
||||
for (byte[] column: columns) {
|
||||
builder.addColumns(ByteString.copyFrom(column));
|
||||
}
|
||||
builder.setBatch(batch);
|
||||
if (startTime != 0) {
|
||||
builder.setStartTime(startTime);
|
||||
}
|
||||
if (endTime != 0) {
|
||||
builder.setEndTime(endTime);
|
||||
}
|
||||
|
||||
return builder.build().toByteArray();
|
||||
}
|
||||
|
||||
@Override
|
||||
public IProtobufWrapper getObjectFromMessage(byte[] message)
|
||||
throws IOException {
|
||||
Scanner.Builder builder = Scanner.newBuilder();
|
||||
builder.mergeFrom(message);
|
||||
if (builder.hasStartRow()) {
|
||||
startRow = builder.getStartRow().toByteArray();
|
||||
}
|
||||
if (builder.hasEndRow()) {
|
||||
endRow = builder.getEndRow().toByteArray();
|
||||
}
|
||||
for (ByteString column: builder.getColumnsList()) {
|
||||
addColumn(column.toByteArray());
|
||||
}
|
||||
if (builder.hasBatch()) {
|
||||
batch = builder.getBatch();
|
||||
}
|
||||
if (builder.hasStartTime()) {
|
||||
startTime = builder.getStartTime();
|
||||
}
|
||||
if (builder.hasEndTime()) {
|
||||
endTime = builder.getEndTime();
|
||||
}
|
||||
return this;
|
||||
}
|
||||
}
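The protobuf path can be exercised without a running server: serialize a populated ScannerModel and rebuild an equivalent instance from the raw bytes. A small sketch; the row bounds and the "info:" column are assumed example values:

  // round-trip a scanner specification through its protobuf encoding
  ScannerModel scanner = new ScannerModel();
  scanner.setStartRow(Bytes.toBytes("aaa"));
  scanner.setEndRow(Bytes.toBytes("zzz"));
  scanner.addColumn(Bytes.toBytes("info:"));
  scanner.setBatch(100);

  byte[] encoded = scanner.createProtobufOutput();
  ScannerModel decoded =
    (ScannerModel) new ScannerModel().getObjectFromMessage(encoded);
  // decoded.getBatch() == 100, decoded.getStartRow() equals scanner.getStartRow()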
|
|
@@ -0,0 +1,289 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.model;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.Serializable;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAttribute;
|
||||
import javax.xml.bind.annotation.XmlElement;
|
||||
import javax.xml.bind.annotation.XmlElementWrapper;
|
||||
import javax.xml.bind.annotation.XmlRootElement;
|
||||
|
||||
import org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
import com.google.protobuf.ByteString;
|
||||
|
||||
@XmlRootElement(name="ClusterStatus")
|
||||
public class StorageClusterStatusModel
|
||||
implements Serializable, IProtobufWrapper {
|
||||
private static final long serialVersionUID = 1L;
|
||||
|
||||
public static class Node {
|
||||
|
||||
public static class Region {
|
||||
private byte[] name;
|
||||
|
||||
public Region() {}
|
||||
|
||||
public Region(byte[] name) {
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
@XmlAttribute
|
||||
public byte[] getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
public void setName(byte[] name) {
|
||||
this.name = name;
|
||||
}
|
||||
}
|
||||
|
||||
private String name;
|
||||
private long startCode;
|
||||
private int requests;
|
||||
private List<Region> regions = new ArrayList<Region>();
|
||||
|
||||
public void addRegion(byte[] name) {
|
||||
regions.add(new Region(name));
|
||||
}
|
||||
|
||||
public Region getRegion(int i) {
|
||||
return regions.get(i);
|
||||
}
|
||||
|
||||
public Node() {}
|
||||
|
||||
public Node(String name, long startCode) {
|
||||
this.name = name;
|
||||
this.startCode = startCode;
|
||||
}
|
||||
|
||||
@XmlAttribute
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
@XmlAttribute
|
||||
public long getStartCode() {
|
||||
return startCode;
|
||||
}
|
||||
|
||||
@XmlElement(name="Region")
|
||||
public List<Region> getRegions() {
|
||||
return regions;
|
||||
}
|
||||
|
||||
@XmlAttribute
|
||||
public int getRequests() {
|
||||
return requests;
|
||||
}
|
||||
|
||||
public void setName(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
public void setStartCode(long startCode) {
|
||||
this.startCode = startCode;
|
||||
}
|
||||
|
||||
public void setRegions(List<Region> regions) {
|
||||
this.regions = regions;
|
||||
}
|
||||
|
||||
public void setRequests(int requests) {
|
||||
this.requests = requests;
|
||||
}
|
||||
}
|
||||
|
||||
private List<Node> liveNodes = new ArrayList<Node>();
|
||||
private List<String> deadNodes = new ArrayList<String>();
|
||||
private int regions;
|
||||
private int requests;
|
||||
private double averageLoad;
|
||||
|
||||
public Node addLiveNode(String name, long startCode) {
|
||||
Node node = new Node(name, startCode);
|
||||
liveNodes.add(node);
|
||||
return node;
|
||||
}
|
||||
|
||||
public Node getLiveNode(int i) {
|
||||
return liveNodes.get(i);
|
||||
}
|
||||
|
||||
public void addDeadNode(String node) {
|
||||
deadNodes.add(node);
|
||||
}
|
||||
|
||||
public String getDeadNode(int i) {
|
||||
return deadNodes.get(i);
|
||||
}
|
||||
|
||||
public StorageClusterStatusModel() {}
|
||||
|
||||
@XmlElement(name="Node")
|
||||
@XmlElementWrapper(name="LiveNodes")
|
||||
public List<Node> getLiveNodes() {
|
||||
return liveNodes;
|
||||
}
|
||||
|
||||
@XmlElement(name="Node")
|
||||
@XmlElementWrapper(name="DeadNodes")
|
||||
public List<String> getDeadNodes() {
|
||||
return deadNodes;
|
||||
}
|
||||
|
||||
@XmlAttribute
|
||||
public int getRegions() {
|
||||
return regions;
|
||||
}
|
||||
|
||||
@XmlAttribute
|
||||
public int getRequests() {
|
||||
return requests;
|
||||
}
|
||||
|
||||
@XmlAttribute
|
||||
public double getAverageLoad() {
|
||||
return averageLoad;
|
||||
}
|
||||
|
||||
public void setLiveNodes(List<Node> nodes) {
|
||||
this.liveNodes = nodes;
|
||||
}
|
||||
|
||||
public void setDeadNodes(List<String> nodes) {
|
||||
this.deadNodes = nodes;
|
||||
}
|
||||
|
||||
public void setRegions(int regions) {
|
||||
this.regions = regions;
|
||||
}
|
||||
|
||||
public void setRequests(int requests) {
|
||||
this.requests = requests;
|
||||
}
|
||||
|
||||
public void setAverageLoad(double averageLoad) {
|
||||
this.averageLoad = averageLoad;
|
||||
}
|
||||
|
||||
/* (non-Javadoc)
|
||||
* @see java.lang.Object#toString()
|
||||
*/
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append(String.format("%d live servers, %d dead servers, " +
|
||||
"%.4f average load\n\n", liveNodes.size(), deadNodes.size(),
|
||||
averageLoad));
|
||||
if (!liveNodes.isEmpty()) {
|
||||
sb.append(liveNodes.size());
|
||||
sb.append(" live servers\n");
|
||||
for (Node node: liveNodes) {
|
||||
sb.append(" ");
|
||||
sb.append(node.name);
|
||||
sb.append(' ');
|
||||
sb.append(node.startCode);
|
||||
sb.append("\n requests=");
|
||||
sb.append(node.requests);
|
||||
sb.append(", regions=");
|
||||
sb.append(node.regions.size());
|
||||
sb.append("\n\n");
|
||||
for (Node.Region region: node.regions) {
|
||||
sb.append(" ");
|
||||
sb.append(Bytes.toString(region.name));
|
||||
sb.append('\n');
|
||||
}
|
||||
sb.append('\n');
|
||||
}
|
||||
}
|
||||
if (!deadNodes.isEmpty()) {
|
||||
sb.append('\n');
|
||||
sb.append(deadNodes.size());
|
||||
sb.append(" dead servers\n");
|
||||
for (String node: deadNodes) {
|
||||
sb.append(" ");
|
||||
sb.append(node);
|
||||
sb.append('\n');
|
||||
}
|
||||
}
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] createProtobufOutput() {
|
||||
StorageClusterStatus.Builder builder = StorageClusterStatus.newBuilder();
|
||||
builder.setRegions(regions);
|
||||
builder.setRequests(requests);
|
||||
builder.setAverageLoad(averageLoad);
|
||||
for (Node node: liveNodes) {
|
||||
StorageClusterStatus.Node.Builder nodeBuilder =
|
||||
StorageClusterStatus.Node.newBuilder();
|
||||
nodeBuilder.setName(node.name);
|
||||
nodeBuilder.setStartCode(node.startCode);
|
||||
nodeBuilder.setRequests(node.requests);
|
||||
for (Node.Region region: node.regions) {
|
||||
nodeBuilder.addRegions(ByteString.copyFrom(region.name));
|
||||
}
|
||||
builder.addLiveNodes(nodeBuilder);
|
||||
}
|
||||
for (String node: deadNodes) {
|
||||
builder.addDeadNodes(node);
|
||||
}
|
||||
return builder.build().toByteArray();
|
||||
}
|
||||
|
||||
@Override
|
||||
public IProtobufWrapper getObjectFromMessage(byte[] message)
|
||||
throws IOException {
|
||||
StorageClusterStatus.Builder builder = StorageClusterStatus.newBuilder();
|
||||
builder.mergeFrom(message);
|
||||
if (builder.hasRegions()) {
|
||||
regions = builder.getRegions();
|
||||
}
|
||||
if (builder.hasRequests()) {
|
||||
requests = builder.getRequests();
|
||||
}
|
||||
if (builder.hasAverageLoad()) {
|
||||
averageLoad = builder.getAverageLoad();
|
||||
}
|
||||
for (StorageClusterStatus.Node node: builder.getLiveNodesList()) {
|
||||
long startCode = node.hasStartCode() ? node.getStartCode() : -1;
|
||||
StorageClusterStatusModel.Node nodeModel =
|
||||
addLiveNode(node.getName(), startCode);
|
||||
int requests = node.hasRequests() ? node.getRequests() : 0;
|
||||
nodeModel.setRequests(requests);
|
||||
for (ByteString region: node.getRegionsList()) {
|
||||
nodeModel.addRegion(region.toByteArray());
|
||||
}
|
||||
}
|
||||
for (String node: builder.getDeadNodesList()) {
|
||||
addDeadNode(node);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
}
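The status model can also be assembled by hand, with toString() producing the shell-style summary. A sketch under the assumption of made-up host names and a fabricated start code, purely for illustration:

  StorageClusterStatusModel status = new StorageClusterStatusModel();
  status.setRegions(2);
  status.setRequests(0);
  status.setAverageLoad(1.0);
  StorageClusterStatusModel.Node node =
    status.addLiveNode("test.example.com:60030", 1245219839331L);
  node.setRequests(0);
  node.addRegion(Bytes.toBytes("-ROOT-,,0"));
  status.addDeadNode("dead.example.com:60030");
  System.out.println(status);   // multi-line live/dead server summary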
|
|
@@ -0,0 +1,50 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.model;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
import javax.xml.bind.annotation.XmlRootElement;
|
||||
import javax.xml.bind.annotation.XmlValue;
|
||||
|
||||
@XmlRootElement(name="ClusterVersion")
|
||||
public class StorageClusterVersionModel implements Serializable {
|
||||
private static final long serialVersionUID = 1L;
|
||||
|
||||
private String version;
|
||||
|
||||
@XmlValue
|
||||
public String getVersion() {
|
||||
return version;
|
||||
}
|
||||
|
||||
public void setVersion(String version) {
|
||||
this.version = version;
|
||||
}
|
||||
|
||||
/* (non-Javadoc)
|
||||
* @see java.lang.Object#toString()
|
||||
*/
|
||||
@Override
|
||||
public String toString() {
|
||||
return version;
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,132 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.model;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.Serializable;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAttribute;
|
||||
import javax.xml.bind.annotation.XmlElement;
|
||||
import javax.xml.bind.annotation.XmlRootElement;
|
||||
import javax.xml.bind.annotation.XmlType;
|
||||
|
||||
import org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo;
|
||||
|
||||
import com.google.protobuf.ByteString;
|
||||
|
||||
@XmlRootElement(name="TableInfo")
|
||||
@XmlType(propOrder = {"name","regions"})
|
||||
public class TableInfoModel implements Serializable, IProtobufWrapper {
|
||||
private static final long serialVersionUID = 1L;
|
||||
|
||||
private String name;
|
||||
private List<TableRegionModel> regions = new ArrayList<TableRegionModel>();
|
||||
|
||||
public TableInfoModel() {}
|
||||
|
||||
public TableInfoModel(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
public void add(TableRegionModel object) {
|
||||
regions.add(object);
|
||||
}
|
||||
|
||||
public TableRegionModel get(int index) {
|
||||
return regions.get(index);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the table name
|
||||
*/
|
||||
@XmlAttribute
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the regions
|
||||
*/
|
||||
@XmlElement(name="Region")
|
||||
public List<TableRegionModel> getRegions() {
|
||||
return regions;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param name the table name
|
||||
*/
|
||||
public void setName(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param regions the regions to set
|
||||
*/
|
||||
public void setRegions(List<TableRegionModel> regions) {
|
||||
this.regions = regions;
|
||||
}
|
||||
|
||||
/* (non-Javadoc)
|
||||
* @see java.lang.Object#toString()
|
||||
*/
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
for (TableRegionModel aRegion: regions) {
|
||||
sb.append(aRegion.toString());
|
||||
sb.append('\n');
|
||||
}
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] createProtobufOutput() {
|
||||
TableInfo.Builder builder = TableInfo.newBuilder();
|
||||
builder.setName(name);
|
||||
for (TableRegionModel aRegion: regions) {
|
||||
TableInfo.Region.Builder regionBuilder = TableInfo.Region.newBuilder();
|
||||
regionBuilder.setName(aRegion.getName());
|
||||
regionBuilder.setId(aRegion.getId());
|
||||
regionBuilder.setStartKey(ByteString.copyFrom(aRegion.getStartKey()));
|
||||
regionBuilder.setEndKey(ByteString.copyFrom(aRegion.getEndKey()));
|
||||
regionBuilder.setLocation(aRegion.getLocation());
|
||||
builder.addRegions(regionBuilder);
|
||||
}
|
||||
return builder.build().toByteArray();
|
||||
}
|
||||
|
||||
@Override
|
||||
public IProtobufWrapper getObjectFromMessage(byte[] message)
|
||||
throws IOException {
|
||||
TableInfo.Builder builder = TableInfo.newBuilder();
|
||||
builder.mergeFrom(message);
|
||||
setName(builder.getName());
|
||||
for (TableInfo.Region region: builder.getRegionsList()) {
|
||||
add(new TableRegionModel(builder.getName(), region.getId(),
|
||||
region.getStartKey().toByteArray(),
|
||||
region.getEndKey().toByteArray(),
|
||||
region.getLocation()));
|
||||
}
|
||||
return this;
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,97 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.model;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.Serializable;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import javax.xml.bind.annotation.XmlElementRef;
|
||||
import javax.xml.bind.annotation.XmlRootElement;
|
||||
|
||||
import org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList;
|
||||
|
||||
@XmlRootElement(name="TableList")
|
||||
public class TableListModel implements Serializable, IProtobufWrapper {
|
||||
|
||||
private static final long serialVersionUID = 1L;
|
||||
|
||||
private List<TableModel> tables = new ArrayList<TableModel>();
|
||||
|
||||
public TableListModel() {}
|
||||
|
||||
public void add(TableModel object) {
|
||||
tables.add(object);
|
||||
}
|
||||
|
||||
public TableModel get(int index) {
|
||||
return tables.get(index);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the tables
|
||||
*/
|
||||
@XmlElementRef(name="table")
|
||||
public List<TableModel> getTables() {
|
||||
return tables;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param tables the tables to set
|
||||
*/
|
||||
public void setTables(List<TableModel> tables) {
|
||||
this.tables = tables;
|
||||
}
|
||||
|
||||
/* (non-Javadoc)
|
||||
* @see java.lang.Object#toString()
|
||||
*/
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
for (TableModel aTable : tables) {
|
||||
sb.append(aTable.toString());
|
||||
sb.append('\n');
|
||||
}
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] createProtobufOutput() {
|
||||
TableList.Builder builder = TableList.newBuilder();
|
||||
for (TableModel aTable : tables) {
|
||||
builder.addName(aTable.getName());
|
||||
}
|
||||
return builder.build().toByteArray();
|
||||
}
|
||||
|
||||
@Override
|
||||
public IProtobufWrapper getObjectFromMessage(byte[] message)
|
||||
throws IOException {
|
||||
TableList.Builder builder = TableList.newBuilder();
|
||||
builder.mergeFrom(message);
|
||||
for (String table: builder.getNameList()) {
|
||||
this.add(new TableModel(table));
|
||||
}
|
||||
return this;
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,67 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.model;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAttribute;
|
||||
import javax.xml.bind.annotation.XmlRootElement;
|
||||
|
||||
@XmlRootElement(name="table")
|
||||
public class TableModel implements Serializable {
|
||||
|
||||
private static final long serialVersionUID = 1L;
|
||||
|
||||
private String name;
|
||||
|
||||
public TableModel() {}
|
||||
|
||||
/**
|
||||
* @param name
|
||||
*/
|
||||
public TableModel(String name) {
|
||||
super();
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the name
|
||||
*/
|
||||
@XmlAttribute
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param name the name to set
|
||||
*/
|
||||
public void setName(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
/* (non-Javadoc)
|
||||
* @see java.lang.Object#toString()
|
||||
*/
|
||||
@Override
|
||||
public String toString() {
|
||||
return this.name;
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,167 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.model;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAttribute;
|
||||
import javax.xml.bind.annotation.XmlRootElement;
|
||||
import javax.xml.bind.annotation.XmlType;
|
||||
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
@XmlRootElement(name="Region")
|
||||
@XmlType(propOrder = {"name","id","startKey","endKey","location"})
|
||||
public class TableRegionModel implements Serializable {
|
||||
|
||||
private static final long serialVersionUID = 1L;
|
||||
|
||||
private String table;
|
||||
private long id;
|
||||
private byte[] startKey;
|
||||
private byte[] endKey;
|
||||
private String location;
|
||||
|
||||
/**
|
||||
* Constructor
|
||||
*/
|
||||
public TableRegionModel() {}
|
||||
|
||||
/**
|
||||
* Constructor
|
||||
* @param table the table name
|
||||
* @param id the encoded id of the region
|
||||
* @param startKey the start key of the region
|
||||
* @param endKey the end key of the region
|
||||
* @param location the name and port of the region server hosting the region
|
||||
*/
|
||||
public TableRegionModel(String table, long id, byte[] startKey,
|
||||
byte[] endKey, String location) {
|
||||
this.table = table;
|
||||
this.id = id;
|
||||
this.startKey = startKey;
|
||||
this.endKey = endKey;
|
||||
this.location = location;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the region name
|
||||
*/
|
||||
@XmlAttribute
|
||||
public String getName() {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append(table);
|
||||
sb.append(',');
|
||||
sb.append(Bytes.toString(startKey));
|
||||
sb.append(',');
|
||||
sb.append(id);
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the encoded region id
|
||||
*/
|
||||
@XmlAttribute
|
||||
public long getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the start key
|
||||
*/
|
||||
@XmlAttribute
|
||||
public byte[] getStartKey() {
|
||||
return startKey;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the end key
|
||||
*/
|
||||
@XmlAttribute
|
||||
public byte[] getEndKey() {
|
||||
return endKey;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the name and port of the region server hosting the region
|
||||
*/
|
||||
@XmlAttribute
|
||||
public String getLocation() {
|
||||
return location;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param name region printable name
|
||||
*/
|
||||
public void setName(String name) {
|
||||
String split[] = name.split(",");
|
||||
table = split[0];
|
||||
startKey = Bytes.toBytes(split[1]);
|
||||
id = Long.valueOf(split[2]);
|
||||
}
|
||||
|
||||
/**
|
||||
* @param id the region's encoded id
|
||||
*/
|
||||
public void setId(long id) {
|
||||
this.id = id;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param startKey the start key
|
||||
*/
|
||||
public void setStartKey(byte[] startKey) {
|
||||
this.startKey = startKey;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param endKey the end key
|
||||
*/
|
||||
public void setEndKey(byte[] endKey) {
|
||||
this.endKey = endKey;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param location the name and port of the region server hosting the region
|
||||
*/
|
||||
public void setLocation(String location) {
|
||||
this.location = location;
|
||||
}
|
||||
|
||||
/* (non-Javadoc)
|
||||
* @see java.lang.Object#toString()
|
||||
*/
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append(getName());
|
||||
sb.append(" [\n id=");
|
||||
sb.append(id);
|
||||
sb.append("\n startKey='");
|
||||
sb.append(Bytes.toString(startKey));
|
||||
sb.append("'\n endKey='");
|
||||
sb.append(Bytes.toString(endKey));
|
||||
sb.append("'\n location='");
|
||||
sb.append(location);
|
||||
sb.append("'\n]\n");
|
||||
return sb.toString();
|
||||
}
|
||||
}
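The region name round-trips through the usual "table,startKey,id" printable form: setName() splits it back into its parts and getName() rebuilds it. A sketch with an assumed table name, row key, and region id:

  TableRegionModel region = new TableRegionModel();
  region.setName("testtable,rowkey123,1245219839331");
  // after parsing: table == "testtable", startKey == Bytes.toBytes("rowkey123"),
  // id == 1245219839331L; getName() reconstructs the same comma-delimited form
  System.out.println(region.getName());   // testtable,rowkey123,1245219839331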
|
|
@@ -0,0 +1,272 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.model;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.Serializable;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.xml.bind.annotation.XmlAnyAttribute;
|
||||
import javax.xml.bind.annotation.XmlAttribute;
|
||||
import javax.xml.bind.annotation.XmlElement;
|
||||
import javax.xml.bind.annotation.XmlRootElement;
|
||||
import javax.xml.bind.annotation.XmlType;
|
||||
import javax.xml.namespace.QName;
|
||||
|
||||
import org.apache.hadoop.hbase.HColumnDescriptor;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
import org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema;
|
||||
import org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema;
|
||||
|
||||
@XmlRootElement(name="TableSchema")
|
||||
@XmlType(propOrder = {"name","columns"})
|
||||
public class TableSchemaModel implements Serializable, IProtobufWrapper {
|
||||
private static final long serialVersionUID = 1L;
|
||||
private static final QName IN_MEMORY = new QName(HConstants.IN_MEMORY);
|
||||
private static final QName IS_META = new QName(HTableDescriptor.IS_META);
|
||||
private static final QName IS_ROOT = new QName(HTableDescriptor.IS_ROOT);
|
||||
private static final QName READONLY = new QName(HTableDescriptor.READONLY);
|
||||
private static final QName TTL = new QName(HColumnDescriptor.TTL);
|
||||
private static final QName VERSIONS = new QName(HConstants.VERSIONS);
|
||||
private static final QName COMPRESSION =
|
||||
new QName(HColumnDescriptor.COMPRESSION);
|
||||
|
||||
private String name;
|
||||
private Map<QName,Object> attrs = new HashMap<QName,Object>();
|
||||
private List<ColumnSchemaModel> columns = new ArrayList<ColumnSchemaModel>();
|
||||
|
||||
public TableSchemaModel() {}
|
||||
|
||||
public void addAttribute(String name, Object value) {
|
||||
attrs.put(new QName(name), value);
|
||||
}
|
||||
|
||||
public String getAttribute(String name) {
|
||||
return attrs.get(new QName(name)).toString();
|
||||
}
|
||||
|
||||
public void addColumnFamily(ColumnSchemaModel object) {
|
||||
columns.add(object);
|
||||
}
|
||||
|
||||
public ColumnSchemaModel getColumnFamily(int index) {
|
||||
return columns.get(index);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the table name
|
||||
*/
|
||||
@XmlAttribute
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the map for holding unspecified (user) attributes
|
||||
*/
|
||||
@XmlAnyAttribute
|
||||
public Map<QName,Object> getAny() {
|
||||
return attrs;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the columns
|
||||
*/
|
||||
@XmlElement(name="ColumnSchema")
|
||||
public List<ColumnSchemaModel> getColumns() {
|
||||
return columns;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param name the table name
|
||||
*/
|
||||
public void setName(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param columns the columns to set
|
||||
*/
|
||||
public void setColumns(List<ColumnSchemaModel> columns) {
|
||||
this.columns = columns;
|
||||
}
|
||||
|
||||
/* (non-Javadoc)
|
||||
* @see java.lang.Object#toString()
|
||||
*/
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append("{ NAME=> '");
|
||||
sb.append(name);
|
||||
sb.append('\'');
|
||||
for (Map.Entry<QName,Object> e: attrs.entrySet()) {
|
||||
sb.append(", ");
|
||||
sb.append(e.getKey().getLocalPart());
|
||||
sb.append(" => '");
|
||||
sb.append(e.getValue().toString());
|
||||
sb.append('\'');
|
||||
}
|
||||
sb.append(", COLUMNS => [ ");
|
||||
Iterator<ColumnSchemaModel> i = columns.iterator();
|
||||
while (i.hasNext()) {
|
||||
ColumnSchemaModel family = i.next();
|
||||
sb.append(family.toString());
|
||||
if (i.hasNext()) {
|
||||
sb.append(',');
|
||||
}
|
||||
sb.append(' ');
|
||||
}
|
||||
sb.append("] }");
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
// getters and setters for common schema attributes
|
||||
|
||||
// cannot be standard bean type getters and setters, otherwise this would
|
||||
// confuse JAXB
|
||||
|
||||
public boolean __getInMemory() {
|
||||
Object o = attrs.get(IN_MEMORY);
|
||||
return o != null ?
|
||||
Boolean.valueOf(o.toString()) : HTableDescriptor.DEFAULT_IN_MEMORY;
|
||||
}
|
||||
|
||||
public boolean __getIsMeta() {
|
||||
Object o = attrs.get(IS_META);
|
||||
return o != null ? Boolean.valueOf(o.toString()) : false;
|
||||
}
|
||||
|
||||
public boolean __getIsRoot() {
|
||||
Object o = attrs.get(IS_ROOT);
|
||||
return o != null ? Boolean.valueOf(o.toString()) : false;
|
||||
}
|
||||
|
||||
public boolean __getReadOnly() {
|
||||
Object o = attrs.get(READONLY);
|
||||
return o != null ?
|
||||
Boolean.valueOf(o.toString()) : HTableDescriptor.DEFAULT_READONLY;
|
||||
}
|
||||
|
||||
public void __setInMemory(boolean value) {
|
||||
attrs.put(IN_MEMORY, Boolean.toString(value));
|
||||
}
|
||||
|
||||
public void __setIsMeta(boolean value) {
|
||||
attrs.put(IS_META, Boolean.toString(value));
|
||||
}
|
||||
|
||||
public void __setIsRoot(boolean value) {
|
||||
attrs.put(IS_ROOT, Boolean.toString(value));
|
||||
}
|
||||
|
||||
public void __setReadOnly(boolean value) {
|
||||
attrs.put(READONLY, Boolean.toString(value));
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] createProtobufOutput() {
|
||||
TableSchema.Builder builder = TableSchema.newBuilder();
|
||||
builder.setName(name);
|
||||
for (Map.Entry<QName, Object> e: attrs.entrySet()) {
|
||||
TableSchema.Attribute.Builder attrBuilder =
|
||||
TableSchema.Attribute.newBuilder();
|
||||
attrBuilder.setName(e.getKey().getLocalPart());
|
||||
attrBuilder.setValue(e.getValue().toString());
|
||||
builder.addAttrs(attrBuilder);
|
||||
}
|
||||
for (ColumnSchemaModel family: columns) {
|
||||
Map<QName, Object> familyAttrs = family.getAny();
|
||||
ColumnSchema.Builder familyBuilder = ColumnSchema.newBuilder();
|
||||
familyBuilder.setName(family.getName());
|
||||
for (Map.Entry<QName, Object> e: familyAttrs.entrySet()) {
|
||||
ColumnSchema.Attribute.Builder attrBuilder =
|
||||
ColumnSchema.Attribute.newBuilder();
|
||||
attrBuilder.setName(e.getKey().getLocalPart());
|
||||
attrBuilder.setValue(e.getValue().toString());
|
||||
familyBuilder.addAttrs(attrBuilder);
|
||||
}
|
||||
if (familyAttrs.containsKey(TTL)) {
|
||||
familyBuilder.setTtl(
|
||||
Integer.valueOf(familyAttrs.get(TTL).toString()));
|
||||
}
|
||||
if (familyAttrs.containsKey(VERSIONS)) {
|
||||
familyBuilder.setMaxVersions(
|
||||
Integer.valueOf(familyAttrs.get(VERSIONS).toString()));
|
||||
}
|
||||
if (familyAttrs.containsKey(COMPRESSION)) {
|
||||
familyBuilder.setCompression(familyAttrs.get(COMPRESSION).toString());
|
||||
}
|
||||
builder.addColumns(familyBuilder);
|
||||
}
|
||||
if (attrs.containsKey(IN_MEMORY)) {
|
||||
builder.setInMemory(
|
||||
Boolean.valueOf(attrs.get(IN_MEMORY).toString()));
|
||||
}
|
||||
if (attrs.containsKey(READONLY)) {
|
||||
builder.setReadOnly(
|
||||
Boolean.valueOf(attrs.get(READONLY).toString()));
|
||||
}
|
||||
return builder.build().toByteArray();
|
||||
}
|
||||
|
||||
@Override
|
||||
public IProtobufWrapper getObjectFromMessage(byte[] message)
|
||||
throws IOException {
|
||||
TableSchema.Builder builder = TableSchema.newBuilder();
|
||||
builder.mergeFrom(message);
|
||||
this.setName(builder.getName());
|
||||
for (TableSchema.Attribute attr: builder.getAttrsList()) {
|
||||
this.addAttribute(attr.getName(), attr.getValue());
|
||||
}
|
||||
if (builder.hasInMemory()) {
|
||||
this.addAttribute(HConstants.IN_MEMORY, builder.getInMemory());
|
||||
}
|
||||
if (builder.hasReadOnly()) {
|
||||
this.addAttribute(HTableDescriptor.READONLY, builder.getReadOnly());
|
||||
}
|
||||
for (ColumnSchema family: builder.getColumnsList()) {
|
||||
ColumnSchemaModel familyModel = new ColumnSchemaModel();
|
||||
familyModel.setName(family.getName());
|
||||
for (ColumnSchema.Attribute attr: family.getAttrsList()) {
|
||||
familyModel.addAttribute(attr.getName(), attr.getValue());
|
||||
}
|
||||
if (family.hasTtl()) {
|
||||
familyModel.addAttribute(HColumnDescriptor.TTL, family.getTtl());
|
||||
}
|
||||
if (family.hasMaxVersions()) {
|
||||
familyModel.addAttribute(HConstants.VERSIONS,
|
||||
family.getMaxVersions());
|
||||
}
|
||||
if (family.hasCompression()) {
|
||||
familyModel.addAttribute(HColumnDescriptor.COMPRESSION,
|
||||
family.getCompression());
|
||||
}
|
||||
this.addColumnFamily(familyModel);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
}
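A whole table schema, including a column family, survives the protobuf round trip in the same way as the simpler models. A sketch; the table name "testtable", family "info", and the attribute values are illustrative assumptions:

  TableSchemaModel schema = new TableSchemaModel();
  schema.setName("testtable");
  schema.__setReadOnly(false);
  ColumnSchemaModel family = new ColumnSchemaModel();
  family.setName("info");
  family.__setVersions(3);
  family.__setCompression("NONE");
  schema.addColumnFamily(family);

  byte[] encoded = schema.createProtobufOutput();
  TableSchemaModel decoded =
    (TableSchemaModel) new TableSchemaModel().getObjectFromMessage(encoded);
  System.out.println(decoded);   // { NAME => 'testtable', ..., COLUMNS => [ ... ] }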
|
|
@@ -0,0 +1,159 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.model;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.Serializable;
|
||||
|
||||
import javax.servlet.ServletContext;
|
||||
import javax.xml.bind.annotation.XmlAttribute;
|
||||
import javax.xml.bind.annotation.XmlRootElement;
|
||||
|
||||
import org.apache.hadoop.hbase.stargate.RESTServlet;
|
||||
import org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version;
|
||||
|
||||
import com.sun.jersey.spi.container.servlet.ServletContainer;
|
||||
|
||||
@XmlRootElement(name="Version")
|
||||
public class VersionModel implements Serializable, IProtobufWrapper {
|
||||
|
||||
private static final long serialVersionUID = 1L;
|
||||
|
||||
private String stargateVersion;
|
||||
private String jvmVersion;
|
||||
private String osVersion;
|
||||
private String serverVersion;
|
||||
private String jerseyVersion;
|
||||
|
||||
public VersionModel() {}
|
||||
|
||||
public VersionModel(ServletContext context) {
|
||||
stargateVersion = RESTServlet.VERSION_STRING;
|
||||
jvmVersion = System.getProperty("java.vm.vendor") + ' ' +
|
||||
System.getProperty("java.version") + '-' +
|
||||
System.getProperty("java.vm.version");
|
||||
osVersion = System.getProperty("os.name") + ' ' +
|
||||
System.getProperty("os.version") + ' ' +
|
||||
System.getProperty("os.arch");
|
||||
serverVersion = context.getServerInfo();
|
||||
jerseyVersion = ServletContainer.class.getPackage()
|
||||
.getImplementationVersion();
|
||||
}
|
||||
|
||||
@XmlAttribute(name="Stargate")
|
||||
public String getStargateVersion() {
|
||||
return stargateVersion;
|
||||
}
|
||||
|
||||
@XmlAttribute(name="JVM")
|
||||
public String getJvmVersion() {
|
||||
return jvmVersion;
|
||||
}
|
||||
|
||||
@XmlAttribute(name="OS")
|
||||
public String getOsVersion() {
|
||||
return osVersion;
|
||||
}
|
||||
|
||||
@XmlAttribute(name="Server")
|
||||
public String getServerVersion() {
|
||||
return serverVersion;
|
||||
}
|
||||
|
||||
@XmlAttribute(name="Jersey")
|
||||
public String getJerseyVersion() {
|
||||
return jerseyVersion;
|
||||
}
|
||||
|
||||
public void setStargateVersion(String version) {
|
||||
this.stargateVersion = version;
|
||||
}
|
||||
|
||||
public void setOsVersion(String version) {
|
||||
this.osVersion = version;
|
||||
}
|
||||
|
||||
public void setJvmVersion(String version) {
|
||||
this.jvmVersion = version;
|
||||
}
|
||||
|
||||
public void setServerVersion(String version) {
|
||||
this.serverVersion = version;
|
||||
}
|
||||
|
||||
public void setJerseyVersion(String version) {
|
||||
this.jerseyVersion = version;
|
||||
}
|
||||
|
||||
/* (non-Javadoc)
|
||||
* @see java.lang.Object#toString()
|
||||
*/
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append("Stargate ");
|
||||
sb.append(stargateVersion);
|
||||
sb.append(" [JVM: ");
|
||||
sb.append(jvmVersion);
|
||||
sb.append("] [OS: ");
|
||||
sb.append(osVersion);
|
||||
sb.append("] [Server: ");
|
||||
sb.append(serverVersion);
|
||||
sb.append("] [Jersey: ");
|
||||
sb.append(jerseyVersion);
|
||||
sb.append("]\n");
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] createProtobufOutput() {
|
||||
Version.Builder builder = Version.newBuilder();
|
||||
builder.setStargateVersion(stargateVersion);
|
||||
builder.setJvmVersion(jvmVersion);
|
||||
builder.setOsVersion(osVersion);
|
||||
builder.setServerVersion(serverVersion);
|
||||
builder.setJerseyVersion(jerseyVersion);
|
||||
return builder.build().toByteArray();
|
||||
}
|
||||
|
||||
@Override
|
||||
public IProtobufWrapper getObjectFromMessage(byte[] message)
|
||||
throws IOException {
|
||||
Version.Builder builder = Version.newBuilder();
|
||||
builder.mergeFrom(message);
|
||||
if (builder.hasStargateVersion()) {
|
||||
stargateVersion = builder.getStargateVersion();
|
||||
}
|
||||
if (builder.hasJvmVersion()) {
|
||||
jvmVersion = builder.getJvmVersion();
|
||||
}
|
||||
if (builder.hasOsVersion()) {
|
||||
osVersion = builder.getOsVersion();
|
||||
}
|
||||
if (builder.hasServerVersion()) {
|
||||
serverVersion = builder.getServerVersion();
|
||||
}
|
||||
if (builder.hasJerseyVersion()) {
|
||||
jerseyVersion = builder.getJerseyVersion();
|
||||
}
|
||||
return this;
|
||||
}
|
||||
}
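In the servlet the model is built from the ServletContext, but for illustration the setters can populate it directly and toString() yields the version banner. All of the version strings below are made-up placeholders:

  VersionModel version = new VersionModel();
  version.setStargateVersion("0.0.1");
  version.setJvmVersion("Sun Microsystems Inc. 1.6.0_13-11.3-b02");
  version.setOsVersion("Linux 2.6.28 amd64");
  version.setServerVersion("jetty/6.1.14");
  version.setJerseyVersion("1.1.0-ea");
  System.out.println(version);
  // Stargate 0.0.1 [JVM: ...] [OS: ...] [Server: ...] [Jersey: ...]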
|
|
@@ -0,0 +1,26 @@
|
|||
// Copyright 2009 The Apache Software Foundation
|
||||
//
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.protobuf.generated;
|
||||
|
||||
message Cell {
|
||||
optional bytes row = 1; // unused if Cell is in a CellSet
|
||||
optional bytes column = 2;
|
||||
optional int64 timestamp = 3;
|
||||
optional bytes data = 4;
|
||||
}
|
|
@@ -0,0 +1,29 @@
|
|||
// Copyright 2009 The Apache Software Foundation
|
||||
//
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import "CellMessage.proto";
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.protobuf.generated;
|
||||
|
||||
message CellSet {
|
||||
message Row {
|
||||
required bytes key = 1;
|
||||
repeated Cell values = 2;
|
||||
}
|
||||
repeated Row rows = 1;
|
||||
}
|
|
@@ -0,0 +1,32 @@
|
|||
// Copyright 2009 The Apache Software Foundation
|
||||
//
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.protobuf.generated;
|
||||
|
||||
message ColumnSchema {
|
||||
optional string name = 1;
|
||||
message Attribute {
|
||||
required string name = 1;
|
||||
required string value = 2;
|
||||
}
|
||||
repeated Attribute attrs = 2;
|
||||
// optional helpful encodings of commonly used attributes
|
||||
optional int32 ttl = 3;
|
||||
optional int32 maxVersions = 4;
|
||||
optional string compression = 5;
|
||||
}
|
|
@@ -0,0 +1,28 @@
|
|||
// Copyright 2009 The Apache Software Foundation
|
||||
//
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.protobuf.generated;
|
||||
|
||||
message Scanner {
|
||||
optional bytes startRow = 1;
|
||||
optional bytes endRow = 2;
|
||||
repeated bytes columns = 3;
|
||||
optional int32 batch = 4;
|
||||
optional int64 startTime = 5;
|
||||
optional int64 endTime = 6;
|
||||
}
|
|
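For illustration only, not part of this commit: a rough sketch of assembling a Scanner specification on the client side. The generated ScannerMessage class is not shown in this excerpt, so the setStartRow()/setEndRow()/addColumns()/setBatch() builder methods are assumed to follow the usual protoc naming for the fields above; all values are made up.

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner;

public class ScannerSpecExample {
  public static void main(String[] args) {
    // hypothetical scan of [row0, row9) over one column, 100 cells per batch
    Scanner scanner = Scanner.newBuilder()
        .setStartRow(ByteString.copyFromUtf8("row0"))
        .setEndRow(ByteString.copyFromUtf8("row9"))
        .addColumns(ByteString.copyFromUtf8("info:name"))
        .setBatch(100)
        .build();
    System.out.println("batch size: " + scanner.getBatch());
  }
}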
@@ -0,0 +1,35 @@
// Copyright 2009 The Apache Software Foundation
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package org.apache.hadoop.hbase.stargate.protobuf.generated;

message StorageClusterStatus {
  message Node {
    required string name = 1; // name:port
    optional int64 startCode = 4;
    optional int32 requests = 2;
    repeated bytes regions = 3;
  }
  // node status
  repeated Node liveNodes = 1;
  repeated string deadNodes = 2;
  // summary statistics
  optional int32 regions = 3;
  optional int32 requests = 4;
  optional double averageLoad = 5;
}
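For illustration only, not part of this commit: a sketch of populating and reading back a StorageClusterStatus. The generated StorageClusterStatusMessage class is not part of this excerpt, so the builder and getter names are assumed to follow standard protoc conventions for the fields above; the node name and counts are made-up sample data.

import org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus;

public class ClusterStatusExample {
  public static void main(String[] args) throws Exception {
    // made-up status for a one-node cluster
    StorageClusterStatus status = StorageClusterStatus.newBuilder()
        .addLiveNodes(StorageClusterStatus.Node.newBuilder()
            .setName("regionserver1:60030") // name:port, per the comment on the field
            .setRequests(42)
            .build())
        .setRegions(1)
        .setRequests(42)
        .setAverageLoad(1.0)
        .build();

    StorageClusterStatus copy = StorageClusterStatus.parseFrom(status.toByteArray());
    System.out.println(copy.getLiveNodesCount() + " live node(s), average load " + copy.getAverageLoad());
  }
}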
@@ -0,0 +1,31 @@
// Copyright 2009 The Apache Software Foundation
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package org.apache.hadoop.hbase.stargate.protobuf.generated;

message TableInfo {
  required string name = 1;
  message Region {
    required string name = 1;
    optional bytes startKey = 2;
    optional bytes endKey = 3;
    optional int64 id = 4;
    optional string location = 5;
  }
  repeated Region regions = 2;
}
@@ -0,0 +1,23 @@
// Copyright 2009 The Apache Software Foundation
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package org.apache.hadoop.hbase.stargate.protobuf.generated;

message TableList {
  repeated string name = 1;
}
@@ -0,0 +1,34 @@
// Copyright 2009 The Apache Software Foundation
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import "ColumnSchemaMessage.proto";

package org.apache.hadoop.hbase.stargate.protobuf.generated;

message TableSchema {
  optional string name = 1;
  message Attribute {
    required string name = 1;
    required string value = 2;
  }
  repeated Attribute attrs = 2;
  repeated ColumnSchema columns = 3;
  // optional helpful encodings of commonly used attributes
  optional bool inMemory = 4;
  optional bool readOnly = 5;
}
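For illustration only, not part of this commit: a sketch combining TableSchema with the ColumnSchema message imported above. The generated TableSchemaMessage and ColumnSchemaMessage builder methods are assumed to follow standard protoc naming for the fields shown; the table and column family names are made up.

import org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema;
import org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema;

public class TableSchemaExample {
  public static void main(String[] args) {
    // one hypothetical table ("mytable") with a single column family ("info")
    TableSchema schema = TableSchema.newBuilder()
        .setName("mytable")
        .setInMemory(false)
        .addColumns(ColumnSchema.newBuilder()
            .setName("info")
            .setMaxVersions(3)
            .setCompression("NONE")
            .build())
        .build();
    System.out.println(schema.getColumnsCount() + " column family(ies) defined");
  }
}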
@@ -0,0 +1,27 @@
// Copyright 2009 The Apache Software Foundation
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package org.apache.hadoop.hbase.stargate.protobuf.generated;

message Version {
  optional string stargateVersion = 1;
  optional string jvmVersion = 2;
  optional string osVersion = 3;
  optional string serverVersion = 4;
  optional string jerseyVersion = 5;
}
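For illustration only, not part of this commit: a sketch of filling in the Version message, assuming the usual protoc-generated setters for the fields above (the generated VersionMessage class is not shown here); the version strings are placeholders.

import org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version;

public class VersionExample {
  public static void main(String[] args) {
    Version version = Version.newBuilder()
        .setStargateVersion("0.0.1")                        // placeholder value
        .setJvmVersion(System.getProperty("java.version"))  // report the running JVM
        .setOsVersion(System.getProperty("os.name"))
        .build();
    System.out.println(version.getStargateVersion());
  }
}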
@@ -0,0 +1,443 @@
|
|||
// Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.protobuf.generated;
|
||||
|
||||
public final class CellMessage {
|
||||
private CellMessage() {}
|
||||
public static void registerAllExtensions(
|
||||
com.google.protobuf.ExtensionRegistry registry) {
|
||||
}
|
||||
public static final class Cell extends
|
||||
com.google.protobuf.GeneratedMessage {
|
||||
// Use Cell.newBuilder() to construct.
|
||||
private Cell() {}
|
||||
|
||||
private static final Cell defaultInstance = new Cell();
|
||||
public static Cell getDefaultInstance() {
|
||||
return defaultInstance;
|
||||
}
|
||||
|
||||
public Cell getDefaultInstanceForType() {
|
||||
return defaultInstance;
|
||||
}
|
||||
|
||||
public static final com.google.protobuf.Descriptors.Descriptor
|
||||
getDescriptor() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Cell_descriptor;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||
internalGetFieldAccessorTable() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Cell_fieldAccessorTable;
|
||||
}
|
||||
|
||||
// optional bytes row = 1;
|
||||
public static final int ROW_FIELD_NUMBER = 1;
|
||||
private boolean hasRow;
|
||||
private com.google.protobuf.ByteString row_ = com.google.protobuf.ByteString.EMPTY;
|
||||
public boolean hasRow() { return hasRow; }
|
||||
public com.google.protobuf.ByteString getRow() { return row_; }
|
||||
|
||||
// optional bytes column = 2;
|
||||
public static final int COLUMN_FIELD_NUMBER = 2;
|
||||
private boolean hasColumn;
|
||||
private com.google.protobuf.ByteString column_ = com.google.protobuf.ByteString.EMPTY;
|
||||
public boolean hasColumn() { return hasColumn; }
|
||||
public com.google.protobuf.ByteString getColumn() { return column_; }
|
||||
|
||||
// optional int64 timestamp = 3;
|
||||
public static final int TIMESTAMP_FIELD_NUMBER = 3;
|
||||
private boolean hasTimestamp;
|
||||
private long timestamp_ = 0L;
|
||||
public boolean hasTimestamp() { return hasTimestamp; }
|
||||
public long getTimestamp() { return timestamp_; }
|
||||
|
||||
// optional bytes data = 4;
|
||||
public static final int DATA_FIELD_NUMBER = 4;
|
||||
private boolean hasData;
|
||||
private com.google.protobuf.ByteString data_ = com.google.protobuf.ByteString.EMPTY;
|
||||
public boolean hasData() { return hasData; }
|
||||
public com.google.protobuf.ByteString getData() { return data_; }
|
||||
|
||||
@Override
|
||||
public final boolean isInitialized() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(com.google.protobuf.CodedOutputStream output)
|
||||
throws java.io.IOException {
|
||||
if (hasRow()) {
|
||||
output.writeBytes(1, getRow());
|
||||
}
|
||||
if (hasColumn()) {
|
||||
output.writeBytes(2, getColumn());
|
||||
}
|
||||
if (hasTimestamp()) {
|
||||
output.writeInt64(3, getTimestamp());
|
||||
}
|
||||
if (hasData()) {
|
||||
output.writeBytes(4, getData());
|
||||
}
|
||||
getUnknownFields().writeTo(output);
|
||||
}
|
||||
|
||||
private int memoizedSerializedSize = -1;
|
||||
@Override
|
||||
public int getSerializedSize() {
|
||||
int size = memoizedSerializedSize;
|
||||
if (size != -1) return size;
|
||||
|
||||
size = 0;
|
||||
if (hasRow()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeBytesSize(1, getRow());
|
||||
}
|
||||
if (hasColumn()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeBytesSize(2, getColumn());
|
||||
}
|
||||
if (hasTimestamp()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeInt64Size(3, getTimestamp());
|
||||
}
|
||||
if (hasData()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeBytesSize(4, getData());
|
||||
}
|
||||
size += getUnknownFields().getSerializedSize();
|
||||
memoizedSerializedSize = size;
|
||||
return size;
|
||||
}
|
||||
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseFrom(
|
||||
com.google.protobuf.ByteString data)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseFrom(
|
||||
com.google.protobuf.ByteString data,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseFrom(byte[] data)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseFrom(
|
||||
byte[] data,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseFrom(java.io.InputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseFrom(
|
||||
java.io.InputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseDelimitedFrom(java.io.InputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeDelimitedFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseDelimitedFrom(
|
||||
java.io.InputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseFrom(
|
||||
com.google.protobuf.CodedInputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseFrom(
|
||||
com.google.protobuf.CodedInputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
|
||||
public static Builder newBuilder() { return new Builder(); }
|
||||
public Builder newBuilderForType() { return new Builder(); }
|
||||
public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell prototype) {
|
||||
return new Builder().mergeFrom(prototype);
|
||||
}
|
||||
public Builder toBuilder() { return newBuilder(this); }
|
||||
|
||||
public static final class Builder extends
|
||||
com.google.protobuf.GeneratedMessage.Builder<Builder> {
|
||||
// Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.newBuilder()
|
||||
private Builder() {}
|
||||
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell result = new org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell();
|
||||
|
||||
@Override
|
||||
protected org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell internalGetResult() {
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder clear() {
|
||||
result = new org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell();
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder clone() {
|
||||
return new Builder().mergeFrom(result);
|
||||
}
|
||||
|
||||
@Override
|
||||
public com.google.protobuf.Descriptors.Descriptor
|
||||
getDescriptorForType() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.getDescriptor();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell getDefaultInstanceForType() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.getDefaultInstance();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell build() {
|
||||
if (result != null && !isInitialized()) {
|
||||
throw new com.google.protobuf.UninitializedMessageException(
|
||||
result);
|
||||
}
|
||||
return buildPartial();
|
||||
}
|
||||
|
||||
private org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell buildParsed()
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
if (!isInitialized()) {
|
||||
throw new com.google.protobuf.UninitializedMessageException(
|
||||
result).asInvalidProtocolBufferException();
|
||||
}
|
||||
return buildPartial();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell buildPartial() {
|
||||
if (result == null) {
|
||||
throw new IllegalStateException(
|
||||
"build() has already been called on this Builder."); }
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell returnMe = result;
|
||||
result = null;
|
||||
return returnMe;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(com.google.protobuf.Message other) {
|
||||
if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell) {
|
||||
return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell)other);
|
||||
} else {
|
||||
super.mergeFrom(other);
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell other) {
|
||||
if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.getDefaultInstance()) return this;
|
||||
if (other.hasRow()) {
|
||||
setRow(other.getRow());
|
||||
}
|
||||
if (other.hasColumn()) {
|
||||
setColumn(other.getColumn());
|
||||
}
|
||||
if (other.hasTimestamp()) {
|
||||
setTimestamp(other.getTimestamp());
|
||||
}
|
||||
if (other.hasData()) {
|
||||
setData(other.getData());
|
||||
}
|
||||
this.mergeUnknownFields(other.getUnknownFields());
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(
|
||||
com.google.protobuf.CodedInputStream input)
|
||||
throws java.io.IOException {
|
||||
return mergeFrom(input,
|
||||
com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(
|
||||
com.google.protobuf.CodedInputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
||||
com.google.protobuf.UnknownFieldSet.newBuilder(
|
||||
this.getUnknownFields());
|
||||
while (true) {
|
||||
int tag = input.readTag();
|
||||
switch (tag) {
|
||||
case 0:
|
||||
this.setUnknownFields(unknownFields.build());
|
||||
return this;
|
||||
default: {
|
||||
if (!parseUnknownField(input, unknownFields,
|
||||
extensionRegistry, tag)) {
|
||||
this.setUnknownFields(unknownFields.build());
|
||||
return this;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 10: {
|
||||
setRow(input.readBytes());
|
||||
break;
|
||||
}
|
||||
case 18: {
|
||||
setColumn(input.readBytes());
|
||||
break;
|
||||
}
|
||||
case 24: {
|
||||
setTimestamp(input.readInt64());
|
||||
break;
|
||||
}
|
||||
case 34: {
|
||||
setData(input.readBytes());
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// optional bytes row = 1;
|
||||
public boolean hasRow() {
|
||||
return result.hasRow();
|
||||
}
|
||||
public com.google.protobuf.ByteString getRow() {
|
||||
return result.getRow();
|
||||
}
|
||||
public Builder setRow(com.google.protobuf.ByteString value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.hasRow = true;
|
||||
result.row_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearRow() {
|
||||
result.hasRow = false;
|
||||
result.row_ = com.google.protobuf.ByteString.EMPTY;
|
||||
return this;
|
||||
}
|
||||
|
||||
// optional bytes column = 2;
|
||||
public boolean hasColumn() {
|
||||
return result.hasColumn();
|
||||
}
|
||||
public com.google.protobuf.ByteString getColumn() {
|
||||
return result.getColumn();
|
||||
}
|
||||
public Builder setColumn(com.google.protobuf.ByteString value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.hasColumn = true;
|
||||
result.column_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearColumn() {
|
||||
result.hasColumn = false;
|
||||
result.column_ = com.google.protobuf.ByteString.EMPTY;
|
||||
return this;
|
||||
}
|
||||
|
||||
// optional int64 timestamp = 3;
|
||||
public boolean hasTimestamp() {
|
||||
return result.hasTimestamp();
|
||||
}
|
||||
public long getTimestamp() {
|
||||
return result.getTimestamp();
|
||||
}
|
||||
public Builder setTimestamp(long value) {
|
||||
result.hasTimestamp = true;
|
||||
result.timestamp_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearTimestamp() {
|
||||
result.hasTimestamp = false;
|
||||
result.timestamp_ = 0L;
|
||||
return this;
|
||||
}
|
||||
|
||||
// optional bytes data = 4;
|
||||
public boolean hasData() {
|
||||
return result.hasData();
|
||||
}
|
||||
public com.google.protobuf.ByteString getData() {
|
||||
return result.getData();
|
||||
}
|
||||
public Builder setData(com.google.protobuf.ByteString value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.hasData = true;
|
||||
result.data_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearData() {
|
||||
result.hasData = false;
|
||||
result.data_ = com.google.protobuf.ByteString.EMPTY;
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
static {
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.getDescriptor();
|
||||
}
|
||||
}
|
||||
|
||||
private static com.google.protobuf.Descriptors.Descriptor
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Cell_descriptor;
|
||||
private static
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Cell_fieldAccessorTable;
|
||||
|
||||
public static com.google.protobuf.Descriptors.FileDescriptor
|
||||
getDescriptor() {
|
||||
return descriptor;
|
||||
}
|
||||
private static com.google.protobuf.Descriptors.FileDescriptor
|
||||
descriptor;
|
||||
static {
|
||||
java.lang.String descriptorData =
|
||||
"\n\021CellMessage.proto\0223org.apache.hadoop.h" +
|
||||
"base.stargate.protobuf.generated\"D\n\004Cell" +
|
||||
"\022\013\n\003row\030\001 \001(\014\022\016\n\006column\030\002 \001(\014\022\021\n\ttimesta" +
|
||||
"mp\030\003 \001(\003\022\014\n\004data\030\004 \001(\014";
|
||||
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
|
||||
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
|
||||
public com.google.protobuf.ExtensionRegistry assignDescriptors(
|
||||
com.google.protobuf.Descriptors.FileDescriptor root) {
|
||||
descriptor = root;
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Cell_descriptor =
|
||||
getDescriptor().getMessageTypes().get(0);
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Cell_fieldAccessorTable = new
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Cell_descriptor,
|
||||
new java.lang.String[] { "Row", "Column", "Timestamp", "Data", },
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.class,
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.Builder.class);
|
||||
return null;
|
||||
}
|
||||
};
|
||||
com.google.protobuf.Descriptors.FileDescriptor
|
||||
.internalBuildGeneratedFileFrom(descriptorData,
|
||||
new com.google.protobuf.Descriptors.FileDescriptor[] {
|
||||
}, assigner);
|
||||
}
|
||||
}
|
|
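For illustration only, not part of this commit: a minimal sketch of the wire-format round trip that the generated Cell class above supports, using only methods visible in the generated code (getSerializedSize(), writeTo(CodedOutputStream), parseFrom(byte[])); the row, column, and value strings are made-up sample data.

import com.google.protobuf.ByteString;
import com.google.protobuf.CodedOutputStream;
import org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell;

public class CellWireFormatExample {
  public static void main(String[] args) throws Exception {
    Cell cell = Cell.newBuilder()
        .setRow(ByteString.copyFromUtf8("row1"))
        .setColumn(ByteString.copyFromUtf8("info:name"))
        .setData(ByteString.copyFromUtf8("value1"))
        .build();

    // size the buffer with getSerializedSize() and write through CodedOutputStream,
    // the same pair of calls the generated writeTo()/getSerializedSize() expose
    byte[] buf = new byte[cell.getSerializedSize()];
    CodedOutputStream out = CodedOutputStream.newInstance(buf);
    cell.writeTo(out);
    out.checkNoSpaceLeft();

    Cell decoded = Cell.parseFrom(buf);
    System.out.println(decoded.getRow().toStringUtf8());
  }
}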
@@ -0,0 +1,743 @@
|
|||
// Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.protobuf.generated;
|
||||
|
||||
public final class CellSetMessage {
|
||||
private CellSetMessage() {}
|
||||
public static void registerAllExtensions(
|
||||
com.google.protobuf.ExtensionRegistry registry) {
|
||||
}
|
||||
public static final class CellSet extends
|
||||
com.google.protobuf.GeneratedMessage {
|
||||
// Use CellSet.newBuilder() to construct.
|
||||
private CellSet() {}
|
||||
|
||||
private static final CellSet defaultInstance = new CellSet();
|
||||
public static CellSet getDefaultInstance() {
|
||||
return defaultInstance;
|
||||
}
|
||||
|
||||
public CellSet getDefaultInstanceForType() {
|
||||
return defaultInstance;
|
||||
}
|
||||
|
||||
public static final com.google.protobuf.Descriptors.Descriptor
|
||||
getDescriptor() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_descriptor;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||
internalGetFieldAccessorTable() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_fieldAccessorTable;
|
||||
}
|
||||
|
||||
public static final class Row extends
|
||||
com.google.protobuf.GeneratedMessage {
|
||||
// Use Row.newBuilder() to construct.
|
||||
private Row() {}
|
||||
|
||||
private static final Row defaultInstance = new Row();
|
||||
public static Row getDefaultInstance() {
|
||||
return defaultInstance;
|
||||
}
|
||||
|
||||
public Row getDefaultInstanceForType() {
|
||||
return defaultInstance;
|
||||
}
|
||||
|
||||
public static final com.google.protobuf.Descriptors.Descriptor
|
||||
getDescriptor() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_Row_descriptor;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||
internalGetFieldAccessorTable() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_Row_fieldAccessorTable;
|
||||
}
|
||||
|
||||
// required bytes key = 1;
|
||||
public static final int KEY_FIELD_NUMBER = 1;
|
||||
private boolean hasKey;
|
||||
private com.google.protobuf.ByteString key_ = com.google.protobuf.ByteString.EMPTY;
|
||||
public boolean hasKey() { return hasKey; }
|
||||
public com.google.protobuf.ByteString getKey() { return key_; }
|
||||
|
||||
// repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.Cell values = 2;
|
||||
public static final int VALUES_FIELD_NUMBER = 2;
|
||||
private java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell> values_ =
|
||||
java.util.Collections.emptyList();
|
||||
public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell> getValuesList() {
|
||||
return values_;
|
||||
}
|
||||
public int getValuesCount() { return values_.size(); }
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell getValues(int index) {
|
||||
return values_.get(index);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean isInitialized() {
|
||||
if (!hasKey) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(com.google.protobuf.CodedOutputStream output)
|
||||
throws java.io.IOException {
|
||||
if (hasKey()) {
|
||||
output.writeBytes(1, getKey());
|
||||
}
|
||||
for (org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell element : getValuesList()) {
|
||||
output.writeMessage(2, element);
|
||||
}
|
||||
getUnknownFields().writeTo(output);
|
||||
}
|
||||
|
||||
private int memoizedSerializedSize = -1;
|
||||
@Override
|
||||
public int getSerializedSize() {
|
||||
int size = memoizedSerializedSize;
|
||||
if (size != -1) return size;
|
||||
|
||||
size = 0;
|
||||
if (hasKey()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeBytesSize(1, getKey());
|
||||
}
|
||||
for (org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell element : getValuesList()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeMessageSize(2, element);
|
||||
}
|
||||
size += getUnknownFields().getSerializedSize();
|
||||
memoizedSerializedSize = size;
|
||||
return size;
|
||||
}
|
||||
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
|
||||
com.google.protobuf.ByteString data)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
|
||||
com.google.protobuf.ByteString data,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(byte[] data)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
|
||||
byte[] data,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(java.io.InputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
|
||||
java.io.InputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseDelimitedFrom(java.io.InputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeDelimitedFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseDelimitedFrom(
|
||||
java.io.InputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
|
||||
com.google.protobuf.CodedInputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
|
||||
com.google.protobuf.CodedInputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
|
||||
public static Builder newBuilder() { return new Builder(); }
|
||||
public Builder newBuilderForType() { return new Builder(); }
|
||||
public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row prototype) {
|
||||
return new Builder().mergeFrom(prototype);
|
||||
}
|
||||
public Builder toBuilder() { return newBuilder(this); }
|
||||
|
||||
public static final class Builder extends
|
||||
com.google.protobuf.GeneratedMessage.Builder<Builder> {
|
||||
// Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.newBuilder()
|
||||
private Builder() {}
|
||||
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row result = new org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row();
|
||||
|
||||
@Override
|
||||
protected org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row internalGetResult() {
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder clear() {
|
||||
result = new org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row();
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder clone() {
|
||||
return new Builder().mergeFrom(result);
|
||||
}
|
||||
|
||||
@Override
|
||||
public com.google.protobuf.Descriptors.Descriptor
|
||||
getDescriptorForType() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.getDescriptor();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row getDefaultInstanceForType() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.getDefaultInstance();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row build() {
|
||||
if (result != null && !isInitialized()) {
|
||||
throw new com.google.protobuf.UninitializedMessageException(
|
||||
result);
|
||||
}
|
||||
return buildPartial();
|
||||
}
|
||||
|
||||
private org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row buildParsed()
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
if (!isInitialized()) {
|
||||
throw new com.google.protobuf.UninitializedMessageException(
|
||||
result).asInvalidProtocolBufferException();
|
||||
}
|
||||
return buildPartial();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row buildPartial() {
|
||||
if (result == null) {
|
||||
throw new IllegalStateException(
|
||||
"build() has already been called on this Builder."); }
|
||||
if (result.values_ != java.util.Collections.EMPTY_LIST) {
|
||||
result.values_ =
|
||||
java.util.Collections.unmodifiableList(result.values_);
|
||||
}
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row returnMe = result;
|
||||
result = null;
|
||||
return returnMe;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(com.google.protobuf.Message other) {
|
||||
if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row) {
|
||||
return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row)other);
|
||||
} else {
|
||||
super.mergeFrom(other);
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row other) {
|
||||
if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.getDefaultInstance()) return this;
|
||||
if (other.hasKey()) {
|
||||
setKey(other.getKey());
|
||||
}
|
||||
if (!other.values_.isEmpty()) {
|
||||
if (result.values_.isEmpty()) {
|
||||
result.values_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell>();
|
||||
}
|
||||
result.values_.addAll(other.values_);
|
||||
}
|
||||
this.mergeUnknownFields(other.getUnknownFields());
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(
|
||||
com.google.protobuf.CodedInputStream input)
|
||||
throws java.io.IOException {
|
||||
return mergeFrom(input,
|
||||
com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(
|
||||
com.google.protobuf.CodedInputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
||||
com.google.protobuf.UnknownFieldSet.newBuilder(
|
||||
this.getUnknownFields());
|
||||
while (true) {
|
||||
int tag = input.readTag();
|
||||
switch (tag) {
|
||||
case 0:
|
||||
this.setUnknownFields(unknownFields.build());
|
||||
return this;
|
||||
default: {
|
||||
if (!parseUnknownField(input, unknownFields,
|
||||
extensionRegistry, tag)) {
|
||||
this.setUnknownFields(unknownFields.build());
|
||||
return this;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 10: {
|
||||
setKey(input.readBytes());
|
||||
break;
|
||||
}
|
||||
case 18: {
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.Builder subBuilder = org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.newBuilder();
|
||||
input.readMessage(subBuilder, extensionRegistry);
|
||||
addValues(subBuilder.buildPartial());
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// required bytes key = 1;
|
||||
public boolean hasKey() {
|
||||
return result.hasKey();
|
||||
}
|
||||
public com.google.protobuf.ByteString getKey() {
|
||||
return result.getKey();
|
||||
}
|
||||
public Builder setKey(com.google.protobuf.ByteString value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.hasKey = true;
|
||||
result.key_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearKey() {
|
||||
result.hasKey = false;
|
||||
result.key_ = com.google.protobuf.ByteString.EMPTY;
|
||||
return this;
|
||||
}
|
||||
|
||||
// repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.Cell values = 2;
|
||||
public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell> getValuesList() {
|
||||
return java.util.Collections.unmodifiableList(result.values_);
|
||||
}
|
||||
public int getValuesCount() {
|
||||
return result.getValuesCount();
|
||||
}
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell getValues(int index) {
|
||||
return result.getValues(index);
|
||||
}
|
||||
public Builder setValues(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.values_.set(index, value);
|
||||
return this;
|
||||
}
|
||||
public Builder setValues(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.Builder builderForValue) {
|
||||
result.values_.set(index, builderForValue.build());
|
||||
return this;
|
||||
}
|
||||
public Builder addValues(org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
if (result.values_.isEmpty()) {
|
||||
result.values_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell>();
|
||||
}
|
||||
result.values_.add(value);
|
||||
return this;
|
||||
}
|
||||
public Builder addValues(org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.Builder builderForValue) {
|
||||
if (result.values_.isEmpty()) {
|
||||
result.values_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell>();
|
||||
}
|
||||
result.values_.add(builderForValue.build());
|
||||
return this;
|
||||
}
|
||||
public Builder addAllValues(
|
||||
java.lang.Iterable<? extends org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell> values) {
|
||||
if (result.values_.isEmpty()) {
|
||||
result.values_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell>();
|
||||
}
|
||||
super.addAll(values, result.values_);
|
||||
return this;
|
||||
}
|
||||
public Builder clearValues() {
|
||||
result.values_ = java.util.Collections.emptyList();
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
static {
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.getDescriptor();
|
||||
}
|
||||
}
|
||||
|
||||
// repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.CellSet.Row rows = 1;
|
||||
public static final int ROWS_FIELD_NUMBER = 1;
|
||||
private java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row> rows_ =
|
||||
java.util.Collections.emptyList();
|
||||
public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row> getRowsList() {
|
||||
return rows_;
|
||||
}
|
||||
public int getRowsCount() { return rows_.size(); }
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row getRows(int index) {
|
||||
return rows_.get(index);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean isInitialized() {
|
||||
for (org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row element : getRowsList()) {
|
||||
if (!element.isInitialized()) return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(com.google.protobuf.CodedOutputStream output)
|
||||
throws java.io.IOException {
|
||||
for (org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row element : getRowsList()) {
|
||||
output.writeMessage(1, element);
|
||||
}
|
||||
getUnknownFields().writeTo(output);
|
||||
}
|
||||
|
||||
private int memoizedSerializedSize = -1;
|
||||
@Override
|
||||
public int getSerializedSize() {
|
||||
int size = memoizedSerializedSize;
|
||||
if (size != -1) return size;
|
||||
|
||||
size = 0;
|
||||
for (org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row element : getRowsList()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeMessageSize(1, element);
|
||||
}
|
||||
size += getUnknownFields().getSerializedSize();
|
||||
memoizedSerializedSize = size;
|
||||
return size;
|
||||
}
|
||||
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseFrom(
|
||||
com.google.protobuf.ByteString data)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseFrom(
|
||||
com.google.protobuf.ByteString data,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseFrom(byte[] data)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseFrom(
|
||||
byte[] data,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseFrom(java.io.InputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseFrom(
|
||||
java.io.InputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseDelimitedFrom(java.io.InputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeDelimitedFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseDelimitedFrom(
|
||||
java.io.InputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseFrom(
|
||||
com.google.protobuf.CodedInputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseFrom(
|
||||
com.google.protobuf.CodedInputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
|
||||
public static Builder newBuilder() { return new Builder(); }
|
||||
public Builder newBuilderForType() { return new Builder(); }
|
||||
public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet prototype) {
|
||||
return new Builder().mergeFrom(prototype);
|
||||
}
|
||||
public Builder toBuilder() { return newBuilder(this); }
|
||||
|
||||
public static final class Builder extends
|
||||
com.google.protobuf.GeneratedMessage.Builder<Builder> {
|
||||
// Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.newBuilder()
|
||||
private Builder() {}
|
||||
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet result = new org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet();
|
||||
|
||||
@Override
|
||||
protected org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet internalGetResult() {
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder clear() {
|
||||
result = new org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet();
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder clone() {
|
||||
return new Builder().mergeFrom(result);
|
||||
}
|
||||
|
||||
@Override
|
||||
public com.google.protobuf.Descriptors.Descriptor
|
||||
getDescriptorForType() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.getDescriptor();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet getDefaultInstanceForType() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.getDefaultInstance();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet build() {
|
||||
if (result != null && !isInitialized()) {
|
||||
throw new com.google.protobuf.UninitializedMessageException(
|
||||
result);
|
||||
}
|
||||
return buildPartial();
|
||||
}
|
||||
|
||||
private org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet buildParsed()
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
if (!isInitialized()) {
|
||||
throw new com.google.protobuf.UninitializedMessageException(
|
||||
result).asInvalidProtocolBufferException();
|
||||
}
|
||||
return buildPartial();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet buildPartial() {
|
||||
if (result == null) {
|
||||
throw new IllegalStateException(
|
||||
"build() has already been called on this Builder."); }
|
||||
if (result.rows_ != java.util.Collections.EMPTY_LIST) {
|
||||
result.rows_ =
|
||||
java.util.Collections.unmodifiableList(result.rows_);
|
||||
}
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet returnMe = result;
|
||||
result = null;
|
||||
return returnMe;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(com.google.protobuf.Message other) {
|
||||
if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet) {
|
||||
return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet)other);
|
||||
} else {
|
||||
super.mergeFrom(other);
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet other) {
|
||||
if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.getDefaultInstance()) return this;
|
||||
if (!other.rows_.isEmpty()) {
|
||||
if (result.rows_.isEmpty()) {
|
||||
result.rows_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row>();
|
||||
}
|
||||
result.rows_.addAll(other.rows_);
|
||||
}
|
||||
this.mergeUnknownFields(other.getUnknownFields());
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(
|
||||
com.google.protobuf.CodedInputStream input)
|
||||
throws java.io.IOException {
|
||||
return mergeFrom(input,
|
||||
com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(
|
||||
com.google.protobuf.CodedInputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
||||
com.google.protobuf.UnknownFieldSet.newBuilder(
|
||||
this.getUnknownFields());
|
||||
while (true) {
|
||||
int tag = input.readTag();
|
||||
switch (tag) {
|
||||
case 0:
|
||||
this.setUnknownFields(unknownFields.build());
|
||||
return this;
|
||||
default: {
|
||||
if (!parseUnknownField(input, unknownFields,
|
||||
extensionRegistry, tag)) {
|
||||
this.setUnknownFields(unknownFields.build());
|
||||
return this;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 10: {
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.Builder subBuilder = org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.newBuilder();
|
||||
input.readMessage(subBuilder, extensionRegistry);
|
||||
addRows(subBuilder.buildPartial());
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.CellSet.Row rows = 1;
|
||||
public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row> getRowsList() {
|
||||
return java.util.Collections.unmodifiableList(result.rows_);
|
||||
}
|
||||
public int getRowsCount() {
|
||||
return result.getRowsCount();
|
||||
}
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row getRows(int index) {
|
||||
return result.getRows(index);
|
||||
}
|
||||
public Builder setRows(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.rows_.set(index, value);
|
||||
return this;
|
||||
}
|
||||
public Builder setRows(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.Builder builderForValue) {
|
||||
result.rows_.set(index, builderForValue.build());
|
||||
return this;
|
||||
}
|
||||
public Builder addRows(org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
if (result.rows_.isEmpty()) {
|
||||
result.rows_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row>();
|
||||
}
|
||||
result.rows_.add(value);
|
||||
return this;
|
||||
}
|
||||
public Builder addRows(org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.Builder builderForValue) {
|
||||
if (result.rows_.isEmpty()) {
|
||||
result.rows_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row>();
|
||||
}
|
||||
result.rows_.add(builderForValue.build());
|
||||
return this;
|
||||
}
|
||||
public Builder addAllRows(
|
||||
java.lang.Iterable<? extends org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row> values) {
|
||||
if (result.rows_.isEmpty()) {
|
||||
result.rows_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row>();
|
||||
}
|
||||
super.addAll(values, result.rows_);
|
||||
return this;
|
||||
}
|
||||
public Builder clearRows() {
|
||||
result.rows_ = java.util.Collections.emptyList();
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
static {
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.getDescriptor();
|
||||
}
|
||||
}
|
||||
|
||||
private static com.google.protobuf.Descriptors.Descriptor
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_descriptor;
|
||||
private static
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_fieldAccessorTable;
|
||||
private static com.google.protobuf.Descriptors.Descriptor
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_Row_descriptor;
|
||||
private static
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_Row_fieldAccessorTable;
|
||||
|
||||
public static com.google.protobuf.Descriptors.FileDescriptor
|
||||
getDescriptor() {
|
||||
return descriptor;
|
||||
}
|
||||
private static com.google.protobuf.Descriptors.FileDescriptor
|
||||
descriptor;
|
||||
static {
|
||||
java.lang.String descriptorData =
|
||||
"\n\024CellSetMessage.proto\0223org.apache.hadoo" +
|
||||
"p.hbase.stargate.protobuf.generated\032\021Cel" +
|
||||
"lMessage.proto\"\270\001\n\007CellSet\022N\n\004rows\030\001 \003(\013" +
|
||||
"2@.org.apache.hadoop.hbase.stargate.prot" +
|
||||
"obuf.generated.CellSet.Row\032]\n\003Row\022\013\n\003key" +
|
||||
"\030\001 \002(\014\022I\n\006values\030\002 \003(\01329.org.apache.hado" +
|
||||
"op.hbase.stargate.protobuf.generated.Cel" +
|
||||
"l";
|
||||
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
|
||||
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
|
||||
public com.google.protobuf.ExtensionRegistry assignDescriptors(
|
||||
com.google.protobuf.Descriptors.FileDescriptor root) {
|
||||
descriptor = root;
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_descriptor =
|
||||
getDescriptor().getMessageTypes().get(0);
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_fieldAccessorTable = new
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_descriptor,
|
||||
new java.lang.String[] { "Rows", },
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.class,
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Builder.class);
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_Row_descriptor =
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_descriptor.getNestedTypes().get(0);
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_Row_fieldAccessorTable = new
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_Row_descriptor,
|
||||
new java.lang.String[] { "Key", "Values", },
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.class,
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.Builder.class);
|
||||
return null;
|
||||
}
|
||||
};
|
||||
com.google.protobuf.Descriptors.FileDescriptor
|
||||
.internalBuildGeneratedFileFrom(descriptorData,
|
||||
new com.google.protobuf.Descriptors.FileDescriptor[] {
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.getDescriptor(),
|
||||
}, assigner);
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,861 @@
|
|||
// Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.protobuf.generated;
|
||||
|
||||
public final class ColumnSchemaMessage {
|
||||
private ColumnSchemaMessage() {}
|
||||
public static void registerAllExtensions(
|
||||
com.google.protobuf.ExtensionRegistry registry) {
|
||||
}
|
||||
public static final class ColumnSchema extends
|
||||
com.google.protobuf.GeneratedMessage {
|
||||
// Use ColumnSchema.newBuilder() to construct.
|
||||
private ColumnSchema() {}
|
||||
|
||||
private static final ColumnSchema defaultInstance = new ColumnSchema();
|
||||
public static ColumnSchema getDefaultInstance() {
|
||||
return defaultInstance;
|
||||
}
|
||||
|
||||
public ColumnSchema getDefaultInstanceForType() {
|
||||
return defaultInstance;
|
||||
}
|
||||
|
||||
public static final com.google.protobuf.Descriptors.Descriptor
|
||||
getDescriptor() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_descriptor;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||
internalGetFieldAccessorTable() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_fieldAccessorTable;
|
||||
}
|
||||
|
||||
public static final class Attribute extends
|
||||
com.google.protobuf.GeneratedMessage {
|
||||
// Use Attribute.newBuilder() to construct.
|
||||
private Attribute() {}
|
||||
|
||||
private static final Attribute defaultInstance = new Attribute();
|
||||
public static Attribute getDefaultInstance() {
|
||||
return defaultInstance;
|
||||
}
|
||||
|
||||
public Attribute getDefaultInstanceForType() {
|
||||
return defaultInstance;
|
||||
}
|
||||
|
||||
public static final com.google.protobuf.Descriptors.Descriptor
|
||||
getDescriptor() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_Attribute_descriptor;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||
internalGetFieldAccessorTable() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_Attribute_fieldAccessorTable;
|
||||
}
|
||||
|
||||
// required string name = 1;
|
||||
public static final int NAME_FIELD_NUMBER = 1;
|
||||
private boolean hasName;
|
||||
private java.lang.String name_ = "";
|
||||
public boolean hasName() { return hasName; }
|
||||
public java.lang.String getName() { return name_; }
|
||||
|
||||
// required string value = 2;
|
||||
public static final int VALUE_FIELD_NUMBER = 2;
|
||||
private boolean hasValue;
|
||||
private java.lang.String value_ = "";
|
||||
public boolean hasValue() { return hasValue; }
|
||||
public java.lang.String getValue() { return value_; }
|
||||
|
||||
@Override
|
||||
public final boolean isInitialized() {
|
||||
if (!hasName) return false;
|
||||
if (!hasValue) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(com.google.protobuf.CodedOutputStream output)
|
||||
throws java.io.IOException {
|
||||
if (hasName()) {
|
||||
output.writeString(1, getName());
|
||||
}
|
||||
if (hasValue()) {
|
||||
output.writeString(2, getValue());
|
||||
}
|
||||
getUnknownFields().writeTo(output);
|
||||
}
|
||||
|
||||
private int memoizedSerializedSize = -1;
|
||||
@Override
|
||||
public int getSerializedSize() {
|
||||
int size = memoizedSerializedSize;
|
||||
if (size != -1) return size;
|
||||
|
||||
size = 0;
|
||||
if (hasName()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeStringSize(1, getName());
|
||||
}
|
||||
if (hasValue()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeStringSize(2, getValue());
|
||||
}
|
||||
size += getUnknownFields().getSerializedSize();
|
||||
memoizedSerializedSize = size;
|
||||
return size;
|
||||
}
|
||||
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
|
||||
com.google.protobuf.ByteString data)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
|
||||
com.google.protobuf.ByteString data,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(byte[] data)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
|
||||
byte[] data,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(java.io.InputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
|
||||
java.io.InputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseDelimitedFrom(java.io.InputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeDelimitedFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseDelimitedFrom(
|
||||
java.io.InputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
|
||||
com.google.protobuf.CodedInputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
|
||||
com.google.protobuf.CodedInputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
|
||||
public static Builder newBuilder() { return new Builder(); }
|
||||
public Builder newBuilderForType() { return new Builder(); }
|
||||
public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute prototype) {
|
||||
return new Builder().mergeFrom(prototype);
|
||||
}
|
||||
public Builder toBuilder() { return newBuilder(this); }
|
||||
|
||||
public static final class Builder extends
|
||||
com.google.protobuf.GeneratedMessage.Builder<Builder> {
|
||||
// Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.newBuilder()
|
||||
private Builder() {}
|
||||
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute result = new org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute();
|
||||
|
||||
@Override
|
||||
protected org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute internalGetResult() {
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder clear() {
|
||||
result = new org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute();
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder clone() {
|
||||
return new Builder().mergeFrom(result);
|
||||
}
|
||||
|
||||
@Override
|
||||
public com.google.protobuf.Descriptors.Descriptor
|
||||
getDescriptorForType() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.getDescriptor();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute getDefaultInstanceForType() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.getDefaultInstance();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute build() {
|
||||
if (result != null && !isInitialized()) {
|
||||
throw new com.google.protobuf.UninitializedMessageException(
|
||||
result);
|
||||
}
|
||||
return buildPartial();
|
||||
}
|
||||
|
||||
private org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute buildParsed()
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
if (!isInitialized()) {
|
||||
throw new com.google.protobuf.UninitializedMessageException(
|
||||
result).asInvalidProtocolBufferException();
|
||||
}
|
||||
return buildPartial();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute buildPartial() {
|
||||
if (result == null) {
|
||||
throw new IllegalStateException(
|
||||
"build() has already been called on this Builder."); }
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute returnMe = result;
|
||||
result = null;
|
||||
return returnMe;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(com.google.protobuf.Message other) {
|
||||
if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute) {
|
||||
return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute)other);
|
||||
} else {
|
||||
super.mergeFrom(other);
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute other) {
|
||||
if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.getDefaultInstance()) return this;
|
||||
if (other.hasName()) {
|
||||
setName(other.getName());
|
||||
}
|
||||
if (other.hasValue()) {
|
||||
setValue(other.getValue());
|
||||
}
|
||||
this.mergeUnknownFields(other.getUnknownFields());
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(
|
||||
com.google.protobuf.CodedInputStream input)
|
||||
throws java.io.IOException {
|
||||
return mergeFrom(input,
|
||||
com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(
|
||||
com.google.protobuf.CodedInputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
||||
com.google.protobuf.UnknownFieldSet.newBuilder(
|
||||
this.getUnknownFields());
|
||||
while (true) {
|
||||
int tag = input.readTag();
|
||||
switch (tag) {
|
||||
case 0:
|
||||
this.setUnknownFields(unknownFields.build());
|
||||
return this;
|
||||
default: {
|
||||
if (!parseUnknownField(input, unknownFields,
|
||||
extensionRegistry, tag)) {
|
||||
this.setUnknownFields(unknownFields.build());
|
||||
return this;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 10: {
|
||||
setName(input.readString());
|
||||
break;
|
||||
}
|
||||
case 18: {
|
||||
setValue(input.readString());
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// required string name = 1;
|
||||
public boolean hasName() {
|
||||
return result.hasName();
|
||||
}
|
||||
public java.lang.String getName() {
|
||||
return result.getName();
|
||||
}
|
||||
public Builder setName(java.lang.String value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.hasName = true;
|
||||
result.name_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearName() {
|
||||
result.hasName = false;
|
||||
result.name_ = "";
|
||||
return this;
|
||||
}
|
||||
|
||||
// required string value = 2;
|
||||
public boolean hasValue() {
|
||||
return result.hasValue();
|
||||
}
|
||||
public java.lang.String getValue() {
|
||||
return result.getValue();
|
||||
}
|
||||
public Builder setValue(java.lang.String value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.hasValue = true;
|
||||
result.value_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearValue() {
|
||||
result.hasValue = false;
|
||||
result.value_ = "";
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
static {
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.getDescriptor();
|
||||
}
|
||||
}
|
||||
|
||||
// optional string name = 1;
|
||||
public static final int NAME_FIELD_NUMBER = 1;
|
||||
private boolean hasName;
|
||||
private java.lang.String name_ = "";
|
||||
public boolean hasName() { return hasName; }
|
||||
public java.lang.String getName() { return name_; }
|
||||
|
||||
// repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchema.Attribute attrs = 2;
|
||||
public static final int ATTRS_FIELD_NUMBER = 2;
|
||||
private java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute> attrs_ =
|
||||
java.util.Collections.emptyList();
|
||||
public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute> getAttrsList() {
|
||||
return attrs_;
|
||||
}
|
||||
public int getAttrsCount() { return attrs_.size(); }
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute getAttrs(int index) {
|
||||
return attrs_.get(index);
|
||||
}
|
||||
|
||||
// optional int32 ttl = 3;
|
||||
public static final int TTL_FIELD_NUMBER = 3;
|
||||
private boolean hasTtl;
|
||||
private int ttl_ = 0;
|
||||
public boolean hasTtl() { return hasTtl; }
|
||||
public int getTtl() { return ttl_; }
|
||||
|
||||
// optional int32 maxVersions = 4;
|
||||
public static final int MAXVERSIONS_FIELD_NUMBER = 4;
|
||||
private boolean hasMaxVersions;
|
||||
private int maxVersions_ = 0;
|
||||
public boolean hasMaxVersions() { return hasMaxVersions; }
|
||||
public int getMaxVersions() { return maxVersions_; }
|
||||
|
||||
// optional string compression = 5;
|
||||
public static final int COMPRESSION_FIELD_NUMBER = 5;
|
||||
private boolean hasCompression;
|
||||
private java.lang.String compression_ = "";
|
||||
public boolean hasCompression() { return hasCompression; }
|
||||
public java.lang.String getCompression() { return compression_; }
|
||||
|
||||
@Override
|
||||
public final boolean isInitialized() {
|
||||
for (org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute element : getAttrsList()) {
|
||||
if (!element.isInitialized()) return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(com.google.protobuf.CodedOutputStream output)
|
||||
throws java.io.IOException {
|
||||
if (hasName()) {
|
||||
output.writeString(1, getName());
|
||||
}
|
||||
for (org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute element : getAttrsList()) {
|
||||
output.writeMessage(2, element);
|
||||
}
|
||||
if (hasTtl()) {
|
||||
output.writeInt32(3, getTtl());
|
||||
}
|
||||
if (hasMaxVersions()) {
|
||||
output.writeInt32(4, getMaxVersions());
|
||||
}
|
||||
if (hasCompression()) {
|
||||
output.writeString(5, getCompression());
|
||||
}
|
||||
getUnknownFields().writeTo(output);
|
||||
}
|
||||
|
||||
private int memoizedSerializedSize = -1;
|
||||
@Override
|
||||
public int getSerializedSize() {
|
||||
int size = memoizedSerializedSize;
|
||||
if (size != -1) return size;
|
||||
|
||||
size = 0;
|
||||
if (hasName()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeStringSize(1, getName());
|
||||
}
|
||||
for (org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute element : getAttrsList()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeMessageSize(2, element);
|
||||
}
|
||||
if (hasTtl()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeInt32Size(3, getTtl());
|
||||
}
|
||||
if (hasMaxVersions()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeInt32Size(4, getMaxVersions());
|
||||
}
|
||||
if (hasCompression()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeStringSize(5, getCompression());
|
||||
}
|
||||
size += getUnknownFields().getSerializedSize();
|
||||
memoizedSerializedSize = size;
|
||||
return size;
|
||||
}
|
||||
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
|
||||
com.google.protobuf.ByteString data)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
|
||||
com.google.protobuf.ByteString data,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(byte[] data)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
|
||||
byte[] data,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(java.io.InputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
|
||||
java.io.InputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseDelimitedFrom(java.io.InputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeDelimitedFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseDelimitedFrom(
|
||||
java.io.InputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
|
||||
com.google.protobuf.CodedInputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
|
||||
com.google.protobuf.CodedInputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
|
||||
public static Builder newBuilder() { return new Builder(); }
|
||||
public Builder newBuilderForType() { return new Builder(); }
|
||||
public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema prototype) {
|
||||
return new Builder().mergeFrom(prototype);
|
||||
}
|
||||
public Builder toBuilder() { return newBuilder(this); }
|
||||
|
||||
public static final class Builder extends
|
||||
com.google.protobuf.GeneratedMessage.Builder<Builder> {
|
||||
// Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.newBuilder()
|
||||
private Builder() {}
|
||||
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema result = new org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema();
|
||||
|
||||
@Override
|
||||
protected org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema internalGetResult() {
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder clear() {
|
||||
result = new org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema();
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder clone() {
|
||||
return new Builder().mergeFrom(result);
|
||||
}
|
||||
|
||||
@Override
|
||||
public com.google.protobuf.Descriptors.Descriptor
|
||||
getDescriptorForType() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.getDescriptor();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema getDefaultInstanceForType() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.getDefaultInstance();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema build() {
|
||||
if (result != null && !isInitialized()) {
|
||||
throw new com.google.protobuf.UninitializedMessageException(
|
||||
result);
|
||||
}
|
||||
return buildPartial();
|
||||
}
|
||||
|
||||
private org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema buildParsed()
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
if (!isInitialized()) {
|
||||
throw new com.google.protobuf.UninitializedMessageException(
|
||||
result).asInvalidProtocolBufferException();
|
||||
}
|
||||
return buildPartial();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema buildPartial() {
|
||||
if (result == null) {
|
||||
throw new IllegalStateException(
|
||||
"build() has already been called on this Builder."); }
|
||||
if (result.attrs_ != java.util.Collections.EMPTY_LIST) {
|
||||
result.attrs_ =
|
||||
java.util.Collections.unmodifiableList(result.attrs_);
|
||||
}
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema returnMe = result;
|
||||
result = null;
|
||||
return returnMe;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(com.google.protobuf.Message other) {
|
||||
if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema) {
|
||||
return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema)other);
|
||||
} else {
|
||||
super.mergeFrom(other);
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema other) {
|
||||
if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.getDefaultInstance()) return this;
|
||||
if (other.hasName()) {
|
||||
setName(other.getName());
|
||||
}
|
||||
if (!other.attrs_.isEmpty()) {
|
||||
if (result.attrs_.isEmpty()) {
|
||||
result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute>();
|
||||
}
|
||||
result.attrs_.addAll(other.attrs_);
|
||||
}
|
||||
if (other.hasTtl()) {
|
||||
setTtl(other.getTtl());
|
||||
}
|
||||
if (other.hasMaxVersions()) {
|
||||
setMaxVersions(other.getMaxVersions());
|
||||
}
|
||||
if (other.hasCompression()) {
|
||||
setCompression(other.getCompression());
|
||||
}
|
||||
this.mergeUnknownFields(other.getUnknownFields());
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(
|
||||
com.google.protobuf.CodedInputStream input)
|
||||
throws java.io.IOException {
|
||||
return mergeFrom(input,
|
||||
com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(
|
||||
com.google.protobuf.CodedInputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
||||
com.google.protobuf.UnknownFieldSet.newBuilder(
|
||||
this.getUnknownFields());
|
||||
while (true) {
|
||||
int tag = input.readTag();
|
||||
switch (tag) {
|
||||
case 0:
|
||||
this.setUnknownFields(unknownFields.build());
|
||||
return this;
|
||||
default: {
|
||||
if (!parseUnknownField(input, unknownFields,
|
||||
extensionRegistry, tag)) {
|
||||
this.setUnknownFields(unknownFields.build());
|
||||
return this;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 10: {
|
||||
setName(input.readString());
|
||||
break;
|
||||
}
|
||||
case 18: {
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder subBuilder = org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.newBuilder();
|
||||
input.readMessage(subBuilder, extensionRegistry);
|
||||
addAttrs(subBuilder.buildPartial());
|
||||
break;
|
||||
}
|
||||
case 24: {
|
||||
setTtl(input.readInt32());
|
||||
break;
|
||||
}
|
||||
case 32: {
|
||||
setMaxVersions(input.readInt32());
|
||||
break;
|
||||
}
|
||||
case 42: {
|
||||
setCompression(input.readString());
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// optional string name = 1;
|
||||
public boolean hasName() {
|
||||
return result.hasName();
|
||||
}
|
||||
public java.lang.String getName() {
|
||||
return result.getName();
|
||||
}
|
||||
public Builder setName(java.lang.String value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.hasName = true;
|
||||
result.name_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearName() {
|
||||
result.hasName = false;
|
||||
result.name_ = "";
|
||||
return this;
|
||||
}
|
||||
|
||||
// repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchema.Attribute attrs = 2;
|
||||
public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute> getAttrsList() {
|
||||
return java.util.Collections.unmodifiableList(result.attrs_);
|
||||
}
|
||||
public int getAttrsCount() {
|
||||
return result.getAttrsCount();
|
||||
}
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute getAttrs(int index) {
|
||||
return result.getAttrs(index);
|
||||
}
|
||||
public Builder setAttrs(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.attrs_.set(index, value);
|
||||
return this;
|
||||
}
|
||||
public Builder setAttrs(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder builderForValue) {
|
||||
result.attrs_.set(index, builderForValue.build());
|
||||
return this;
|
||||
}
|
||||
public Builder addAttrs(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
if (result.attrs_.isEmpty()) {
|
||||
result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute>();
|
||||
}
|
||||
result.attrs_.add(value);
|
||||
return this;
|
||||
}
|
||||
public Builder addAttrs(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder builderForValue) {
|
||||
if (result.attrs_.isEmpty()) {
|
||||
result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute>();
|
||||
}
|
||||
result.attrs_.add(builderForValue.build());
|
||||
return this;
|
||||
}
|
||||
public Builder addAllAttrs(
|
||||
java.lang.Iterable<? extends org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute> values) {
|
||||
if (result.attrs_.isEmpty()) {
|
||||
result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute>();
|
||||
}
|
||||
super.addAll(values, result.attrs_);
|
||||
return this;
|
||||
}
|
||||
public Builder clearAttrs() {
|
||||
result.attrs_ = java.util.Collections.emptyList();
|
||||
return this;
|
||||
}
|
||||
|
||||
// optional int32 ttl = 3;
|
||||
public boolean hasTtl() {
|
||||
return result.hasTtl();
|
||||
}
|
||||
public int getTtl() {
|
||||
return result.getTtl();
|
||||
}
|
||||
public Builder setTtl(int value) {
|
||||
result.hasTtl = true;
|
||||
result.ttl_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearTtl() {
|
||||
result.hasTtl = false;
|
||||
result.ttl_ = 0;
|
||||
return this;
|
||||
}
|
||||
|
||||
// optional int32 maxVersions = 4;
|
||||
public boolean hasMaxVersions() {
|
||||
return result.hasMaxVersions();
|
||||
}
|
||||
public int getMaxVersions() {
|
||||
return result.getMaxVersions();
|
||||
}
|
||||
public Builder setMaxVersions(int value) {
|
||||
result.hasMaxVersions = true;
|
||||
result.maxVersions_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearMaxVersions() {
|
||||
result.hasMaxVersions = false;
|
||||
result.maxVersions_ = 0;
|
||||
return this;
|
||||
}
|
||||
|
||||
// optional string compression = 5;
|
||||
public boolean hasCompression() {
|
||||
return result.hasCompression();
|
||||
}
|
||||
public java.lang.String getCompression() {
|
||||
return result.getCompression();
|
||||
}
|
||||
public Builder setCompression(java.lang.String value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.hasCompression = true;
|
||||
result.compression_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearCompression() {
|
||||
result.hasCompression = false;
|
||||
result.compression_ = "";
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
static {
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.getDescriptor();
|
||||
}
|
||||
}
|
||||
|
||||
private static com.google.protobuf.Descriptors.Descriptor
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_descriptor;
|
||||
private static
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_fieldAccessorTable;
|
||||
private static com.google.protobuf.Descriptors.Descriptor
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_Attribute_descriptor;
|
||||
private static
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_Attribute_fieldAccessorTable;
|
||||
|
||||
public static com.google.protobuf.Descriptors.FileDescriptor
|
||||
getDescriptor() {
|
||||
return descriptor;
|
||||
}
|
||||
private static com.google.protobuf.Descriptors.FileDescriptor
|
||||
descriptor;
|
||||
static {
|
||||
java.lang.String descriptorData =
|
||||
"\n\031ColumnSchemaMessage.proto\0223org.apache." +
|
||||
"hadoop.hbase.stargate.protobuf.generated" +
|
||||
"\"\331\001\n\014ColumnSchema\022\014\n\004name\030\001 \001(\t\022Z\n\005attrs" +
|
||||
"\030\002 \003(\0132K.org.apache.hadoop.hbase.stargat" +
|
||||
"e.protobuf.generated.ColumnSchema.Attrib" +
|
||||
"ute\022\013\n\003ttl\030\003 \001(\005\022\023\n\013maxVersions\030\004 \001(\005\022\023\n" +
|
||||
"\013compression\030\005 \001(\t\032(\n\tAttribute\022\014\n\004name\030" +
|
||||
"\001 \002(\t\022\r\n\005value\030\002 \002(\t";
|
||||
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
|
||||
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
|
||||
public com.google.protobuf.ExtensionRegistry assignDescriptors(
|
||||
com.google.protobuf.Descriptors.FileDescriptor root) {
|
||||
descriptor = root;
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_descriptor =
|
||||
getDescriptor().getMessageTypes().get(0);
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_fieldAccessorTable = new
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_descriptor,
|
||||
new java.lang.String[] { "Name", "Attrs", "Ttl", "MaxVersions", "Compression", },
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.class,
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder.class);
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_Attribute_descriptor =
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_descriptor.getNestedTypes().get(0);
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_Attribute_fieldAccessorTable = new
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_Attribute_descriptor,
|
||||
new java.lang.String[] { "Name", "Value", },
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.class,
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder.class);
|
||||
return null;
|
||||
}
|
||||
};
|
||||
com.google.protobuf.Descriptors.FileDescriptor
|
||||
.internalBuildGeneratedFileFrom(descriptorData,
|
||||
new com.google.protobuf.Descriptors.FileDescriptor[] {
|
||||
}, assigner);
|
||||
}
|
||||
}
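Usage sketch only, not part of this commit: a ColumnSchema message can be assembled and round-tripped with the generated builder API above. The attribute name/value strings are hypothetical; toByteArray() is inherited from the protobuf Message base class, and every other call is defined in the generated file above.
// Usage sketch only (not part of this commit).
import org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema;

public class ColumnSchemaExample {
  public static void main(String[] args) throws Exception {
    ColumnSchema schema = ColumnSchema.newBuilder()
        .setName("info")                                   // optional string name = 1
        .addAttrs(ColumnSchema.Attribute.newBuilder()      // repeated Attribute attrs = 2
            .setName("BLOCKCACHE")                         // required string name = 1
            .setValue("true")                              // required string value = 2
            .build())
        .setTtl(86400)                                     // optional int32 ttl = 3
        .setMaxVersions(3)                                 // optional int32 maxVersions = 4
        .setCompression("GZ")                              // optional string compression = 5
        .build();
    byte[] wire = schema.toByteArray();                    // inherited from com.google.protobuf.Message
    ColumnSchema copy = ColumnSchema.parseFrom(wire);      // parseFrom(byte[]) defined above
    System.out.println(copy.getName() + " ttl=" + copy.getTtl()
        + " attrs=" + copy.getAttrsCount());
  }
}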
|
|
@ -0,0 +1,558 @@
|
|||
// Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.protobuf.generated;
|
||||
|
||||
public final class ScannerMessage {
|
||||
private ScannerMessage() {}
|
||||
public static void registerAllExtensions(
|
||||
com.google.protobuf.ExtensionRegistry registry) {
|
||||
}
|
||||
public static final class Scanner extends
|
||||
com.google.protobuf.GeneratedMessage {
|
||||
// Use Scanner.newBuilder() to construct.
|
||||
private Scanner() {}
|
||||
|
||||
private static final Scanner defaultInstance = new Scanner();
|
||||
public static Scanner getDefaultInstance() {
|
||||
return defaultInstance;
|
||||
}
|
||||
|
||||
public Scanner getDefaultInstanceForType() {
|
||||
return defaultInstance;
|
||||
}
|
||||
|
||||
public static final com.google.protobuf.Descriptors.Descriptor
|
||||
getDescriptor() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Scanner_descriptor;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||
internalGetFieldAccessorTable() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Scanner_fieldAccessorTable;
|
||||
}
|
||||
|
||||
// optional bytes startRow = 1;
|
||||
public static final int STARTROW_FIELD_NUMBER = 1;
|
||||
private boolean hasStartRow;
|
||||
private com.google.protobuf.ByteString startRow_ = com.google.protobuf.ByteString.EMPTY;
|
||||
public boolean hasStartRow() { return hasStartRow; }
|
||||
public com.google.protobuf.ByteString getStartRow() { return startRow_; }
|
||||
|
||||
// optional bytes endRow = 2;
|
||||
public static final int ENDROW_FIELD_NUMBER = 2;
|
||||
private boolean hasEndRow;
|
||||
private com.google.protobuf.ByteString endRow_ = com.google.protobuf.ByteString.EMPTY;
|
||||
public boolean hasEndRow() { return hasEndRow; }
|
||||
public com.google.protobuf.ByteString getEndRow() { return endRow_; }
|
||||
|
||||
// repeated bytes columns = 3;
|
||||
public static final int COLUMNS_FIELD_NUMBER = 3;
|
||||
private java.util.List<com.google.protobuf.ByteString> columns_ =
|
||||
java.util.Collections.emptyList();
|
||||
public java.util.List<com.google.protobuf.ByteString> getColumnsList() {
|
||||
return columns_;
|
||||
}
|
||||
public int getColumnsCount() { return columns_.size(); }
|
||||
public com.google.protobuf.ByteString getColumns(int index) {
|
||||
return columns_.get(index);
|
||||
}
|
||||
|
||||
// optional int32 batch = 4;
|
||||
public static final int BATCH_FIELD_NUMBER = 4;
|
||||
private boolean hasBatch;
|
||||
private int batch_ = 0;
|
||||
public boolean hasBatch() { return hasBatch; }
|
||||
public int getBatch() { return batch_; }
|
||||
|
||||
// optional int64 startTime = 5;
|
||||
public static final int STARTTIME_FIELD_NUMBER = 5;
|
||||
private boolean hasStartTime;
|
||||
private long startTime_ = 0L;
|
||||
public boolean hasStartTime() { return hasStartTime; }
|
||||
public long getStartTime() { return startTime_; }
|
||||
|
||||
// optional int64 endTime = 6;
|
||||
public static final int ENDTIME_FIELD_NUMBER = 6;
|
||||
private boolean hasEndTime;
|
||||
private long endTime_ = 0L;
|
||||
public boolean hasEndTime() { return hasEndTime; }
|
||||
public long getEndTime() { return endTime_; }
|
||||
|
||||
@Override
|
||||
public final boolean isInitialized() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(com.google.protobuf.CodedOutputStream output)
|
||||
throws java.io.IOException {
|
||||
if (hasStartRow()) {
|
||||
output.writeBytes(1, getStartRow());
|
||||
}
|
||||
if (hasEndRow()) {
|
||||
output.writeBytes(2, getEndRow());
|
||||
}
|
||||
for (com.google.protobuf.ByteString element : getColumnsList()) {
|
||||
output.writeBytes(3, element);
|
||||
}
|
||||
if (hasBatch()) {
|
||||
output.writeInt32(4, getBatch());
|
||||
}
|
||||
if (hasStartTime()) {
|
||||
output.writeInt64(5, getStartTime());
|
||||
}
|
||||
if (hasEndTime()) {
|
||||
output.writeInt64(6, getEndTime());
|
||||
}
|
||||
getUnknownFields().writeTo(output);
|
||||
}
|
||||
|
||||
private int memoizedSerializedSize = -1;
|
||||
@Override
|
||||
public int getSerializedSize() {
|
||||
int size = memoizedSerializedSize;
|
||||
if (size != -1) return size;
|
||||
|
||||
size = 0;
|
||||
if (hasStartRow()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeBytesSize(1, getStartRow());
|
||||
}
|
||||
if (hasEndRow()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeBytesSize(2, getEndRow());
|
||||
}
|
||||
{
|
||||
int dataSize = 0;
|
||||
for (com.google.protobuf.ByteString element : getColumnsList()) {
|
||||
dataSize += com.google.protobuf.CodedOutputStream
|
||||
.computeBytesSizeNoTag(element);
|
||||
}
|
||||
size += dataSize;
|
||||
size += 1 * getColumnsList().size();
|
||||
}
|
||||
if (hasBatch()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeInt32Size(4, getBatch());
|
||||
}
|
||||
if (hasStartTime()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeInt64Size(5, getStartTime());
|
||||
}
|
||||
if (hasEndTime()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeInt64Size(6, getEndTime());
|
||||
}
|
||||
size += getUnknownFields().getSerializedSize();
|
||||
memoizedSerializedSize = size;
|
||||
return size;
|
||||
}
|
||||
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseFrom(
|
||||
com.google.protobuf.ByteString data)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseFrom(
|
||||
com.google.protobuf.ByteString data,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseFrom(byte[] data)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseFrom(
|
||||
byte[] data,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseFrom(java.io.InputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseFrom(
|
||||
java.io.InputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseDelimitedFrom(java.io.InputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeDelimitedFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseDelimitedFrom(
|
||||
java.io.InputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseFrom(
|
||||
com.google.protobuf.CodedInputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseFrom(
|
||||
com.google.protobuf.CodedInputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
|
||||
public static Builder newBuilder() { return new Builder(); }
|
||||
public Builder newBuilderForType() { return new Builder(); }
|
||||
public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner prototype) {
|
||||
return new Builder().mergeFrom(prototype);
|
||||
}
|
||||
public Builder toBuilder() { return newBuilder(this); }
|
||||
|
||||
public static final class Builder extends
|
||||
com.google.protobuf.GeneratedMessage.Builder<Builder> {
|
||||
// Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner.newBuilder()
|
||||
private Builder() {}
|
||||
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner result = new org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner();
|
||||
|
||||
@Override
|
||||
protected org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner internalGetResult() {
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder clear() {
|
||||
result = new org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner();
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder clone() {
|
||||
return new Builder().mergeFrom(result);
|
||||
}
|
||||
|
||||
@Override
|
||||
public com.google.protobuf.Descriptors.Descriptor
|
||||
getDescriptorForType() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner.getDescriptor();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner getDefaultInstanceForType() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner.getDefaultInstance();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner build() {
|
||||
if (result != null && !isInitialized()) {
|
||||
throw new com.google.protobuf.UninitializedMessageException(
|
||||
result);
|
||||
}
|
||||
return buildPartial();
|
||||
}
|
||||
|
||||
private org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner buildParsed()
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
if (!isInitialized()) {
|
||||
throw new com.google.protobuf.UninitializedMessageException(
|
||||
result).asInvalidProtocolBufferException();
|
||||
}
|
||||
return buildPartial();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner buildPartial() {
|
||||
if (result == null) {
|
||||
throw new IllegalStateException(
|
||||
"build() has already been called on this Builder."); }
|
||||
if (result.columns_ != java.util.Collections.EMPTY_LIST) {
|
||||
result.columns_ =
|
||||
java.util.Collections.unmodifiableList(result.columns_);
|
||||
}
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner returnMe = result;
|
||||
result = null;
|
||||
return returnMe;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(com.google.protobuf.Message other) {
|
||||
if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner) {
|
||||
return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner)other);
|
||||
} else {
|
||||
super.mergeFrom(other);
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner other) {
|
||||
if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner.getDefaultInstance()) return this;
|
||||
if (other.hasStartRow()) {
|
||||
setStartRow(other.getStartRow());
|
||||
}
|
||||
if (other.hasEndRow()) {
|
||||
setEndRow(other.getEndRow());
|
||||
}
|
||||
if (!other.columns_.isEmpty()) {
|
||||
if (result.columns_.isEmpty()) {
|
||||
result.columns_ = new java.util.ArrayList<com.google.protobuf.ByteString>();
|
||||
}
|
||||
result.columns_.addAll(other.columns_);
|
||||
}
|
||||
if (other.hasBatch()) {
|
||||
setBatch(other.getBatch());
|
||||
}
|
||||
if (other.hasStartTime()) {
|
||||
setStartTime(other.getStartTime());
|
||||
}
|
||||
if (other.hasEndTime()) {
|
||||
setEndTime(other.getEndTime());
|
||||
}
|
||||
this.mergeUnknownFields(other.getUnknownFields());
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(
|
||||
com.google.protobuf.CodedInputStream input)
|
||||
throws java.io.IOException {
|
||||
return mergeFrom(input,
|
||||
com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(
|
||||
com.google.protobuf.CodedInputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
||||
com.google.protobuf.UnknownFieldSet.newBuilder(
|
||||
this.getUnknownFields());
|
||||
while (true) {
|
||||
int tag = input.readTag();
|
||||
switch (tag) {
|
||||
case 0:
|
||||
this.setUnknownFields(unknownFields.build());
|
||||
return this;
|
||||
default: {
|
||||
if (!parseUnknownField(input, unknownFields,
|
||||
extensionRegistry, tag)) {
|
||||
this.setUnknownFields(unknownFields.build());
|
||||
return this;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 10: {
|
||||
setStartRow(input.readBytes());
|
||||
break;
|
||||
}
|
||||
case 18: {
|
||||
setEndRow(input.readBytes());
|
||||
break;
|
||||
}
|
||||
case 26: {
|
||||
addColumns(input.readBytes());
|
||||
break;
|
||||
}
|
||||
case 32: {
|
||||
setBatch(input.readInt32());
|
||||
break;
|
||||
}
|
||||
case 40: {
|
||||
setStartTime(input.readInt64());
|
||||
break;
|
||||
}
|
||||
case 48: {
|
||||
setEndTime(input.readInt64());
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// optional bytes startRow = 1;
|
||||
public boolean hasStartRow() {
|
||||
return result.hasStartRow();
|
||||
}
|
||||
public com.google.protobuf.ByteString getStartRow() {
|
||||
return result.getStartRow();
|
||||
}
|
||||
public Builder setStartRow(com.google.protobuf.ByteString value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.hasStartRow = true;
|
||||
result.startRow_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearStartRow() {
|
||||
result.hasStartRow = false;
|
||||
result.startRow_ = com.google.protobuf.ByteString.EMPTY;
|
||||
return this;
|
||||
}
|
||||
|
||||
// optional bytes endRow = 2;
|
||||
public boolean hasEndRow() {
|
||||
return result.hasEndRow();
|
||||
}
|
||||
public com.google.protobuf.ByteString getEndRow() {
|
||||
return result.getEndRow();
|
||||
}
|
||||
public Builder setEndRow(com.google.protobuf.ByteString value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.hasEndRow = true;
|
||||
result.endRow_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearEndRow() {
|
||||
result.hasEndRow = false;
|
||||
result.endRow_ = com.google.protobuf.ByteString.EMPTY;
|
||||
return this;
|
||||
}
|
||||
|
||||
// repeated bytes columns = 3;
|
||||
public java.util.List<com.google.protobuf.ByteString> getColumnsList() {
|
||||
return java.util.Collections.unmodifiableList(result.columns_);
|
||||
}
|
||||
public int getColumnsCount() {
|
||||
return result.getColumnsCount();
|
||||
}
|
||||
public com.google.protobuf.ByteString getColumns(int index) {
|
||||
return result.getColumns(index);
|
||||
}
|
||||
public Builder setColumns(int index, com.google.protobuf.ByteString value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.columns_.set(index, value);
|
||||
return this;
|
||||
}
|
||||
public Builder addColumns(com.google.protobuf.ByteString value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
if (result.columns_.isEmpty()) {
|
||||
result.columns_ = new java.util.ArrayList<com.google.protobuf.ByteString>();
|
||||
}
|
||||
result.columns_.add(value);
|
||||
return this;
|
||||
}
|
||||
public Builder addAllColumns(
|
||||
java.lang.Iterable<? extends com.google.protobuf.ByteString> values) {
|
||||
if (result.columns_.isEmpty()) {
|
||||
result.columns_ = new java.util.ArrayList<com.google.protobuf.ByteString>();
|
||||
}
|
||||
super.addAll(values, result.columns_);
|
||||
return this;
|
||||
}
|
||||
public Builder clearColumns() {
|
||||
result.columns_ = java.util.Collections.emptyList();
|
||||
return this;
|
||||
}
|
||||
|
||||
// optional int32 batch = 4;
|
||||
public boolean hasBatch() {
|
||||
return result.hasBatch();
|
||||
}
|
||||
public int getBatch() {
|
||||
return result.getBatch();
|
||||
}
|
||||
public Builder setBatch(int value) {
|
||||
result.hasBatch = true;
|
||||
result.batch_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearBatch() {
|
||||
result.hasBatch = false;
|
||||
result.batch_ = 0;
|
||||
return this;
|
||||
}
|
||||
|
||||
// optional int64 startTime = 5;
|
||||
public boolean hasStartTime() {
|
||||
return result.hasStartTime();
|
||||
}
|
||||
public long getStartTime() {
|
||||
return result.getStartTime();
|
||||
}
|
||||
public Builder setStartTime(long value) {
|
||||
result.hasStartTime = true;
|
||||
result.startTime_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearStartTime() {
|
||||
result.hasStartTime = false;
|
||||
result.startTime_ = 0L;
|
||||
return this;
|
||||
}
|
||||
|
||||
// optional int64 endTime = 6;
|
||||
public boolean hasEndTime() {
|
||||
return result.hasEndTime();
|
||||
}
|
||||
public long getEndTime() {
|
||||
return result.getEndTime();
|
||||
}
|
||||
public Builder setEndTime(long value) {
|
||||
result.hasEndTime = true;
|
||||
result.endTime_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearEndTime() {
|
||||
result.hasEndTime = false;
|
||||
result.endTime_ = 0L;
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
static {
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.getDescriptor();
|
||||
}
|
||||
}
|
||||
|
||||
private static com.google.protobuf.Descriptors.Descriptor
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Scanner_descriptor;
|
||||
private static
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Scanner_fieldAccessorTable;
|
||||
|
||||
public static com.google.protobuf.Descriptors.FileDescriptor
|
||||
getDescriptor() {
|
||||
return descriptor;
|
||||
}
|
||||
private static com.google.protobuf.Descriptors.FileDescriptor
|
||||
descriptor;
|
||||
static {
|
||||
java.lang.String descriptorData =
|
||||
"\n\024ScannerMessage.proto\0223org.apache.hadoo" +
|
||||
"p.hbase.stargate.protobuf.generated\"o\n\007S" +
|
||||
"canner\022\020\n\010startRow\030\001 \001(\014\022\016\n\006endRow\030\002 \001(\014" +
|
||||
"\022\017\n\007columns\030\003 \003(\014\022\r\n\005batch\030\004 \001(\005\022\021\n\tstar" +
|
||||
"tTime\030\005 \001(\003\022\017\n\007endTime\030\006 \001(\003";
|
||||
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
|
||||
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
|
||||
public com.google.protobuf.ExtensionRegistry assignDescriptors(
|
||||
com.google.protobuf.Descriptors.FileDescriptor root) {
|
||||
descriptor = root;
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Scanner_descriptor =
|
||||
getDescriptor().getMessageTypes().get(0);
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Scanner_fieldAccessorTable = new
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Scanner_descriptor,
|
||||
new java.lang.String[] { "StartRow", "EndRow", "Columns", "Batch", "StartTime", "EndTime", },
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner.class,
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner.Builder.class);
|
||||
return null;
|
||||
}
|
||||
};
|
||||
com.google.protobuf.Descriptors.FileDescriptor
|
||||
.internalBuildGeneratedFileFrom(descriptorData,
|
||||
new com.google.protobuf.Descriptors.FileDescriptor[] {
|
||||
}, assigner);
|
||||
}
|
||||
}
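Usage sketch only, not part of this commit: a client-side scan specification can be built and serialized with the generated Scanner API above. The row keys and column name are hypothetical; ByteString.copyFromUtf8 is a standard protobuf helper, toByteArray() comes from the protobuf Message base class, and the remaining calls are defined in the generated file above.
// Usage sketch only (not part of this commit).
import org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner;
import com.google.protobuf.ByteString;

public class ScannerExample {
  public static void main(String[] args) throws Exception {
    Scanner scanner = Scanner.newBuilder()
        .setStartRow(ByteString.copyFromUtf8("row-0000"))  // optional bytes startRow = 1
        .setEndRow(ByteString.copyFromUtf8("row-9999"))    // optional bytes endRow = 2
        .addColumns(ByteString.copyFromUtf8("info:name"))  // repeated bytes columns = 3
        .setBatch(100)                                     // optional int32 batch = 4
        .setStartTime(0L)                                  // optional int64 startTime = 5
        .setEndTime(Long.MAX_VALUE)                        // optional int64 endTime = 6
        .build();
    byte[] wire = scanner.toByteArray();                   // inherited from com.google.protobuf.Message
    Scanner parsed = Scanner.parseFrom(wire);              // parseFrom(byte[]) defined above
    System.out.println("columns=" + parsed.getColumnsCount()
        + " batch=" + parsed.getBatch());
  }
}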
|
File diff suppressed because it is too large
|
@ -0,0 +1,864 @@
|
|||
// Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.protobuf.generated;
|
||||
|
||||
public final class TableInfoMessage {
|
||||
private TableInfoMessage() {}
|
||||
public static void registerAllExtensions(
|
||||
com.google.protobuf.ExtensionRegistry registry) {
|
||||
}
|
||||
public static final class TableInfo extends
|
||||
com.google.protobuf.GeneratedMessage {
|
||||
// Use TableInfo.newBuilder() to construct.
|
||||
private TableInfo() {}
|
||||
|
||||
private static final TableInfo defaultInstance = new TableInfo();
|
||||
public static TableInfo getDefaultInstance() {
|
||||
return defaultInstance;
|
||||
}
|
||||
|
||||
public TableInfo getDefaultInstanceForType() {
|
||||
return defaultInstance;
|
||||
}
|
||||
|
||||
public static final com.google.protobuf.Descriptors.Descriptor
|
||||
getDescriptor() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_descriptor;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||
internalGetFieldAccessorTable() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_fieldAccessorTable;
|
||||
}
|
||||
|
||||
public static final class Region extends
|
||||
com.google.protobuf.GeneratedMessage {
|
||||
// Use Region.newBuilder() to construct.
|
||||
private Region() {}
|
||||
|
||||
private static final Region defaultInstance = new Region();
|
||||
public static Region getDefaultInstance() {
|
||||
return defaultInstance;
|
||||
}
|
||||
|
||||
public Region getDefaultInstanceForType() {
|
||||
return defaultInstance;
|
||||
}
|
||||
|
||||
public static final com.google.protobuf.Descriptors.Descriptor
|
||||
getDescriptor() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_Region_descriptor;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||
internalGetFieldAccessorTable() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_Region_fieldAccessorTable;
|
||||
}
|
||||
|
||||
// required string name = 1;
|
||||
public static final int NAME_FIELD_NUMBER = 1;
|
||||
private boolean hasName;
|
||||
private java.lang.String name_ = "";
|
||||
public boolean hasName() { return hasName; }
|
||||
public java.lang.String getName() { return name_; }
|
||||
|
||||
// optional bytes startKey = 2;
|
||||
public static final int STARTKEY_FIELD_NUMBER = 2;
|
||||
private boolean hasStartKey;
|
||||
private com.google.protobuf.ByteString startKey_ = com.google.protobuf.ByteString.EMPTY;
|
||||
public boolean hasStartKey() { return hasStartKey; }
|
||||
public com.google.protobuf.ByteString getStartKey() { return startKey_; }
|
||||
|
||||
// optional bytes endKey = 3;
|
||||
public static final int ENDKEY_FIELD_NUMBER = 3;
|
||||
private boolean hasEndKey;
|
||||
private com.google.protobuf.ByteString endKey_ = com.google.protobuf.ByteString.EMPTY;
|
||||
public boolean hasEndKey() { return hasEndKey; }
|
||||
public com.google.protobuf.ByteString getEndKey() { return endKey_; }
|
||||
|
||||
// optional int64 id = 4;
|
||||
public static final int ID_FIELD_NUMBER = 4;
|
||||
private boolean hasId;
|
||||
private long id_ = 0L;
|
||||
public boolean hasId() { return hasId; }
|
||||
public long getId() { return id_; }
|
||||
|
||||
// optional string location = 5;
|
||||
public static final int LOCATION_FIELD_NUMBER = 5;
|
||||
private boolean hasLocation;
|
||||
private java.lang.String location_ = "";
|
||||
public boolean hasLocation() { return hasLocation; }
|
||||
public java.lang.String getLocation() { return location_; }
|
||||
|
||||
@Override
|
||||
public final boolean isInitialized() {
|
||||
if (!hasName) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(com.google.protobuf.CodedOutputStream output)
|
||||
throws java.io.IOException {
|
||||
if (hasName()) {
|
||||
output.writeString(1, getName());
|
||||
}
|
||||
if (hasStartKey()) {
|
||||
output.writeBytes(2, getStartKey());
|
||||
}
|
||||
if (hasEndKey()) {
|
||||
output.writeBytes(3, getEndKey());
|
||||
}
|
||||
if (hasId()) {
|
||||
output.writeInt64(4, getId());
|
||||
}
|
||||
if (hasLocation()) {
|
||||
output.writeString(5, getLocation());
|
||||
}
|
||||
getUnknownFields().writeTo(output);
|
||||
}
|
||||
|
||||
private int memoizedSerializedSize = -1;
|
||||
@Override
|
||||
public int getSerializedSize() {
|
||||
int size = memoizedSerializedSize;
|
||||
if (size != -1) return size;
|
||||
|
||||
size = 0;
|
||||
if (hasName()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeStringSize(1, getName());
|
||||
}
|
||||
if (hasStartKey()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeBytesSize(2, getStartKey());
|
||||
}
|
||||
if (hasEndKey()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeBytesSize(3, getEndKey());
|
||||
}
|
||||
if (hasId()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeInt64Size(4, getId());
|
||||
}
|
||||
if (hasLocation()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeStringSize(5, getLocation());
|
||||
}
|
||||
size += getUnknownFields().getSerializedSize();
|
||||
memoizedSerializedSize = size;
|
||||
return size;
|
||||
}
|
||||
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
|
||||
com.google.protobuf.ByteString data)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
|
||||
com.google.protobuf.ByteString data,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(byte[] data)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
|
||||
byte[] data,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(java.io.InputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
|
||||
java.io.InputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseDelimitedFrom(java.io.InputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeDelimitedFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseDelimitedFrom(
|
||||
java.io.InputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
|
||||
com.google.protobuf.CodedInputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
|
||||
com.google.protobuf.CodedInputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
|
||||
public static Builder newBuilder() { return new Builder(); }
|
||||
public Builder newBuilderForType() { return new Builder(); }
|
||||
public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region prototype) {
|
||||
return new Builder().mergeFrom(prototype);
|
||||
}
|
||||
public Builder toBuilder() { return newBuilder(this); }
|
||||
|
||||
public static final class Builder extends
|
||||
com.google.protobuf.GeneratedMessage.Builder<Builder> {
|
||||
// Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.newBuilder()
|
||||
private Builder() {}
|
||||
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region();
|
||||
|
||||
@Override
|
||||
protected org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region internalGetResult() {
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder clear() {
|
||||
result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region();
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder clone() {
|
||||
return new Builder().mergeFrom(result);
|
||||
}
|
||||
|
||||
@Override
|
||||
public com.google.protobuf.Descriptors.Descriptor
|
||||
getDescriptorForType() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.getDescriptor();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region getDefaultInstanceForType() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.getDefaultInstance();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region build() {
|
||||
if (result != null && !isInitialized()) {
|
||||
throw new com.google.protobuf.UninitializedMessageException(
|
||||
result);
|
||||
}
|
||||
return buildPartial();
|
||||
}
|
||||
|
||||
private org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region buildParsed()
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
if (!isInitialized()) {
|
||||
throw new com.google.protobuf.UninitializedMessageException(
|
||||
result).asInvalidProtocolBufferException();
|
||||
}
|
||||
return buildPartial();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region buildPartial() {
|
||||
if (result == null) {
|
||||
throw new IllegalStateException(
|
||||
"build() has already been called on this Builder."); }
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region returnMe = result;
|
||||
result = null;
|
||||
return returnMe;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(com.google.protobuf.Message other) {
|
||||
if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region) {
|
||||
return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region)other);
|
||||
} else {
|
||||
super.mergeFrom(other);
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region other) {
|
||||
if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.getDefaultInstance()) return this;
|
||||
if (other.hasName()) {
|
||||
setName(other.getName());
|
||||
}
|
||||
if (other.hasStartKey()) {
|
||||
setStartKey(other.getStartKey());
|
||||
}
|
||||
if (other.hasEndKey()) {
|
||||
setEndKey(other.getEndKey());
|
||||
}
|
||||
if (other.hasId()) {
|
||||
setId(other.getId());
|
||||
}
|
||||
if (other.hasLocation()) {
|
||||
setLocation(other.getLocation());
|
||||
}
|
||||
this.mergeUnknownFields(other.getUnknownFields());
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(
|
||||
com.google.protobuf.CodedInputStream input)
|
||||
throws java.io.IOException {
|
||||
return mergeFrom(input,
|
||||
com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(
|
||||
com.google.protobuf.CodedInputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
||||
com.google.protobuf.UnknownFieldSet.newBuilder(
|
||||
this.getUnknownFields());
|
||||
while (true) {
|
||||
int tag = input.readTag();
|
||||
switch (tag) {
|
||||
case 0:
|
||||
this.setUnknownFields(unknownFields.build());
|
||||
return this;
|
||||
default: {
|
||||
if (!parseUnknownField(input, unknownFields,
|
||||
extensionRegistry, tag)) {
|
||||
this.setUnknownFields(unknownFields.build());
|
||||
return this;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 10: {
|
||||
setName(input.readString());
|
||||
break;
|
||||
}
|
||||
case 18: {
|
||||
setStartKey(input.readBytes());
|
||||
break;
|
||||
}
|
||||
case 26: {
|
||||
setEndKey(input.readBytes());
|
||||
break;
|
||||
}
|
||||
case 32: {
|
||||
setId(input.readInt64());
|
||||
break;
|
||||
}
|
||||
case 42: {
|
||||
setLocation(input.readString());
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// required string name = 1;
|
||||
public boolean hasName() {
|
||||
return result.hasName();
|
||||
}
|
||||
public java.lang.String getName() {
|
||||
return result.getName();
|
||||
}
|
||||
public Builder setName(java.lang.String value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.hasName = true;
|
||||
result.name_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearName() {
|
||||
result.hasName = false;
|
||||
result.name_ = "";
|
||||
return this;
|
||||
}
|
||||
|
||||
// optional bytes startKey = 2;
|
||||
public boolean hasStartKey() {
|
||||
return result.hasStartKey();
|
||||
}
|
||||
public com.google.protobuf.ByteString getStartKey() {
|
||||
return result.getStartKey();
|
||||
}
|
||||
public Builder setStartKey(com.google.protobuf.ByteString value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.hasStartKey = true;
|
||||
result.startKey_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearStartKey() {
|
||||
result.hasStartKey = false;
|
||||
result.startKey_ = com.google.protobuf.ByteString.EMPTY;
|
||||
return this;
|
||||
}
|
||||
|
||||
// optional bytes endKey = 3;
|
||||
public boolean hasEndKey() {
|
||||
return result.hasEndKey();
|
||||
}
|
||||
public com.google.protobuf.ByteString getEndKey() {
|
||||
return result.getEndKey();
|
||||
}
|
||||
public Builder setEndKey(com.google.protobuf.ByteString value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.hasEndKey = true;
|
||||
result.endKey_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearEndKey() {
|
||||
result.hasEndKey = false;
|
||||
result.endKey_ = com.google.protobuf.ByteString.EMPTY;
|
||||
return this;
|
||||
}
|
||||
|
||||
// optional int64 id = 4;
|
||||
public boolean hasId() {
|
||||
return result.hasId();
|
||||
}
|
||||
public long getId() {
|
||||
return result.getId();
|
||||
}
|
||||
public Builder setId(long value) {
|
||||
result.hasId = true;
|
||||
result.id_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearId() {
|
||||
result.hasId = false;
|
||||
result.id_ = 0L;
|
||||
return this;
|
||||
}
|
||||
|
||||
// optional string location = 5;
|
||||
public boolean hasLocation() {
|
||||
return result.hasLocation();
|
||||
}
|
||||
public java.lang.String getLocation() {
|
||||
return result.getLocation();
|
||||
}
|
||||
public Builder setLocation(java.lang.String value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.hasLocation = true;
|
||||
result.location_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearLocation() {
|
||||
result.hasLocation = false;
|
||||
result.location_ = "";
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
static {
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.getDescriptor();
|
||||
}
|
||||
}
|
||||
|
||||
// required string name = 1;
|
||||
public static final int NAME_FIELD_NUMBER = 1;
|
||||
private boolean hasName;
|
||||
private java.lang.String name_ = "";
|
||||
public boolean hasName() { return hasName; }
|
||||
public java.lang.String getName() { return name_; }
|
||||
|
||||
// repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfo.Region regions = 2;
|
||||
public static final int REGIONS_FIELD_NUMBER = 2;
|
||||
private java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region> regions_ =
|
||||
java.util.Collections.emptyList();
|
||||
public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region> getRegionsList() {
|
||||
return regions_;
|
||||
}
|
||||
public int getRegionsCount() { return regions_.size(); }
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region getRegions(int index) {
|
||||
return regions_.get(index);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean isInitialized() {
|
||||
if (!hasName) return false;
|
||||
for (org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region element : getRegionsList()) {
|
||||
if (!element.isInitialized()) return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(com.google.protobuf.CodedOutputStream output)
|
||||
throws java.io.IOException {
|
||||
if (hasName()) {
|
||||
output.writeString(1, getName());
|
||||
}
|
||||
for (org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region element : getRegionsList()) {
|
||||
output.writeMessage(2, element);
|
||||
}
|
||||
getUnknownFields().writeTo(output);
|
||||
}
|
||||
|
||||
private int memoizedSerializedSize = -1;
|
||||
@Override
|
||||
public int getSerializedSize() {
|
||||
int size = memoizedSerializedSize;
|
||||
if (size != -1) return size;
|
||||
|
||||
size = 0;
|
||||
if (hasName()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeStringSize(1, getName());
|
||||
}
|
||||
for (org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region element : getRegionsList()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeMessageSize(2, element);
|
||||
}
|
||||
size += getUnknownFields().getSerializedSize();
|
||||
memoizedSerializedSize = size;
|
||||
return size;
|
||||
}
|
||||
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
|
||||
com.google.protobuf.ByteString data)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
|
||||
com.google.protobuf.ByteString data,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseFrom(byte[] data)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
|
||||
byte[] data,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseFrom(java.io.InputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
|
||||
java.io.InputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseDelimitedFrom(java.io.InputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeDelimitedFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseDelimitedFrom(
|
||||
java.io.InputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
|
||||
com.google.protobuf.CodedInputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
|
||||
com.google.protobuf.CodedInputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
|
||||
public static Builder newBuilder() { return new Builder(); }
|
||||
public Builder newBuilderForType() { return new Builder(); }
|
||||
public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo prototype) {
|
||||
return new Builder().mergeFrom(prototype);
|
||||
}
|
||||
public Builder toBuilder() { return newBuilder(this); }
|
||||
|
||||
public static final class Builder extends
|
||||
com.google.protobuf.GeneratedMessage.Builder<Builder> {
|
||||
// Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.newBuilder()
|
||||
private Builder() {}
|
||||
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo();
|
||||
|
||||
@Override
|
||||
protected org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo internalGetResult() {
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder clear() {
|
||||
result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo();
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder clone() {
|
||||
return new Builder().mergeFrom(result);
|
||||
}
|
||||
|
||||
@Override
|
||||
public com.google.protobuf.Descriptors.Descriptor
|
||||
getDescriptorForType() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.getDescriptor();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo getDefaultInstanceForType() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.getDefaultInstance();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo build() {
|
||||
if (result != null && !isInitialized()) {
|
||||
throw new com.google.protobuf.UninitializedMessageException(
|
||||
result);
|
||||
}
|
||||
return buildPartial();
|
||||
}
|
||||
|
||||
private org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo buildParsed()
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
if (!isInitialized()) {
|
||||
throw new com.google.protobuf.UninitializedMessageException(
|
||||
result).asInvalidProtocolBufferException();
|
||||
}
|
||||
return buildPartial();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo buildPartial() {
|
||||
if (result == null) {
|
||||
throw new IllegalStateException(
|
||||
"build() has already been called on this Builder."); }
|
||||
if (result.regions_ != java.util.Collections.EMPTY_LIST) {
|
||||
result.regions_ =
|
||||
java.util.Collections.unmodifiableList(result.regions_);
|
||||
}
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo returnMe = result;
|
||||
result = null;
|
||||
return returnMe;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(com.google.protobuf.Message other) {
|
||||
if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo) {
|
||||
return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo)other);
|
||||
} else {
|
||||
super.mergeFrom(other);
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo other) {
|
||||
if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.getDefaultInstance()) return this;
|
||||
if (other.hasName()) {
|
||||
setName(other.getName());
|
||||
}
|
||||
if (!other.regions_.isEmpty()) {
|
||||
if (result.regions_.isEmpty()) {
|
||||
result.regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region>();
|
||||
}
|
||||
result.regions_.addAll(other.regions_);
|
||||
}
|
||||
this.mergeUnknownFields(other.getUnknownFields());
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(
|
||||
com.google.protobuf.CodedInputStream input)
|
||||
throws java.io.IOException {
|
||||
return mergeFrom(input,
|
||||
com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(
|
||||
com.google.protobuf.CodedInputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
||||
com.google.protobuf.UnknownFieldSet.newBuilder(
|
||||
this.getUnknownFields());
|
||||
while (true) {
|
||||
int tag = input.readTag();
|
||||
switch (tag) {
|
||||
case 0:
|
||||
this.setUnknownFields(unknownFields.build());
|
||||
return this;
|
||||
default: {
|
||||
if (!parseUnknownField(input, unknownFields,
|
||||
extensionRegistry, tag)) {
|
||||
this.setUnknownFields(unknownFields.build());
|
||||
return this;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 10: {
|
||||
setName(input.readString());
|
||||
break;
|
||||
}
|
||||
case 18: {
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder subBuilder = org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.newBuilder();
|
||||
input.readMessage(subBuilder, extensionRegistry);
|
||||
addRegions(subBuilder.buildPartial());
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// required string name = 1;
|
||||
public boolean hasName() {
|
||||
return result.hasName();
|
||||
}
|
||||
public java.lang.String getName() {
|
||||
return result.getName();
|
||||
}
|
||||
public Builder setName(java.lang.String value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.hasName = true;
|
||||
result.name_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearName() {
|
||||
result.hasName = false;
|
||||
result.name_ = "";
|
||||
return this;
|
||||
}
|
||||
|
||||
// repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfo.Region regions = 2;
|
||||
public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region> getRegionsList() {
|
||||
return java.util.Collections.unmodifiableList(result.regions_);
|
||||
}
|
||||
public int getRegionsCount() {
|
||||
return result.getRegionsCount();
|
||||
}
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region getRegions(int index) {
|
||||
return result.getRegions(index);
|
||||
}
|
||||
public Builder setRegions(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.regions_.set(index, value);
|
||||
return this;
|
||||
}
|
||||
public Builder setRegions(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder builderForValue) {
|
||||
result.regions_.set(index, builderForValue.build());
|
||||
return this;
|
||||
}
|
||||
public Builder addRegions(org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
if (result.regions_.isEmpty()) {
|
||||
result.regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region>();
|
||||
}
|
||||
result.regions_.add(value);
|
||||
return this;
|
||||
}
|
||||
public Builder addRegions(org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder builderForValue) {
|
||||
if (result.regions_.isEmpty()) {
|
||||
result.regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region>();
|
||||
}
|
||||
result.regions_.add(builderForValue.build());
|
||||
return this;
|
||||
}
|
||||
public Builder addAllRegions(
|
||||
java.lang.Iterable<? extends org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region> values) {
|
||||
if (result.regions_.isEmpty()) {
|
||||
result.regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region>();
|
||||
}
|
||||
super.addAll(values, result.regions_);
|
||||
return this;
|
||||
}
|
||||
public Builder clearRegions() {
|
||||
result.regions_ = java.util.Collections.emptyList();
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
static {
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.getDescriptor();
|
||||
}
|
||||
}
|
||||
|
||||
private static com.google.protobuf.Descriptors.Descriptor
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_descriptor;
|
||||
private static
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_fieldAccessorTable;
|
||||
private static com.google.protobuf.Descriptors.Descriptor
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_Region_descriptor;
|
||||
private static
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_Region_fieldAccessorTable;
|
||||
|
||||
public static com.google.protobuf.Descriptors.FileDescriptor
|
||||
getDescriptor() {
|
||||
return descriptor;
|
||||
}
|
||||
private static com.google.protobuf.Descriptors.FileDescriptor
|
||||
descriptor;
|
||||
static {
|
||||
java.lang.String descriptorData =
|
||||
"\n\026TableInfoMessage.proto\0223org.apache.had" +
|
||||
"oop.hbase.stargate.protobuf.generated\"\311\001" +
|
||||
"\n\tTableInfo\022\014\n\004name\030\001 \002(\t\022V\n\007regions\030\002 \003" +
|
||||
"(\0132E.org.apache.hadoop.hbase.stargate.pr" +
|
||||
"otobuf.generated.TableInfo.Region\032V\n\006Reg" +
|
||||
"ion\022\014\n\004name\030\001 \002(\t\022\020\n\010startKey\030\002 \001(\014\022\016\n\006e" +
|
||||
"ndKey\030\003 \001(\014\022\n\n\002id\030\004 \001(\003\022\020\n\010location\030\005 \001(" +
|
||||
"\t";
|
||||
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
|
||||
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
|
||||
public com.google.protobuf.ExtensionRegistry assignDescriptors(
|
||||
com.google.protobuf.Descriptors.FileDescriptor root) {
|
||||
descriptor = root;
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_descriptor =
|
||||
getDescriptor().getMessageTypes().get(0);
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_fieldAccessorTable = new
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_descriptor,
|
||||
new java.lang.String[] { "Name", "Regions", },
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.class,
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Builder.class);
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_Region_descriptor =
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_descriptor.getNestedTypes().get(0);
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_Region_fieldAccessorTable = new
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_Region_descriptor,
|
||||
new java.lang.String[] { "Name", "StartKey", "EndKey", "Id", "Location", },
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.class,
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder.class);
|
||||
return null;
|
||||
}
|
||||
};
|
||||
com.google.protobuf.Descriptors.FileDescriptor
|
||||
.internalBuildGeneratedFileFrom(descriptorData,
|
||||
new com.google.protobuf.Descriptors.FileDescriptor[] {
|
||||
}, assigner);
|
||||
}
|
||||
}
|
|
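The TableInfoMessage file above defines a TableInfo message with a nested Region message. A minimal sketch of how the two builders compose (not part of the commit; the wrapper class name, table name, and server address are made up, all builder calls are taken from the generated code shown above):

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo;

// Hypothetical example class, for illustration only.
public class TableInfoProtobufExample {
  public static void main(String[] args) throws Exception {
    TableInfo.Region region = TableInfo.Region.newBuilder()
        .setName("example,,1246000000000")              // required string name = 1 (made-up value)
        .setStartKey(ByteString.EMPTY)                  // optional bytes startKey = 2
        .setEndKey(ByteString.copyFromUtf8("zzz"))      // optional bytes endKey = 3
        .setId(1246000000000L)                          // optional int64 id = 4
        .setLocation("regionserver.example.org:60020")  // optional string location = 5 (made-up value)
        .build();

    TableInfo info = TableInfo.newBuilder()
        .setName("example")       // required string name = 1
        .addRegions(region)       // repeated Region regions = 2
        .build();

    // parseFrom() is generated above; toByteArray() comes from the protobuf runtime.
    TableInfo copy = TableInfo.parseFrom(info.toByteArray());
    System.out.println(copy.getName() + " has " + copy.getRegionsCount() + " region(s)");
  }
}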
@ -0,0 +1,355 @@
|
|||
// Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.protobuf.generated;
|
||||
|
||||
public final class TableListMessage {
|
||||
private TableListMessage() {}
|
||||
public static void registerAllExtensions(
|
||||
com.google.protobuf.ExtensionRegistry registry) {
|
||||
}
|
||||
public static final class TableList extends
|
||||
com.google.protobuf.GeneratedMessage {
|
||||
// Use TableList.newBuilder() to construct.
|
||||
private TableList() {}
|
||||
|
||||
private static final TableList defaultInstance = new TableList();
|
||||
public static TableList getDefaultInstance() {
|
||||
return defaultInstance;
|
||||
}
|
||||
|
||||
public TableList getDefaultInstanceForType() {
|
||||
return defaultInstance;
|
||||
}
|
||||
|
||||
public static final com.google.protobuf.Descriptors.Descriptor
|
||||
getDescriptor() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableList_descriptor;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||
internalGetFieldAccessorTable() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableList_fieldAccessorTable;
|
||||
}
|
||||
|
||||
// repeated string name = 1;
|
||||
public static final int NAME_FIELD_NUMBER = 1;
|
||||
private java.util.List<java.lang.String> name_ =
|
||||
java.util.Collections.emptyList();
|
||||
public java.util.List<java.lang.String> getNameList() {
|
||||
return name_;
|
||||
}
|
||||
public int getNameCount() { return name_.size(); }
|
||||
public java.lang.String getName(int index) {
|
||||
return name_.get(index);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean isInitialized() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(com.google.protobuf.CodedOutputStream output)
|
||||
throws java.io.IOException {
|
||||
for (java.lang.String element : getNameList()) {
|
||||
output.writeString(1, element);
|
||||
}
|
||||
getUnknownFields().writeTo(output);
|
||||
}
|
||||
|
||||
private int memoizedSerializedSize = -1;
|
||||
@Override
|
||||
public int getSerializedSize() {
|
||||
int size = memoizedSerializedSize;
|
||||
if (size != -1) return size;
|
||||
|
||||
size = 0;
|
||||
{
|
||||
int dataSize = 0;
|
||||
for (java.lang.String element : getNameList()) {
|
||||
dataSize += com.google.protobuf.CodedOutputStream
|
||||
.computeStringSizeNoTag(element);
|
||||
}
|
||||
size += dataSize;
|
||||
size += 1 * getNameList().size();
|
||||
}
|
||||
size += getUnknownFields().getSerializedSize();
|
||||
memoizedSerializedSize = size;
|
||||
return size;
|
||||
}
|
||||
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseFrom(
|
||||
com.google.protobuf.ByteString data)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseFrom(
|
||||
com.google.protobuf.ByteString data,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseFrom(byte[] data)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseFrom(
|
||||
byte[] data,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseFrom(java.io.InputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseFrom(
|
||||
java.io.InputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseDelimitedFrom(java.io.InputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeDelimitedFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseDelimitedFrom(
|
||||
java.io.InputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseFrom(
|
||||
com.google.protobuf.CodedInputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseFrom(
|
||||
com.google.protobuf.CodedInputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
|
||||
public static Builder newBuilder() { return new Builder(); }
|
||||
public Builder newBuilderForType() { return new Builder(); }
|
||||
public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList prototype) {
|
||||
return new Builder().mergeFrom(prototype);
|
||||
}
|
||||
public Builder toBuilder() { return newBuilder(this); }
|
||||
|
||||
public static final class Builder extends
|
||||
com.google.protobuf.GeneratedMessage.Builder<Builder> {
|
||||
// Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList.newBuilder()
|
||||
private Builder() {}
|
||||
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList();
|
||||
|
||||
@Override
|
||||
protected org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList internalGetResult() {
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder clear() {
|
||||
result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList();
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder clone() {
|
||||
return new Builder().mergeFrom(result);
|
||||
}
|
||||
|
||||
@Override
|
||||
public com.google.protobuf.Descriptors.Descriptor
|
||||
getDescriptorForType() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList.getDescriptor();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList getDefaultInstanceForType() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList.getDefaultInstance();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList build() {
|
||||
if (result != null && !isInitialized()) {
|
||||
throw new com.google.protobuf.UninitializedMessageException(
|
||||
result);
|
||||
}
|
||||
return buildPartial();
|
||||
}
|
||||
|
||||
private org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList buildParsed()
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
if (!isInitialized()) {
|
||||
throw new com.google.protobuf.UninitializedMessageException(
|
||||
result).asInvalidProtocolBufferException();
|
||||
}
|
||||
return buildPartial();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList buildPartial() {
|
||||
if (result == null) {
|
||||
throw new IllegalStateException(
|
||||
"build() has already been called on this Builder."); }
|
||||
if (result.name_ != java.util.Collections.EMPTY_LIST) {
|
||||
result.name_ =
|
||||
java.util.Collections.unmodifiableList(result.name_);
|
||||
}
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList returnMe = result;
|
||||
result = null;
|
||||
return returnMe;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(com.google.protobuf.Message other) {
|
||||
if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList) {
|
||||
return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList)other);
|
||||
} else {
|
||||
super.mergeFrom(other);
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList other) {
|
||||
if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList.getDefaultInstance()) return this;
|
||||
if (!other.name_.isEmpty()) {
|
||||
if (result.name_.isEmpty()) {
|
||||
result.name_ = new java.util.ArrayList<java.lang.String>();
|
||||
}
|
||||
result.name_.addAll(other.name_);
|
||||
}
|
||||
this.mergeUnknownFields(other.getUnknownFields());
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(
|
||||
com.google.protobuf.CodedInputStream input)
|
||||
throws java.io.IOException {
|
||||
return mergeFrom(input,
|
||||
com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(
|
||||
com.google.protobuf.CodedInputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
||||
com.google.protobuf.UnknownFieldSet.newBuilder(
|
||||
this.getUnknownFields());
|
||||
while (true) {
|
||||
int tag = input.readTag();
|
||||
switch (tag) {
|
||||
case 0:
|
||||
this.setUnknownFields(unknownFields.build());
|
||||
return this;
|
||||
default: {
|
||||
if (!parseUnknownField(input, unknownFields,
|
||||
extensionRegistry, tag)) {
|
||||
this.setUnknownFields(unknownFields.build());
|
||||
return this;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 10: {
|
||||
addName(input.readString());
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// repeated string name = 1;
|
||||
public java.util.List<java.lang.String> getNameList() {
|
||||
return java.util.Collections.unmodifiableList(result.name_);
|
||||
}
|
||||
public int getNameCount() {
|
||||
return result.getNameCount();
|
||||
}
|
||||
public java.lang.String getName(int index) {
|
||||
return result.getName(index);
|
||||
}
|
||||
public Builder setName(int index, java.lang.String value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.name_.set(index, value);
|
||||
return this;
|
||||
}
|
||||
public Builder addName(java.lang.String value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
if (result.name_.isEmpty()) {
|
||||
result.name_ = new java.util.ArrayList<java.lang.String>();
|
||||
}
|
||||
result.name_.add(value);
|
||||
return this;
|
||||
}
|
||||
public Builder addAllName(
|
||||
java.lang.Iterable<? extends java.lang.String> values) {
|
||||
if (result.name_.isEmpty()) {
|
||||
result.name_ = new java.util.ArrayList<java.lang.String>();
|
||||
}
|
||||
super.addAll(values, result.name_);
|
||||
return this;
|
||||
}
|
||||
public Builder clearName() {
|
||||
result.name_ = java.util.Collections.emptyList();
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
static {
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.getDescriptor();
|
||||
}
|
||||
}
|
||||
|
||||
private static com.google.protobuf.Descriptors.Descriptor
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableList_descriptor;
|
||||
private static
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableList_fieldAccessorTable;
|
||||
|
||||
public static com.google.protobuf.Descriptors.FileDescriptor
|
||||
getDescriptor() {
|
||||
return descriptor;
|
||||
}
|
||||
private static com.google.protobuf.Descriptors.FileDescriptor
|
||||
descriptor;
|
||||
static {
|
||||
java.lang.String descriptorData =
|
||||
"\n\026TableListMessage.proto\0223org.apache.had" +
|
||||
"oop.hbase.stargate.protobuf.generated\"\031\n" +
|
||||
"\tTableList\022\014\n\004name\030\001 \003(\t";
|
||||
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
|
||||
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
|
||||
public com.google.protobuf.ExtensionRegistry assignDescriptors(
|
||||
com.google.protobuf.Descriptors.FileDescriptor root) {
|
||||
descriptor = root;
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableList_descriptor =
|
||||
getDescriptor().getMessageTypes().get(0);
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableList_fieldAccessorTable = new
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableList_descriptor,
|
||||
new java.lang.String[] { "Name", },
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList.class,
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList.Builder.class);
|
||||
return null;
|
||||
}
|
||||
};
|
||||
com.google.protobuf.Descriptors.FileDescriptor
|
||||
.internalBuildGeneratedFileFrom(descriptorData,
|
||||
new com.google.protobuf.Descriptors.FileDescriptor[] {
|
||||
}, assigner);
|
||||
}
|
||||
}
|
|
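TableListMessage.TableList above is a simple repeated-string message. A minimal usage sketch (not part of the commit; the wrapper class name and table names are made up, the builder calls are those generated above):

import org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList;

// Hypothetical example class, for illustration only.
public class TableListProtobufExample {
  public static void main(String[] args) throws Exception {
    TableList tables = TableList.newBuilder()
        .addName("content")   // repeated string name = 1 (made-up table names)
        .addName("urls")
        .build();

    // Round-trip and iterate the repeated field via the generated accessors.
    TableList copy = TableList.parseFrom(tables.toByteArray());
    for (String name : copy.getNameList()) {
      System.out.println(name);
    }
  }
}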
@ -0,0 +1,911 @@
|
|||
// Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.protobuf.generated;
|
||||
|
||||
public final class TableSchemaMessage {
|
||||
private TableSchemaMessage() {}
|
||||
public static void registerAllExtensions(
|
||||
com.google.protobuf.ExtensionRegistry registry) {
|
||||
}
|
||||
public static final class TableSchema extends
|
||||
com.google.protobuf.GeneratedMessage {
|
||||
// Use TableSchema.newBuilder() to construct.
|
||||
private TableSchema() {}
|
||||
|
||||
private static final TableSchema defaultInstance = new TableSchema();
|
||||
public static TableSchema getDefaultInstance() {
|
||||
return defaultInstance;
|
||||
}
|
||||
|
||||
public TableSchema getDefaultInstanceForType() {
|
||||
return defaultInstance;
|
||||
}
|
||||
|
||||
public static final com.google.protobuf.Descriptors.Descriptor
|
||||
getDescriptor() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_descriptor;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||
internalGetFieldAccessorTable() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_fieldAccessorTable;
|
||||
}
|
||||
|
||||
public static final class Attribute extends
|
||||
com.google.protobuf.GeneratedMessage {
|
||||
// Use Attribute.newBuilder() to construct.
|
||||
private Attribute() {}
|
||||
|
||||
private static final Attribute defaultInstance = new Attribute();
|
||||
public static Attribute getDefaultInstance() {
|
||||
return defaultInstance;
|
||||
}
|
||||
|
||||
public Attribute getDefaultInstanceForType() {
|
||||
return defaultInstance;
|
||||
}
|
||||
|
||||
public static final com.google.protobuf.Descriptors.Descriptor
|
||||
getDescriptor() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_descriptor;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||
internalGetFieldAccessorTable() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_fieldAccessorTable;
|
||||
}
|
||||
|
||||
// required string name = 1;
|
||||
public static final int NAME_FIELD_NUMBER = 1;
|
||||
private boolean hasName;
|
||||
private java.lang.String name_ = "";
|
||||
public boolean hasName() { return hasName; }
|
||||
public java.lang.String getName() { return name_; }
|
||||
|
||||
// required string value = 2;
|
||||
public static final int VALUE_FIELD_NUMBER = 2;
|
||||
private boolean hasValue;
|
||||
private java.lang.String value_ = "";
|
||||
public boolean hasValue() { return hasValue; }
|
||||
public java.lang.String getValue() { return value_; }
|
||||
|
||||
@Override
|
||||
public final boolean isInitialized() {
|
||||
if (!hasName) return false;
|
||||
if (!hasValue) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(com.google.protobuf.CodedOutputStream output)
|
||||
throws java.io.IOException {
|
||||
if (hasName()) {
|
||||
output.writeString(1, getName());
|
||||
}
|
||||
if (hasValue()) {
|
||||
output.writeString(2, getValue());
|
||||
}
|
||||
getUnknownFields().writeTo(output);
|
||||
}
|
||||
|
||||
private int memoizedSerializedSize = -1;
|
||||
@Override
|
||||
public int getSerializedSize() {
|
||||
int size = memoizedSerializedSize;
|
||||
if (size != -1) return size;
|
||||
|
||||
size = 0;
|
||||
if (hasName()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeStringSize(1, getName());
|
||||
}
|
||||
if (hasValue()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeStringSize(2, getValue());
|
||||
}
|
||||
size += getUnknownFields().getSerializedSize();
|
||||
memoizedSerializedSize = size;
|
||||
return size;
|
||||
}
|
||||
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
|
||||
com.google.protobuf.ByteString data)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
|
||||
com.google.protobuf.ByteString data,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(byte[] data)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
|
||||
byte[] data,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(java.io.InputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
|
||||
java.io.InputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseDelimitedFrom(java.io.InputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeDelimitedFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseDelimitedFrom(
|
||||
java.io.InputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
|
||||
com.google.protobuf.CodedInputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
|
||||
com.google.protobuf.CodedInputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
|
||||
public static Builder newBuilder() { return new Builder(); }
|
||||
public Builder newBuilderForType() { return new Builder(); }
|
||||
public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute prototype) {
|
||||
return new Builder().mergeFrom(prototype);
|
||||
}
|
||||
public Builder toBuilder() { return newBuilder(this); }
|
||||
|
||||
public static final class Builder extends
|
||||
com.google.protobuf.GeneratedMessage.Builder<Builder> {
|
||||
// Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.newBuilder()
|
||||
private Builder() {}
|
||||
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute();
|
||||
|
||||
@Override
|
||||
protected org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute internalGetResult() {
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder clear() {
|
||||
result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute();
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder clone() {
|
||||
return new Builder().mergeFrom(result);
|
||||
}
|
||||
|
||||
@Override
|
||||
public com.google.protobuf.Descriptors.Descriptor
|
||||
getDescriptorForType() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.getDescriptor();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute getDefaultInstanceForType() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.getDefaultInstance();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute build() {
|
||||
if (result != null && !isInitialized()) {
|
||||
throw new com.google.protobuf.UninitializedMessageException(
|
||||
result);
|
||||
}
|
||||
return buildPartial();
|
||||
}
|
||||
|
||||
private org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute buildParsed()
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
if (!isInitialized()) {
|
||||
throw new com.google.protobuf.UninitializedMessageException(
|
||||
result).asInvalidProtocolBufferException();
|
||||
}
|
||||
return buildPartial();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute buildPartial() {
|
||||
if (result == null) {
|
||||
throw new IllegalStateException(
|
||||
"build() has already been called on this Builder."); }
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute returnMe = result;
|
||||
result = null;
|
||||
return returnMe;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(com.google.protobuf.Message other) {
|
||||
if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute) {
|
||||
return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute)other);
|
||||
} else {
|
||||
super.mergeFrom(other);
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute other) {
|
||||
if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.getDefaultInstance()) return this;
|
||||
if (other.hasName()) {
|
||||
setName(other.getName());
|
||||
}
|
||||
if (other.hasValue()) {
|
||||
setValue(other.getValue());
|
||||
}
|
||||
this.mergeUnknownFields(other.getUnknownFields());
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(
|
||||
com.google.protobuf.CodedInputStream input)
|
||||
throws java.io.IOException {
|
||||
return mergeFrom(input,
|
||||
com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(
|
||||
com.google.protobuf.CodedInputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
||||
com.google.protobuf.UnknownFieldSet.newBuilder(
|
||||
this.getUnknownFields());
|
||||
while (true) {
|
||||
int tag = input.readTag();
|
||||
switch (tag) {
|
||||
case 0:
|
||||
this.setUnknownFields(unknownFields.build());
|
||||
return this;
|
||||
default: {
|
||||
if (!parseUnknownField(input, unknownFields,
|
||||
extensionRegistry, tag)) {
|
||||
this.setUnknownFields(unknownFields.build());
|
||||
return this;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 10: {
|
||||
setName(input.readString());
|
||||
break;
|
||||
}
|
||||
case 18: {
|
||||
setValue(input.readString());
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// required string name = 1;
|
||||
public boolean hasName() {
|
||||
return result.hasName();
|
||||
}
|
||||
public java.lang.String getName() {
|
||||
return result.getName();
|
||||
}
|
||||
public Builder setName(java.lang.String value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.hasName = true;
|
||||
result.name_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearName() {
|
||||
result.hasName = false;
|
||||
result.name_ = "";
|
||||
return this;
|
||||
}
|
||||
|
||||
// required string value = 2;
|
||||
public boolean hasValue() {
|
||||
return result.hasValue();
|
||||
}
|
||||
public java.lang.String getValue() {
|
||||
return result.getValue();
|
||||
}
|
||||
public Builder setValue(java.lang.String value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.hasValue = true;
|
||||
result.value_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearValue() {
|
||||
result.hasValue = false;
|
||||
result.value_ = "";
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
static {
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.getDescriptor();
|
||||
}
|
||||
}
|
||||
|
||||
// optional string name = 1;
|
||||
public static final int NAME_FIELD_NUMBER = 1;
|
||||
private boolean hasName;
|
||||
private java.lang.String name_ = "";
|
||||
public boolean hasName() { return hasName; }
|
||||
public java.lang.String getName() { return name_; }
|
||||
|
||||
// repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchema.Attribute attrs = 2;
|
||||
public static final int ATTRS_FIELD_NUMBER = 2;
|
||||
private java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> attrs_ =
|
||||
java.util.Collections.emptyList();
|
||||
public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> getAttrsList() {
|
||||
return attrs_;
|
||||
}
|
||||
public int getAttrsCount() { return attrs_.size(); }
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute getAttrs(int index) {
|
||||
return attrs_.get(index);
|
||||
}
|
||||
|
||||
// repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchema columns = 3;
|
||||
public static final int COLUMNS_FIELD_NUMBER = 3;
|
||||
private java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema> columns_ =
|
||||
java.util.Collections.emptyList();
|
||||
public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema> getColumnsList() {
|
||||
return columns_;
|
||||
}
|
||||
public int getColumnsCount() { return columns_.size(); }
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema getColumns(int index) {
|
||||
return columns_.get(index);
|
||||
}
|
||||
|
||||
// optional bool inMemory = 4;
|
||||
public static final int INMEMORY_FIELD_NUMBER = 4;
|
||||
private boolean hasInMemory;
|
||||
private boolean inMemory_ = false;
|
||||
public boolean hasInMemory() { return hasInMemory; }
|
||||
public boolean getInMemory() { return inMemory_; }
|
||||
|
||||
// optional bool readOnly = 5;
|
||||
public static final int READONLY_FIELD_NUMBER = 5;
|
||||
private boolean hasReadOnly;
|
||||
private boolean readOnly_ = false;
|
||||
public boolean hasReadOnly() { return hasReadOnly; }
|
||||
public boolean getReadOnly() { return readOnly_; }
|
||||
|
||||
@Override
|
||||
public final boolean isInitialized() {
|
||||
for (org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute element : getAttrsList()) {
|
||||
if (!element.isInitialized()) return false;
|
||||
}
|
||||
for (org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema element : getColumnsList()) {
|
||||
if (!element.isInitialized()) return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(com.google.protobuf.CodedOutputStream output)
|
||||
throws java.io.IOException {
|
||||
if (hasName()) {
|
||||
output.writeString(1, getName());
|
||||
}
|
||||
for (org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute element : getAttrsList()) {
|
||||
output.writeMessage(2, element);
|
||||
}
|
||||
for (org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema element : getColumnsList()) {
|
||||
output.writeMessage(3, element);
|
||||
}
|
||||
if (hasInMemory()) {
|
||||
output.writeBool(4, getInMemory());
|
||||
}
|
||||
if (hasReadOnly()) {
|
||||
output.writeBool(5, getReadOnly());
|
||||
}
|
||||
getUnknownFields().writeTo(output);
|
||||
}
|
||||
|
||||
private int memoizedSerializedSize = -1;
|
||||
@Override
|
||||
public int getSerializedSize() {
|
||||
int size = memoizedSerializedSize;
|
||||
if (size != -1) return size;
|
||||
|
||||
size = 0;
|
||||
if (hasName()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeStringSize(1, getName());
|
||||
}
|
||||
for (org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute element : getAttrsList()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeMessageSize(2, element);
|
||||
}
|
||||
for (org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema element : getColumnsList()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeMessageSize(3, element);
|
||||
}
|
||||
if (hasInMemory()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeBoolSize(4, getInMemory());
|
||||
}
|
||||
if (hasReadOnly()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeBoolSize(5, getReadOnly());
|
||||
}
|
||||
size += getUnknownFields().getSerializedSize();
|
||||
memoizedSerializedSize = size;
|
||||
return size;
|
||||
}
|
||||
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
|
||||
com.google.protobuf.ByteString data)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
|
||||
com.google.protobuf.ByteString data,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(byte[] data)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
|
||||
byte[] data,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(java.io.InputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
|
||||
java.io.InputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseDelimitedFrom(java.io.InputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeDelimitedFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseDelimitedFrom(
|
||||
java.io.InputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
|
||||
com.google.protobuf.CodedInputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
|
||||
com.google.protobuf.CodedInputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
|
||||
public static Builder newBuilder() { return new Builder(); }
|
||||
public Builder newBuilderForType() { return new Builder(); }
|
||||
public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema prototype) {
|
||||
return new Builder().mergeFrom(prototype);
|
||||
}
|
||||
public Builder toBuilder() { return newBuilder(this); }
|
||||
|
||||
public static final class Builder extends
|
||||
com.google.protobuf.GeneratedMessage.Builder<Builder> {
|
||||
// Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.newBuilder()
|
||||
private Builder() {}
|
||||
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema();
|
||||
|
||||
@Override
|
||||
protected org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema internalGetResult() {
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder clear() {
|
||||
result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema();
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder clone() {
|
||||
return new Builder().mergeFrom(result);
|
||||
}
|
||||
|
||||
@Override
|
||||
public com.google.protobuf.Descriptors.Descriptor
|
||||
getDescriptorForType() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.getDescriptor();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema getDefaultInstanceForType() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.getDefaultInstance();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema build() {
|
||||
if (result != null && !isInitialized()) {
|
||||
throw new com.google.protobuf.UninitializedMessageException(
|
||||
result);
|
||||
}
|
||||
return buildPartial();
|
||||
}
|
||||
|
||||
private org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema buildParsed()
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
if (!isInitialized()) {
|
||||
throw new com.google.protobuf.UninitializedMessageException(
|
||||
result).asInvalidProtocolBufferException();
|
||||
}
|
||||
return buildPartial();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema buildPartial() {
|
||||
if (result == null) {
|
||||
throw new IllegalStateException(
|
||||
"build() has already been called on this Builder."); }
|
||||
if (result.attrs_ != java.util.Collections.EMPTY_LIST) {
|
||||
result.attrs_ =
|
||||
java.util.Collections.unmodifiableList(result.attrs_);
|
||||
}
|
||||
if (result.columns_ != java.util.Collections.EMPTY_LIST) {
|
||||
result.columns_ =
|
||||
java.util.Collections.unmodifiableList(result.columns_);
|
||||
}
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema returnMe = result;
|
||||
result = null;
|
||||
return returnMe;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(com.google.protobuf.Message other) {
|
||||
if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema) {
|
||||
return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema)other);
|
||||
} else {
|
||||
super.mergeFrom(other);
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema other) {
|
||||
if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.getDefaultInstance()) return this;
|
||||
if (other.hasName()) {
|
||||
setName(other.getName());
|
||||
}
|
||||
if (!other.attrs_.isEmpty()) {
|
||||
if (result.attrs_.isEmpty()) {
|
||||
result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute>();
|
||||
}
|
||||
result.attrs_.addAll(other.attrs_);
|
||||
}
|
||||
if (!other.columns_.isEmpty()) {
|
||||
if (result.columns_.isEmpty()) {
|
||||
result.columns_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema>();
|
||||
}
|
||||
result.columns_.addAll(other.columns_);
|
||||
}
|
||||
if (other.hasInMemory()) {
|
||||
setInMemory(other.getInMemory());
|
||||
}
|
||||
if (other.hasReadOnly()) {
|
||||
setReadOnly(other.getReadOnly());
|
||||
}
|
||||
this.mergeUnknownFields(other.getUnknownFields());
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(
|
||||
com.google.protobuf.CodedInputStream input)
|
||||
throws java.io.IOException {
|
||||
return mergeFrom(input,
|
||||
com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(
|
||||
com.google.protobuf.CodedInputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
||||
com.google.protobuf.UnknownFieldSet.newBuilder(
|
||||
this.getUnknownFields());
|
||||
while (true) {
|
||||
int tag = input.readTag();
|
||||
switch (tag) {
|
||||
case 0:
|
||||
this.setUnknownFields(unknownFields.build());
|
||||
return this;
|
||||
default: {
|
||||
if (!parseUnknownField(input, unknownFields,
|
||||
extensionRegistry, tag)) {
|
||||
this.setUnknownFields(unknownFields.build());
|
||||
return this;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 10: {
|
||||
setName(input.readString());
|
||||
break;
|
||||
}
|
||||
case 18: {
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder subBuilder = org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.newBuilder();
|
||||
input.readMessage(subBuilder, extensionRegistry);
|
||||
addAttrs(subBuilder.buildPartial());
|
||||
break;
|
||||
}
|
||||
case 26: {
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder subBuilder = org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.newBuilder();
|
||||
input.readMessage(subBuilder, extensionRegistry);
|
||||
addColumns(subBuilder.buildPartial());
|
||||
break;
|
||||
}
|
||||
case 32: {
|
||||
setInMemory(input.readBool());
|
||||
break;
|
||||
}
|
||||
case 40: {
|
||||
setReadOnly(input.readBool());
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// optional string name = 1;
|
||||
public boolean hasName() {
|
||||
return result.hasName();
|
||||
}
|
||||
public java.lang.String getName() {
|
||||
return result.getName();
|
||||
}
|
||||
public Builder setName(java.lang.String value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.hasName = true;
|
||||
result.name_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearName() {
|
||||
result.hasName = false;
|
||||
result.name_ = "";
|
||||
return this;
|
||||
}
|
||||
|
||||
// repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchema.Attribute attrs = 2;
|
||||
public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> getAttrsList() {
|
||||
return java.util.Collections.unmodifiableList(result.attrs_);
|
||||
}
|
||||
public int getAttrsCount() {
|
||||
return result.getAttrsCount();
|
||||
}
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute getAttrs(int index) {
|
||||
return result.getAttrs(index);
|
||||
}
|
||||
public Builder setAttrs(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.attrs_.set(index, value);
|
||||
return this;
|
||||
}
|
||||
public Builder setAttrs(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder builderForValue) {
|
||||
result.attrs_.set(index, builderForValue.build());
|
||||
return this;
|
||||
}
|
||||
public Builder addAttrs(org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
if (result.attrs_.isEmpty()) {
|
||||
result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute>();
|
||||
}
|
||||
result.attrs_.add(value);
|
||||
return this;
|
||||
}
|
||||
public Builder addAttrs(org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder builderForValue) {
|
||||
if (result.attrs_.isEmpty()) {
|
||||
result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute>();
|
||||
}
|
||||
result.attrs_.add(builderForValue.build());
|
||||
return this;
|
||||
}
|
||||
public Builder addAllAttrs(
|
||||
java.lang.Iterable<? extends org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> values) {
|
||||
if (result.attrs_.isEmpty()) {
|
||||
result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute>();
|
||||
}
|
||||
super.addAll(values, result.attrs_);
|
||||
return this;
|
||||
}
|
||||
public Builder clearAttrs() {
|
||||
result.attrs_ = java.util.Collections.emptyList();
|
||||
return this;
|
||||
}
|
||||
|
||||
// repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchema columns = 3;
|
||||
public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema> getColumnsList() {
|
||||
return java.util.Collections.unmodifiableList(result.columns_);
|
||||
}
|
||||
public int getColumnsCount() {
|
||||
return result.getColumnsCount();
|
||||
}
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema getColumns(int index) {
|
||||
return result.getColumns(index);
|
||||
}
|
||||
public Builder setColumns(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.columns_.set(index, value);
|
||||
return this;
|
||||
}
|
||||
public Builder setColumns(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder builderForValue) {
|
||||
result.columns_.set(index, builderForValue.build());
|
||||
return this;
|
||||
}
|
||||
public Builder addColumns(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
if (result.columns_.isEmpty()) {
|
||||
result.columns_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema>();
|
||||
}
|
||||
result.columns_.add(value);
|
||||
return this;
|
||||
}
|
||||
public Builder addColumns(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder builderForValue) {
|
||||
if (result.columns_.isEmpty()) {
|
||||
result.columns_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema>();
|
||||
}
|
||||
result.columns_.add(builderForValue.build());
|
||||
return this;
|
||||
}
|
||||
public Builder addAllColumns(
|
||||
java.lang.Iterable<? extends org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema> values) {
|
||||
if (result.columns_.isEmpty()) {
|
||||
result.columns_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema>();
|
||||
}
|
||||
super.addAll(values, result.columns_);
|
||||
return this;
|
||||
}
|
||||
public Builder clearColumns() {
|
||||
result.columns_ = java.util.Collections.emptyList();
|
||||
return this;
|
||||
}
|
||||
|
||||
// optional bool inMemory = 4;
|
||||
public boolean hasInMemory() {
|
||||
return result.hasInMemory();
|
||||
}
|
||||
public boolean getInMemory() {
|
||||
return result.getInMemory();
|
||||
}
|
||||
public Builder setInMemory(boolean value) {
|
||||
result.hasInMemory = true;
|
||||
result.inMemory_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearInMemory() {
|
||||
result.hasInMemory = false;
|
||||
result.inMemory_ = false;
|
||||
return this;
|
||||
}
|
||||
|
||||
// optional bool readOnly = 5;
|
||||
public boolean hasReadOnly() {
|
||||
return result.hasReadOnly();
|
||||
}
|
||||
public boolean getReadOnly() {
|
||||
return result.getReadOnly();
|
||||
}
|
||||
public Builder setReadOnly(boolean value) {
|
||||
result.hasReadOnly = true;
|
||||
result.readOnly_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearReadOnly() {
|
||||
result.hasReadOnly = false;
|
||||
result.readOnly_ = false;
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
static {
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.getDescriptor();
|
||||
}
|
||||
}
|
||||
|
||||
private static com.google.protobuf.Descriptors.Descriptor
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_descriptor;
|
||||
private static
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_fieldAccessorTable;
|
||||
private static com.google.protobuf.Descriptors.Descriptor
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_descriptor;
|
||||
private static
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_fieldAccessorTable;
|
||||
|
||||
public static com.google.protobuf.Descriptors.FileDescriptor
|
||||
getDescriptor() {
|
||||
return descriptor;
|
||||
}
|
||||
private static com.google.protobuf.Descriptors.FileDescriptor
|
||||
descriptor;
|
||||
static {
|
||||
java.lang.String descriptorData =
|
||||
"\n\030TableSchemaMessage.proto\0223org.apache.h" +
|
||||
"adoop.hbase.stargate.protobuf.generated\032" +
|
||||
"\031ColumnSchemaMessage.proto\"\230\002\n\013TableSche" +
|
||||
"ma\022\014\n\004name\030\001 \001(\t\022Y\n\005attrs\030\002 \003(\0132J.org.ap" +
|
||||
"ache.hadoop.hbase.stargate.protobuf.gene" +
|
||||
"rated.TableSchema.Attribute\022R\n\007columns\030\003" +
|
||||
" \003(\0132A.org.apache.hadoop.hbase.stargate." +
|
||||
"protobuf.generated.ColumnSchema\022\020\n\010inMem" +
|
||||
"ory\030\004 \001(\010\022\020\n\010readOnly\030\005 \001(\010\032(\n\tAttribute" +
|
||||
"\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t";
|
||||
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
|
||||
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
|
||||
public com.google.protobuf.ExtensionRegistry assignDescriptors(
|
||||
com.google.protobuf.Descriptors.FileDescriptor root) {
|
||||
descriptor = root;
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_descriptor =
|
||||
getDescriptor().getMessageTypes().get(0);
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_fieldAccessorTable = new
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_descriptor,
|
||||
new java.lang.String[] { "Name", "Attrs", "Columns", "InMemory", "ReadOnly", },
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.class,
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Builder.class);
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_descriptor =
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_descriptor.getNestedTypes().get(0);
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_fieldAccessorTable = new
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_descriptor,
|
||||
new java.lang.String[] { "Name", "Value", },
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.class,
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder.class);
|
||||
return null;
|
||||
}
|
||||
};
|
||||
com.google.protobuf.Descriptors.FileDescriptor
|
||||
.internalBuildGeneratedFileFrom(descriptorData,
|
||||
new com.google.protobuf.Descriptors.FileDescriptor[] {
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.getDescriptor(),
|
||||
}, assigner);
|
||||
}
|
||||
}
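For orientation, a minimal usage sketch (not part of the commit) of the generated TableSchema API above: the builder setters, build(), toByteArray() and parseFrom() are the methods defined in this file or the protobuf runtime, while the table name and attribute values are purely illustrative.

import org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema;

public class TableSchemaRoundTrip {
  public static void main(String[] args) throws Exception {
    // Build a schema with one attribute; all TableSchema fields are optional,
    // so build() cannot throw UninitializedMessageException here.
    TableSchema schema = TableSchema.newBuilder()
      .setName("content")                              // illustrative table name
      .addAttrs(TableSchema.Attribute.newBuilder()
        .setName("VERSIONS").setValue("3").build())    // Attribute requires name and value
      .setInMemory(false)
      .setReadOnly(false)
      .build();

    // Round-trip through the wire format using the generated parseFrom().
    TableSchema copy = TableSchema.parseFrom(schema.toByteArray());
    System.out.println(copy.getName() + " has " + copy.getAttrsCount() + " attribute(s)");
  }
}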
@@ -0,0 +1,489 @@
// Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.protobuf.generated;
|
||||
|
||||
public final class VersionMessage {
|
||||
private VersionMessage() {}
|
||||
public static void registerAllExtensions(
|
||||
com.google.protobuf.ExtensionRegistry registry) {
|
||||
}
|
||||
public static final class Version extends
|
||||
com.google.protobuf.GeneratedMessage {
|
||||
// Use Version.newBuilder() to construct.
|
||||
private Version() {}
|
||||
|
||||
private static final Version defaultInstance = new Version();
|
||||
public static Version getDefaultInstance() {
|
||||
return defaultInstance;
|
||||
}
|
||||
|
||||
public Version getDefaultInstanceForType() {
|
||||
return defaultInstance;
|
||||
}
|
||||
|
||||
public static final com.google.protobuf.Descriptors.Descriptor
|
||||
getDescriptor() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_descriptor;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||
internalGetFieldAccessorTable() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_fieldAccessorTable;
|
||||
}
|
||||
|
||||
// optional string stargateVersion = 1;
|
||||
public static final int STARGATEVERSION_FIELD_NUMBER = 1;
|
||||
private boolean hasStargateVersion;
|
||||
private java.lang.String stargateVersion_ = "";
|
||||
public boolean hasStargateVersion() { return hasStargateVersion; }
|
||||
public java.lang.String getStargateVersion() { return stargateVersion_; }
|
||||
|
||||
// optional string jvmVersion = 2;
|
||||
public static final int JVMVERSION_FIELD_NUMBER = 2;
|
||||
private boolean hasJvmVersion;
|
||||
private java.lang.String jvmVersion_ = "";
|
||||
public boolean hasJvmVersion() { return hasJvmVersion; }
|
||||
public java.lang.String getJvmVersion() { return jvmVersion_; }
|
||||
|
||||
// optional string osVersion = 3;
|
||||
public static final int OSVERSION_FIELD_NUMBER = 3;
|
||||
private boolean hasOsVersion;
|
||||
private java.lang.String osVersion_ = "";
|
||||
public boolean hasOsVersion() { return hasOsVersion; }
|
||||
public java.lang.String getOsVersion() { return osVersion_; }
|
||||
|
||||
// optional string serverVersion = 4;
|
||||
public static final int SERVERVERSION_FIELD_NUMBER = 4;
|
||||
private boolean hasServerVersion;
|
||||
private java.lang.String serverVersion_ = "";
|
||||
public boolean hasServerVersion() { return hasServerVersion; }
|
||||
public java.lang.String getServerVersion() { return serverVersion_; }
|
||||
|
||||
// optional string jerseyVersion = 5;
|
||||
public static final int JERSEYVERSION_FIELD_NUMBER = 5;
|
||||
private boolean hasJerseyVersion;
|
||||
private java.lang.String jerseyVersion_ = "";
|
||||
public boolean hasJerseyVersion() { return hasJerseyVersion; }
|
||||
public java.lang.String getJerseyVersion() { return jerseyVersion_; }
|
||||
|
||||
@Override
|
||||
public final boolean isInitialized() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(com.google.protobuf.CodedOutputStream output)
|
||||
throws java.io.IOException {
|
||||
if (hasStargateVersion()) {
|
||||
output.writeString(1, getStargateVersion());
|
||||
}
|
||||
if (hasJvmVersion()) {
|
||||
output.writeString(2, getJvmVersion());
|
||||
}
|
||||
if (hasOsVersion()) {
|
||||
output.writeString(3, getOsVersion());
|
||||
}
|
||||
if (hasServerVersion()) {
|
||||
output.writeString(4, getServerVersion());
|
||||
}
|
||||
if (hasJerseyVersion()) {
|
||||
output.writeString(5, getJerseyVersion());
|
||||
}
|
||||
getUnknownFields().writeTo(output);
|
||||
}
|
||||
|
||||
private int memoizedSerializedSize = -1;
|
||||
@Override
|
||||
public int getSerializedSize() {
|
||||
int size = memoizedSerializedSize;
|
||||
if (size != -1) return size;
|
||||
|
||||
size = 0;
|
||||
if (hasStargateVersion()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeStringSize(1, getStargateVersion());
|
||||
}
|
||||
if (hasJvmVersion()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeStringSize(2, getJvmVersion());
|
||||
}
|
||||
if (hasOsVersion()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeStringSize(3, getOsVersion());
|
||||
}
|
||||
if (hasServerVersion()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeStringSize(4, getServerVersion());
|
||||
}
|
||||
if (hasJerseyVersion()) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeStringSize(5, getJerseyVersion());
|
||||
}
|
||||
size += getUnknownFields().getSerializedSize();
|
||||
memoizedSerializedSize = size;
|
||||
return size;
|
||||
}
|
||||
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(
|
||||
com.google.protobuf.ByteString data)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(
|
||||
com.google.protobuf.ByteString data,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(byte[] data)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(
|
||||
byte[] data,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
return newBuilder().mergeFrom(data, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(java.io.InputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(
|
||||
java.io.InputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseDelimitedFrom(java.io.InputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeDelimitedFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseDelimitedFrom(
|
||||
java.io.InputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeDelimitedFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(
|
||||
com.google.protobuf.CodedInputStream input)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input).buildParsed();
|
||||
}
|
||||
public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(
|
||||
com.google.protobuf.CodedInputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
return newBuilder().mergeFrom(input, extensionRegistry)
|
||||
.buildParsed();
|
||||
}
|
||||
|
||||
public static Builder newBuilder() { return new Builder(); }
|
||||
public Builder newBuilderForType() { return new Builder(); }
|
||||
public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version prototype) {
|
||||
return new Builder().mergeFrom(prototype);
|
||||
}
|
||||
public Builder toBuilder() { return newBuilder(this); }
|
||||
|
||||
public static final class Builder extends
|
||||
com.google.protobuf.GeneratedMessage.Builder<Builder> {
|
||||
// Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version.newBuilder()
|
||||
private Builder() {}
|
||||
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version result = new org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version();
|
||||
|
||||
@Override
|
||||
protected org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version internalGetResult() {
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder clear() {
|
||||
result = new org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version();
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder clone() {
|
||||
return new Builder().mergeFrom(result);
|
||||
}
|
||||
|
||||
@Override
|
||||
public com.google.protobuf.Descriptors.Descriptor
|
||||
getDescriptorForType() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version.getDescriptor();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version getDefaultInstanceForType() {
|
||||
return org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version.getDefaultInstance();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version build() {
|
||||
if (result != null && !isInitialized()) {
|
||||
throw new com.google.protobuf.UninitializedMessageException(
|
||||
result);
|
||||
}
|
||||
return buildPartial();
|
||||
}
|
||||
|
||||
private org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version buildParsed()
|
||||
throws com.google.protobuf.InvalidProtocolBufferException {
|
||||
if (!isInitialized()) {
|
||||
throw new com.google.protobuf.UninitializedMessageException(
|
||||
result).asInvalidProtocolBufferException();
|
||||
}
|
||||
return buildPartial();
|
||||
}
|
||||
|
||||
public org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version buildPartial() {
|
||||
if (result == null) {
|
||||
throw new IllegalStateException(
|
||||
"build() has already been called on this Builder."); }
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version returnMe = result;
|
||||
result = null;
|
||||
return returnMe;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(com.google.protobuf.Message other) {
|
||||
if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version) {
|
||||
return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version)other);
|
||||
} else {
|
||||
super.mergeFrom(other);
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version other) {
|
||||
if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version.getDefaultInstance()) return this;
|
||||
if (other.hasStargateVersion()) {
|
||||
setStargateVersion(other.getStargateVersion());
|
||||
}
|
||||
if (other.hasJvmVersion()) {
|
||||
setJvmVersion(other.getJvmVersion());
|
||||
}
|
||||
if (other.hasOsVersion()) {
|
||||
setOsVersion(other.getOsVersion());
|
||||
}
|
||||
if (other.hasServerVersion()) {
|
||||
setServerVersion(other.getServerVersion());
|
||||
}
|
||||
if (other.hasJerseyVersion()) {
|
||||
setJerseyVersion(other.getJerseyVersion());
|
||||
}
|
||||
this.mergeUnknownFields(other.getUnknownFields());
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(
|
||||
com.google.protobuf.CodedInputStream input)
|
||||
throws java.io.IOException {
|
||||
return mergeFrom(input,
|
||||
com.google.protobuf.ExtensionRegistry.getEmptyRegistry());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Builder mergeFrom(
|
||||
com.google.protobuf.CodedInputStream input,
|
||||
com.google.protobuf.ExtensionRegistry extensionRegistry)
|
||||
throws java.io.IOException {
|
||||
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
||||
com.google.protobuf.UnknownFieldSet.newBuilder(
|
||||
this.getUnknownFields());
|
||||
while (true) {
|
||||
int tag = input.readTag();
|
||||
switch (tag) {
|
||||
case 0:
|
||||
this.setUnknownFields(unknownFields.build());
|
||||
return this;
|
||||
default: {
|
||||
if (!parseUnknownField(input, unknownFields,
|
||||
extensionRegistry, tag)) {
|
||||
this.setUnknownFields(unknownFields.build());
|
||||
return this;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 10: {
|
||||
setStargateVersion(input.readString());
|
||||
break;
|
||||
}
|
||||
case 18: {
|
||||
setJvmVersion(input.readString());
|
||||
break;
|
||||
}
|
||||
case 26: {
|
||||
setOsVersion(input.readString());
|
||||
break;
|
||||
}
|
||||
case 34: {
|
||||
setServerVersion(input.readString());
|
||||
break;
|
||||
}
|
||||
case 42: {
|
||||
setJerseyVersion(input.readString());
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// optional string stargateVersion = 1;
|
||||
public boolean hasStargateVersion() {
|
||||
return result.hasStargateVersion();
|
||||
}
|
||||
public java.lang.String getStargateVersion() {
|
||||
return result.getStargateVersion();
|
||||
}
|
||||
public Builder setStargateVersion(java.lang.String value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.hasStargateVersion = true;
|
||||
result.stargateVersion_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearStargateVersion() {
|
||||
result.hasStargateVersion = false;
|
||||
result.stargateVersion_ = "";
|
||||
return this;
|
||||
}
|
||||
|
||||
// optional string jvmVersion = 2;
|
||||
public boolean hasJvmVersion() {
|
||||
return result.hasJvmVersion();
|
||||
}
|
||||
public java.lang.String getJvmVersion() {
|
||||
return result.getJvmVersion();
|
||||
}
|
||||
public Builder setJvmVersion(java.lang.String value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.hasJvmVersion = true;
|
||||
result.jvmVersion_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearJvmVersion() {
|
||||
result.hasJvmVersion = false;
|
||||
result.jvmVersion_ = "";
|
||||
return this;
|
||||
}
|
||||
|
||||
// optional string osVersion = 3;
|
||||
public boolean hasOsVersion() {
|
||||
return result.hasOsVersion();
|
||||
}
|
||||
public java.lang.String getOsVersion() {
|
||||
return result.getOsVersion();
|
||||
}
|
||||
public Builder setOsVersion(java.lang.String value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.hasOsVersion = true;
|
||||
result.osVersion_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearOsVersion() {
|
||||
result.hasOsVersion = false;
|
||||
result.osVersion_ = "";
|
||||
return this;
|
||||
}
|
||||
|
||||
// optional string serverVersion = 4;
|
||||
public boolean hasServerVersion() {
|
||||
return result.hasServerVersion();
|
||||
}
|
||||
public java.lang.String getServerVersion() {
|
||||
return result.getServerVersion();
|
||||
}
|
||||
public Builder setServerVersion(java.lang.String value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.hasServerVersion = true;
|
||||
result.serverVersion_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearServerVersion() {
|
||||
result.hasServerVersion = false;
|
||||
result.serverVersion_ = "";
|
||||
return this;
|
||||
}
|
||||
|
||||
// optional string jerseyVersion = 5;
|
||||
public boolean hasJerseyVersion() {
|
||||
return result.hasJerseyVersion();
|
||||
}
|
||||
public java.lang.String getJerseyVersion() {
|
||||
return result.getJerseyVersion();
|
||||
}
|
||||
public Builder setJerseyVersion(java.lang.String value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
result.hasJerseyVersion = true;
|
||||
result.jerseyVersion_ = value;
|
||||
return this;
|
||||
}
|
||||
public Builder clearJerseyVersion() {
|
||||
result.hasJerseyVersion = false;
|
||||
result.jerseyVersion_ = "";
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
static {
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.getDescriptor();
|
||||
}
|
||||
}
|
||||
|
||||
private static com.google.protobuf.Descriptors.Descriptor
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_descriptor;
|
||||
private static
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_fieldAccessorTable;
|
||||
|
||||
public static com.google.protobuf.Descriptors.FileDescriptor
|
||||
getDescriptor() {
|
||||
return descriptor;
|
||||
}
|
||||
private static com.google.protobuf.Descriptors.FileDescriptor
|
||||
descriptor;
|
||||
static {
|
||||
java.lang.String descriptorData =
|
||||
"\n\024VersionMessage.proto\0223org.apache.hadoo" +
|
||||
"p.hbase.stargate.protobuf.generated\"w\n\007V" +
|
||||
"ersion\022\027\n\017stargateVersion\030\001 \001(\t\022\022\n\njvmVe" +
|
||||
"rsion\030\002 \001(\t\022\021\n\tosVersion\030\003 \001(\t\022\025\n\rserver" +
|
||||
"Version\030\004 \001(\t\022\025\n\rjerseyVersion\030\005 \001(\t";
|
||||
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
|
||||
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
|
||||
public com.google.protobuf.ExtensionRegistry assignDescriptors(
|
||||
com.google.protobuf.Descriptors.FileDescriptor root) {
|
||||
descriptor = root;
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_descriptor =
|
||||
getDescriptor().getMessageTypes().get(0);
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_fieldAccessorTable = new
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
||||
internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_descriptor,
|
||||
new java.lang.String[] { "StargateVersion", "JvmVersion", "OsVersion", "ServerVersion", "JerseyVersion", },
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version.class,
|
||||
org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version.Builder.class);
|
||||
return null;
|
||||
}
|
||||
};
|
||||
com.google.protobuf.Descriptors.FileDescriptor
|
||||
.internalBuildGeneratedFileFrom(descriptorData,
|
||||
new com.google.protobuf.Descriptors.FileDescriptor[] {
|
||||
}, assigner);
|
||||
}
|
||||
}
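A similarly hedged sketch for the Version message generated above; the setters and parseFrom() come from this file, and the version strings are illustrative stand-ins.

import org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version;

public class VersionRoundTrip {
  public static void main(String[] args) throws Exception {
    Version v = Version.newBuilder()
      .setStargateVersion("0.0.1")                         // illustrative values
      .setJvmVersion(System.getProperty("java.version"))
      .setOsVersion(System.getProperty("os.name"))
      .build();                                            // all fields are optional
    Version copy = Version.parseFrom(v.toByteArray());
    System.out.println(copy.getStargateVersion());
  }
}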
@@ -0,0 +1,83 @@
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate.provider;

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

import javax.ws.rs.ext.ContextResolver;
import javax.ws.rs.ext.Provider;
import javax.xml.bind.JAXBContext;

import org.apache.hadoop.hbase.stargate.model.CellModel;
import org.apache.hadoop.hbase.stargate.model.CellSetModel;
import org.apache.hadoop.hbase.stargate.model.ColumnSchemaModel;
import org.apache.hadoop.hbase.stargate.model.RowModel;
import org.apache.hadoop.hbase.stargate.model.ScannerModel;
import org.apache.hadoop.hbase.stargate.model.StorageClusterStatusModel;
import org.apache.hadoop.hbase.stargate.model.StorageClusterVersionModel;
import org.apache.hadoop.hbase.stargate.model.TableInfoModel;
import org.apache.hadoop.hbase.stargate.model.TableListModel;
import org.apache.hadoop.hbase.stargate.model.TableModel;
import org.apache.hadoop.hbase.stargate.model.TableRegionModel;
import org.apache.hadoop.hbase.stargate.model.TableSchemaModel;
import org.apache.hadoop.hbase.stargate.model.VersionModel;

import com.sun.jersey.api.json.JSONConfiguration;
import com.sun.jersey.api.json.JSONJAXBContext;

@Provider
public class JAXBContextResolver implements ContextResolver<JAXBContext> {

  private final JAXBContext context;

  private final Set<Class<?>> types;

  private final Class<?>[] cTypes = {
    CellModel.class,
    CellSetModel.class,
    ColumnSchemaModel.class,
    RowModel.class,
    ScannerModel.class,
    StorageClusterStatusModel.class,
    StorageClusterVersionModel.class,
    TableInfoModel.class,
    TableListModel.class,
    TableModel.class,
    TableRegionModel.class,
    TableSchemaModel.class,
    VersionModel.class
  };

  @SuppressWarnings("unchecked")
  public JAXBContextResolver() throws Exception {
    this.types = new HashSet(Arrays.asList(cTypes));
    this.context = new JSONJAXBContext(JSONConfiguration.natural().build(),
      cTypes);
  }

  @Override
  public JAXBContext getContext(Class<?> objectType) {
    System.out.println("Executed getContext");
    return (types.contains(objectType)) ? context : null;
  }
}
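A small check (an assumption-laden sketch, not part of the commit) of how the resolver behaves: registered model types share the one JSONJAXBContext built in the constructor, while unregistered types fall through and return null so Jersey can apply its defaults.

import javax.xml.bind.JAXBContext;
import org.apache.hadoop.hbase.stargate.model.VersionModel;
import org.apache.hadoop.hbase.stargate.provider.JAXBContextResolver;

public class ResolverCheck {
  public static void main(String[] args) throws Exception {
    JAXBContextResolver resolver = new JAXBContextResolver();
    JAXBContext ctx = resolver.getContext(VersionModel.class);
    System.out.println(ctx != null);                        // true: VersionModel is in cTypes
    System.out.println(resolver.getContext(String.class));  // null: not a registered type
  }
}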
@@ -0,0 +1,83 @@
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate.provider.consumer;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.lang.annotation.Annotation;
import java.lang.reflect.Type;

import javax.ws.rs.Consumes;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.ext.MessageBodyReader;
import javax.ws.rs.ext.Provider;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.stargate.Constants;
import org.apache.hadoop.hbase.stargate.model.IProtobufWrapper;

@Provider
@Consumes(Constants.MIMETYPE_PROTOBUF)
public class ProtobufMessageBodyConsumer implements MessageBodyReader<IProtobufWrapper> {
  private static final Log LOG =
    LogFactory.getLog(ProtobufMessageBodyConsumer.class);

  @Override
  public boolean isReadable(Class<?> type, Type genericType,
      Annotation[] annotations, MediaType mediaType) {
    return IProtobufWrapper.class.isAssignableFrom(type);
  }

  @Override
  public IProtobufWrapper readFrom(Class<IProtobufWrapper> type, Type genericType,
      Annotation[] annotations, MediaType mediaType,
      MultivaluedMap<String, String> httpHeaders, InputStream inputStream)
      throws IOException, WebApplicationException {
    IProtobufWrapper obj = null;
    try {
      obj = type.newInstance();
      // Buffer the whole request body: the wrapper needs the complete message
      // bytes before it can hand them to the protobuf parser.
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      byte[] buffer = new byte[4096];
      int read;
      do {
        read = inputStream.read(buffer, 0, buffer.length);
        if (read > 0) {
          baos.write(buffer, 0, read);
        }
      } while (read > 0);
      if (LOG.isDebugEnabled()) {
        LOG.debug(getClass() + ": read " + baos.size() + " bytes from " +
          inputStream);
      }
      obj = obj.getObjectFromMessage(baos.toByteArray());
    } catch (InstantiationException e) {
      throw new WebApplicationException(e);
    } catch (IllegalAccessException e) {
      throw new WebApplicationException(e);
    }
    return obj;
  }

}
|
|
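The reader above and the protobuf writer further below both lean on the IProtobufWrapper contract: createProtobufOutput() serializes a model to protobuf bytes and getObjectFromMessage() repopulates a model from them. The sketch below round-trips a CellSetModel the same way the row-resource tests in this commit do; it is illustrative only and assumes the model classes behave as those tests exercise them.

import org.apache.hadoop.hbase.stargate.model.CellModel;
import org.apache.hadoop.hbase.stargate.model.CellSetModel;
import org.apache.hadoop.hbase.stargate.model.RowModel;
import org.apache.hadoop.hbase.util.Bytes;

public class ProtobufRoundTrip {
  public static void main(String[] args) throws Exception {
    // build a one-row, one-cell set, mirroring the test fixtures
    RowModel row = new RowModel("testrow1");
    row.addCell(new CellModel(Bytes.toBytes("a:"), Bytes.toBytes("testvalue1")));
    CellSetModel cellSet = new CellSetModel();
    cellSet.addRow(row);

    // what the producer writes onto the wire ...
    byte[] wire = cellSet.createProtobufOutput();

    // ... and what the consumer does after draining the request body
    CellSetModel parsed = new CellSetModel();
    parsed.getObjectFromMessage(wire);
    System.out.println(Bytes.toString(
      parsed.getRows().get(0).getCells().get(0).getValue()));
  }
}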
@@ -0,0 +1,63 @@
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate.provider.producer;

import java.io.IOException;
import java.io.OutputStream;
import java.lang.annotation.Annotation;
import java.lang.reflect.Type;

import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.ext.MessageBodyWriter;
import javax.ws.rs.ext.Provider;

import org.apache.hadoop.hbase.stargate.Constants;

@Provider
@Produces(Constants.MIMETYPE_TEXT)
public class PlainTextMessageBodyProducer implements MessageBodyWriter<Object> {

  @Override
  public long getSize(Object object, Class<?> type, Type genericType,
      Annotation[] annotations, MediaType mediaType) {
    // TODO: this is cheating; the serialized form should either be cached
    // here or its size computed some better way
    return object.toString().getBytes().length;
  }

  @Override
  public boolean isWriteable(Class<?> arg0, Type arg1, Annotation[] arg2,
      MediaType arg3) {
    return true;
  }

  @Override
  public void writeTo(Object object, Class<?> type, Type genericType,
      Annotation[] annotations, MediaType mediaType,
      MultivaluedMap<String, Object> httpHeaders, OutputStream outStream)
      throws IOException, WebApplicationException {
    outStream.write(object.toString().getBytes());
  }

}
@@ -0,0 +1,72 @@
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate.provider.producer;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.annotation.Annotation;
import java.lang.reflect.Type;
import java.util.Map;
import java.util.WeakHashMap;

import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.ext.MessageBodyWriter;
import javax.ws.rs.ext.Provider;

import org.apache.hadoop.hbase.stargate.Constants;
import org.apache.hadoop.hbase.stargate.model.IProtobufWrapper;

@Provider
@Produces(Constants.MIMETYPE_PROTOBUF)
public class ProtobufMessageBodyProducer implements MessageBodyWriter<IProtobufWrapper> {

  // getSize() serializes the model and parks the bytes here so that writeTo(),
  // called next for the same instance, does not have to serialize twice; the
  // weak keys let stale entries disappear once the model is unreachable
  private Map<Object, byte[]> buffer = new WeakHashMap<Object, byte[]>();

  @Override
  public boolean isWriteable(Class<?> type, Type genericType,
      Annotation[] annotations, MediaType mediaType) {
    return IProtobufWrapper.class.isAssignableFrom(type);
  }

  @Override
  public long getSize(IProtobufWrapper m, Class<?> type, Type genericType,
      Annotation[] annotations, MediaType mediaType) {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    try {
      baos.write(m.createProtobufOutput());
    } catch (IOException e) {
      return -1;
    }
    byte[] bytes = baos.toByteArray();
    buffer.put(m, bytes);
    return bytes.length;
  }

  @Override
  public void writeTo(IProtobufWrapper m, Class<?> type, Type genericType,
      Annotation[] annotations, MediaType mediaType,
      MultivaluedMap<String, Object> httpHeaders, OutputStream entityStream)
      throws IOException, WebApplicationException {
    entityStream.write(buffer.remove(m));
  }

}
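With the JAXB resolver and the two producers in place, a resource method stays format-agnostic: it returns a model object and the JAX-RS runtime picks XML, JSON, or protobuf from the Accept header. A minimal sketch; the DemoResource class and its /demo path are hypothetical and only illustrate the content negotiation, they are not part of this change.

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;

import org.apache.hadoop.hbase.stargate.model.CellSetModel;

@Path("/demo")
public class DemoResource {
  // text/xml and application/json go through the JAXB context;
  // application/x-protobuf goes through ProtobufMessageBodyProducer above
  @GET
  @Produces({"text/xml", "application/json", "application/x-protobuf"})
  public CellSetModel get() {
    return new CellSetModel();
  }
}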
@@ -0,0 +1,130 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
/**
 * Copyright 2007 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-->
<configuration>
  <property>
    <name>hbase.regionserver.msginterval</name>
    <value>1000</value>
    <description>Interval between messages from the RegionServer to HMaster
    in milliseconds. Default is 15. Set this value low if you want unit
    tests to be responsive.
    </description>
  </property>
  <property>
    <name>hbase.client.pause</name>
    <value>5000</value>
    <description>General client pause value. Used mostly as value to wait
    before running a retry of a failed get, region lookup, etc.</description>
  </property>
  <property>
    <name>hbase.master.meta.thread.rescanfrequency</name>
    <value>10000</value>
    <description>How long the HMaster sleeps (in milliseconds) between scans of
    the root and meta tables.
    </description>
  </property>
  <property>
    <name>hbase.server.thread.wakefrequency</name>
    <value>1000</value>
    <description>Time to sleep in between searches for work (in milliseconds).
    Used as sleep interval by service threads such as META scanner and log roller.
    </description>
  </property>
  <property>
    <name>hbase.regionserver.handler.count</name>
    <value>5</value>
    <description>Count of RPC Server instances spun up on RegionServers.
    The same property is used by the HMaster for count of master handlers.
    Default is 10.
    </description>
  </property>
  <property>
    <name>hbase.master.lease.period</name>
    <value>6000</value>
    <description>Length of time the master will wait before timing out a region
    server lease. Since region servers report in every second (see above), this
    value has been reduced so that the master will notice a dead region server
    sooner. The default is 30 seconds.
    </description>
  </property>
  <property>
    <name>hbase.master.info.port</name>
    <value>-1</value>
    <description>The port for the hbase master web UI.
    Set to -1 if you do not want the info server to run.
    </description>
  </property>
  <property>
    <name>hbase.regionserver.info.port</name>
    <value>-1</value>
    <description>The port for the hbase regionserver web UI.
    Set to -1 if you do not want the info server to run.
    </description>
  </property>
  <property>
    <name>hbase.regionserver.info.port.auto</name>
    <value>true</value>
    <description>Info server auto port bind. Enables automatic port
    search if hbase.regionserver.info.port is already in use.
    Enabled for testing to run multiple tests on one machine.
    </description>
  </property>
  <property>
    <name>hbase.master.lease.thread.wakefrequency</name>
    <value>3000</value>
    <description>The interval between checks for expired region server leases.
    This value has been reduced due to the other reduced values above so that
    the master will notice a dead region server sooner. The default is 15 seconds.
    </description>
  </property>
  <property>
    <name>hbase.regionserver.optionalcacheflushinterval</name>
    <value>10000</value>
    <description>
    Amount of time to wait since the last time a region was flushed before
    invoking an optional cache flush. Default 60,000.
    </description>
  </property>
  <property>
    <name>hbase.regionserver.safemode</name>
    <value>false</value>
    <description>
    Turn on/off safe mode in region server. Always on for production, always off
    for tests.
    </description>
  </property>
  <property>
    <name>hbase.hregion.max.filesize</name>
    <value>67108864</value>
    <description>
    Maximum desired file size for an HRegion. If filesize exceeds
    value + (value / 2), the HRegion is split in two. Default: 256M.

    Keep the maximum filesize small so we split more often in tests.
    </description>
  </property>
  <property>
    <name>hadoop.log.dir</name>
    <value>${user.dir}/../logs</value>
  </property>
</configuration>
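This file is the reduced-timeout configuration the test mini cluster picks up from the classpath. A minimal sketch of how it comes into play (the ShowTestConfig class is hypothetical; HBaseConfiguration simply layers hbase-site.xml over the defaults, and the test harness later in this commit also honors a test.stargate.port override):

import org.apache.hadoop.hbase.HBaseConfiguration;

public class ShowTestConfig {
  public static void main(String[] args) {
    // the values above override hbase-default.xml for every mini-cluster test
    HBaseConfiguration conf = new HBaseConfiguration();
    System.out.println(conf.get("hbase.master.info.port"));       // -1: no info server
    System.out.println(conf.getInt("test.stargate.port", 38080)); // servlet test port
  }
}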
@@ -0,0 +1,47 @@
# Define some default values that can be overridden by system properties
hbase.root.logger=INFO,console
hbase.log.dir=.
hbase.log.file=hbase.log

# Define the root logger to the system property "hbase.root.logger".
log4j.rootLogger=${hbase.root.logger}

# Logging Threshold
log4j.threshhold=ALL

#
# Daily Rolling File Appender
#
log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}

# Rollover at midnight
log4j.appender.DRFA.DatePattern=.yyyy-MM-dd

# 30-day backup
#log4j.appender.DRFA.MaxBackupIndex=30
log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout

# Pattern format: Date LogLevel LoggerName LogMessage
#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n

# Debugging Pattern format
log4j.appender.DRFA.layout.ConversionPattern=%d %-5p [%t] %C{2}(%L): %m%n


#
# console
# Add "console" to rootlogger above if you want to use this
#
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d %-5p [%t] %C{2}(%L): %m%n

# Custom Logging levels

#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG

log4j.logger.org.apache.hadoop=WARN
log4j.logger.org.apache.zookeeper=ERROR
log4j.logger.org.apache.hadoop.hbase=DEBUG
@ -0,0 +1,239 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hbase.HBaseConfiguration;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.LocalHBaseCluster;
|
||||
import org.apache.hadoop.hbase.MiniHBaseCluster;
|
||||
import org.apache.hadoop.hbase.MiniZooKeeperCluster;
|
||||
import org.apache.hadoop.hbase.client.HTable;
|
||||
import org.apache.hadoop.hbase.util.FSUtils;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
import org.apache.log4j.Level;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.mortbay.jetty.Server;
|
||||
import org.mortbay.jetty.servlet.Context;
|
||||
import org.mortbay.jetty.servlet.ServletHolder;
|
||||
|
||||
import com.sun.jersey.spi.container.servlet.ServletContainer;
|
||||
|
||||
import junit.framework.TestCase;
|
||||
|
||||
public class MiniClusterTestCase extends TestCase {
|
||||
private static final Log LOG =
|
||||
LogFactory.getLog(MiniClusterTestCase.class);
|
||||
|
||||
public static final String MIMETYPE_BINARY = "application/octet-stream";
|
||||
public static final String MIMETYPE_JSON = "application/json";
|
||||
public static final String MIMETYPE_PLAIN = "text/plain";
|
||||
public static final String MIMETYPE_PROTOBUF = "application/x-protobuf";
|
||||
public static final String MIMETYPE_XML = "text/xml";
|
||||
|
||||
// use a nonstandard port
|
||||
public static final int DEFAULT_TEST_PORT = 38080;
|
||||
|
||||
protected static HBaseConfiguration conf = new HBaseConfiguration();
|
||||
protected static MiniZooKeeperCluster zooKeeperCluster;
|
||||
protected static MiniHBaseCluster hbaseCluster;
|
||||
protected static MiniDFSCluster dfsCluster;
|
||||
protected static File testDir;
|
||||
protected static int testServletPort;
|
||||
protected static Server server;
|
||||
|
||||
public static boolean isMiniClusterRunning() {
|
||||
return server != null;
|
||||
}
|
||||
|
||||
private static void startDFS() throws Exception {
|
||||
if (dfsCluster != null) {
|
||||
LOG.error("MiniDFSCluster already running");
|
||||
return;
|
||||
}
|
||||
Path path = new Path(
|
||||
conf.get("test.build.data",
|
||||
System.getProperty("test.build.data", "build/test/data")));
|
||||
FileSystem testFS = FileSystem.get(conf);
|
||||
if (testFS.exists(path)) {
|
||||
testFS.delete(path, true);
|
||||
}
|
||||
testDir = new File(path.toString());
|
||||
dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
|
||||
FileSystem filesystem = dfsCluster.getFileSystem();
|
||||
conf.set("fs.default.name", filesystem.getUri().toString());
|
||||
Path parentdir = filesystem.getHomeDirectory();
|
||||
conf.set(HConstants.HBASE_DIR, parentdir.toString());
|
||||
filesystem.mkdirs(parentdir);
|
||||
FSUtils.setVersion(filesystem, parentdir);
|
||||
LOG.info("started MiniDFSCluster in " + testDir.toString());
|
||||
}
|
||||
|
||||
private static void stopDFS() {
|
||||
if (dfsCluster != null) try {
|
||||
dfsCluster.shutdown();
|
||||
dfsCluster = null;
|
||||
} catch (Exception e) {
|
||||
LOG.warn(StringUtils.stringifyException(e));
|
||||
}
|
||||
}
|
||||
|
||||
private static void startZooKeeper() throws Exception {
|
||||
if (zooKeeperCluster != null) {
|
||||
LOG.error("ZooKeeper already running");
|
||||
return;
|
||||
}
|
||||
zooKeeperCluster = new MiniZooKeeperCluster();
|
||||
zooKeeperCluster.startup(testDir);
|
||||
LOG.info("started " + zooKeeperCluster.getClass().getName());
|
||||
}
|
||||
|
||||
private static void stopZooKeeper() {
|
||||
if (zooKeeperCluster != null) try {
|
||||
zooKeeperCluster.shutdown();
|
||||
zooKeeperCluster = null;
|
||||
} catch (Exception e) {
|
||||
LOG.warn(StringUtils.stringifyException(e));
|
||||
}
|
||||
}
|
||||
|
||||
private static void startHBase() throws Exception {
|
||||
if (hbaseCluster != null) {
|
||||
LOG.error("MiniHBaseCluster already running");
|
||||
return;
|
||||
}
|
||||
hbaseCluster = new MiniHBaseCluster(conf, 1);
|
||||
// opening the META table ensures that cluster is running
|
||||
new HTable(conf, HConstants.META_TABLE_NAME);
|
||||
LOG.info("started MiniHBaseCluster");
|
||||
}
|
||||
|
||||
private static void stopHBase() {
|
||||
if (hbaseCluster != null) try {
|
||||
for (LocalHBaseCluster.RegionServerThread regionThread:
|
||||
hbaseCluster.getRegionThreads()) {
|
||||
regionThread.getRegionServer().abort();
|
||||
}
|
||||
hbaseCluster.shutdown();
|
||||
hbaseCluster = null;
|
||||
} catch (Exception e) {
|
||||
LOG.warn(StringUtils.stringifyException(e));
|
||||
}
|
||||
}
|
||||
|
||||
private static void startServletContainer() throws Exception {
|
||||
if (server != null) {
|
||||
LOG.error("ServletContainer already running");
|
||||
return;
|
||||
}
|
||||
|
||||
// set up the Jersey servlet container for Jetty
|
||||
ServletHolder sh = new ServletHolder(ServletContainer.class);
|
||||
sh.setInitParameter(
|
||||
"com.sun.jersey.config.property.resourceConfigClass",
|
||||
ResourceConfig.class.getCanonicalName());
|
||||
sh.setInitParameter("com.sun.jersey.config.property.packages",
|
||||
"jetty");
|
||||
|
||||
LOG.info("configured " + ServletContainer.class.getName());
|
||||
|
||||
// set up Jetty and run the embedded server
|
||||
testServletPort = conf.getInt("test.stargate.port", DEFAULT_TEST_PORT);
|
||||
server = new Server(testServletPort);
|
||||
server.setSendServerVersion(false);
|
||||
server.setSendDateHeader(false);
|
||||
// set up context
|
||||
Context context = new Context(server, "/", Context.SESSIONS);
|
||||
context.addServlet(sh, "/*");
|
||||
// start the server
|
||||
server.start();
|
||||
|
||||
LOG.info("started " + server.getClass().getName() + " on port " +
|
||||
testServletPort);
|
||||
}
|
||||
|
||||
private static void stopServletContainer() {
|
||||
if (server != null) try {
|
||||
server.stop();
|
||||
server = null;
|
||||
} catch (Exception e) {
|
||||
LOG.warn(StringUtils.stringifyException(e));
|
||||
}
|
||||
}
|
||||
|
||||
public static void startMiniCluster() throws Exception {
|
||||
try {
|
||||
startDFS();
|
||||
startZooKeeper();
|
||||
startHBase();
|
||||
startServletContainer();
|
||||
} catch (Exception e) {
|
||||
stopServletContainer();
|
||||
stopHBase();
|
||||
stopZooKeeper();
|
||||
stopDFS();
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
public static void stopMiniCluster() {
|
||||
stopServletContainer();
|
||||
stopHBase();
|
||||
stopZooKeeper();
|
||||
stopDFS();
|
||||
}
|
||||
|
||||
class MiniClusterShutdownThread extends Thread {
|
||||
public void run() {
|
||||
stopMiniCluster();
|
||||
Path path = new Path(
|
||||
conf.get("test.build.data",
|
||||
System.getProperty("test.build.data", "build/test/data")));
|
||||
try {
|
||||
FileSystem.get(conf).delete(path, true);
|
||||
} catch (IOException e) {
|
||||
LOG.error(StringUtils.stringifyException(e));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void setUp() throws Exception {
|
||||
// start the mini cluster if it is not running yet
|
||||
if (!isMiniClusterRunning()) {
|
||||
startMiniCluster();
|
||||
Runtime.getRuntime().addShutdownHook(new MiniClusterShutdownThread());
|
||||
}
|
||||
|
||||
// tell HttpClient to dump request and response headers into the test
|
||||
// log at DEBUG level
|
||||
Logger.getLogger("httpclient.wire.header").setLevel(Level.DEBUG);
|
||||
|
||||
super.setUp();
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,45 @@
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate;

import java.io.IOException;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.HTable;

public class Test00MiniCluster extends MiniClusterTestCase {
  public void testDFSMiniCluster() {
    assertNotNull(dfsCluster);
  }

  public void testZooKeeperMiniCluster() {
    assertNotNull(zooKeeperCluster);
  }

  public void testHBaseMiniCluster() throws IOException {
    assertNotNull(hbaseCluster);
    assertNotNull(new HTable(conf, HConstants.META_TABLE_NAME));
  }

  public void testStargateServlet() throws IOException {
    assertNotNull(server);
  }
}
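Once the mini cluster and the embedded servlet are up, the gateway can also be exercised outside JUnit with the bundled client classes, which is essentially what the resource tests below do. A small sketch, assuming the servlet is listening on the default test port 38080 and that the TestRowResource table already exists (both are assumptions, not guaranteed by this commit):

import org.apache.hadoop.hbase.stargate.client.Client;
import org.apache.hadoop.hbase.stargate.client.Cluster;
import org.apache.hadoop.hbase.stargate.client.Response;
import org.apache.hadoop.hbase.util.Bytes;

public class SmokeTestStargate {
  public static void main(String[] args) throws Exception {
    Client client = new Client(new Cluster().add("localhost", 38080));
    try {
      // write one cell as a raw octet-stream value and read it back
      Response put = client.put("/TestRowResource/testrow1/a:",
        "application/octet-stream", Bytes.toBytes("testvalue1"));
      System.out.println("PUT -> " + put.getCode());
      Response get = client.get("/TestRowResource/testrow1/a:",
        "application/octet-stream");
      System.out.println("GET -> " + get.getCode() + " "
        + Bytes.toString(get.getBody()));
    } finally {
      client.shutdown();
    }
  }
}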
@ -0,0 +1,352 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.StringWriter;
|
||||
|
||||
import javax.xml.bind.JAXBContext;
|
||||
import javax.xml.bind.JAXBException;
|
||||
import javax.xml.bind.Marshaller;
|
||||
import javax.xml.bind.Unmarshaller;
|
||||
|
||||
import org.apache.commons.httpclient.Header;
|
||||
import org.apache.hadoop.hbase.HColumnDescriptor;
|
||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
import org.apache.hadoop.hbase.client.HBaseAdmin;
|
||||
import org.apache.hadoop.hbase.stargate.client.Client;
|
||||
import org.apache.hadoop.hbase.stargate.client.Cluster;
|
||||
import org.apache.hadoop.hbase.stargate.client.Response;
|
||||
import org.apache.hadoop.hbase.stargate.model.CellModel;
|
||||
import org.apache.hadoop.hbase.stargate.model.CellSetModel;
|
||||
import org.apache.hadoop.hbase.stargate.model.RowModel;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
public class TestRowResource extends MiniClusterTestCase {
|
||||
private static final String TABLE = "TestRowResource";
|
||||
private static final String COLUMN_1 = "a:";
|
||||
private static final String COLUMN_2 = "b:";
|
||||
private static final String ROW_1 = "testrow1";
|
||||
private static final String VALUE_1 = "testvalue1";
|
||||
private static final String ROW_2 = "testrow2";
|
||||
private static final String VALUE_2 = "testvalue2";
|
||||
private static final String ROW_3 = "testrow3";
|
||||
private static final String VALUE_3 = "testvalue3";
|
||||
private static final String ROW_4 = "testrow4";
|
||||
private static final String VALUE_4 = "testvalue4";
|
||||
|
||||
private Client client;
|
||||
private JAXBContext context;
|
||||
private Marshaller marshaller;
|
||||
private Unmarshaller unmarshaller;
|
||||
private HBaseAdmin admin;
|
||||
|
||||
public TestRowResource() throws JAXBException {
|
||||
super();
|
||||
context = JAXBContext.newInstance(
|
||||
CellModel.class,
|
||||
CellSetModel.class,
|
||||
RowModel.class);
|
||||
marshaller = context.createMarshaller();
|
||||
unmarshaller = context.createUnmarshaller();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void setUp() throws Exception {
|
||||
super.setUp();
|
||||
client = new Client(new Cluster().add("localhost", testServletPort));
|
||||
admin = new HBaseAdmin(conf);
|
||||
if (admin.tableExists(TABLE)) {
|
||||
return;
|
||||
}
|
||||
HTableDescriptor htd = new HTableDescriptor(TABLE);
|
||||
htd.addFamily(new HColumnDescriptor(COLUMN_1));
|
||||
htd.addFamily(new HColumnDescriptor(COLUMN_2));
|
||||
admin.createTable(htd);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void tearDown() throws Exception {
|
||||
client.shutdown();
|
||||
super.tearDown();
|
||||
}
|
||||
|
||||
private Response deleteRow(String table, String row) throws IOException {
|
||||
StringBuilder path = new StringBuilder();
|
||||
path.append('/');
|
||||
path.append(table);
|
||||
path.append('/');
|
||||
path.append(row);
|
||||
Response response = client.delete(path.toString());
|
||||
Thread.yield(); // yield for the minicluster threads
|
||||
return response;
|
||||
}
|
||||
|
||||
private Response deleteValue(String table, String row, String column)
|
||||
throws IOException {
|
||||
StringBuilder path = new StringBuilder();
|
||||
path.append('/');
|
||||
path.append(table);
|
||||
path.append('/');
|
||||
path.append(row);
|
||||
path.append('/');
|
||||
path.append(column);
|
||||
Response response = client.delete(path.toString());
|
||||
Thread.yield(); // yield for the minicluster threads
|
||||
return response;
|
||||
}
|
||||
|
||||
private Response getValueXML(String table, String row, String column)
|
||||
throws IOException {
|
||||
StringBuilder path = new StringBuilder();
|
||||
path.append('/');
|
||||
path.append(table);
|
||||
path.append('/');
|
||||
path.append(row);
|
||||
path.append('/');
|
||||
path.append(column);
|
||||
Response response = client.get(path.toString(), MIMETYPE_XML);
|
||||
Thread.yield(); // yield for the minicluster threads
|
||||
return response;
|
||||
}
|
||||
|
||||
private Response getValuePB(String table, String row, String column)
|
||||
throws IOException {
|
||||
StringBuilder path = new StringBuilder();
|
||||
path.append('/');
|
||||
path.append(table);
|
||||
path.append('/');
|
||||
path.append(row);
|
||||
path.append('/');
|
||||
path.append(column);
|
||||
Response response = client.get(path.toString(), MIMETYPE_PROTOBUF);
|
||||
Thread.yield(); // yield for the minicluster threads
|
||||
return response;
|
||||
}
|
||||
|
||||
private Response putValueXML(String table, String row, String column,
|
||||
String value) throws IOException, JAXBException {
|
||||
StringBuilder path = new StringBuilder();
|
||||
path.append('/');
|
||||
path.append(table);
|
||||
path.append('/');
|
||||
path.append(row);
|
||||
path.append('/');
|
||||
path.append(column);
|
||||
RowModel rowModel = new RowModel(row);
|
||||
rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(value)));
|
||||
CellSetModel cellSetModel = new CellSetModel();
|
||||
cellSetModel.addRow(rowModel);
|
||||
StringWriter writer = new StringWriter();
|
||||
marshaller.marshal(cellSetModel, writer);
|
||||
Response response = client.put(path.toString(), MIMETYPE_XML,
|
||||
Bytes.toBytes(writer.toString()));
|
||||
Thread.yield(); // yield for the minicluster threads
|
||||
return response;
|
||||
}
|
||||
|
||||
private void checkValueXML(String table, String row, String column,
|
||||
String value) throws IOException, JAXBException {
|
||||
Response response = getValueXML(table, row, column);
|
||||
assertEquals(response.getCode(), 200);
|
||||
CellSetModel cellSet = (CellSetModel)
|
||||
unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
|
||||
RowModel rowModel = cellSet.getRows().get(0);
|
||||
CellModel cell = rowModel.getCells().get(0);
|
||||
assertEquals(Bytes.toString(cell.getColumn()), column);
|
||||
assertEquals(Bytes.toString(cell.getValue()), value);
|
||||
}
|
||||
|
||||
private Response putValuePB(String table, String row, String column,
|
||||
String value) throws IOException {
|
||||
StringBuilder path = new StringBuilder();
|
||||
path.append('/');
|
||||
path.append(table);
|
||||
path.append('/');
|
||||
path.append(row);
|
||||
path.append('/');
|
||||
path.append(column);
|
||||
RowModel rowModel = new RowModel(row);
|
||||
rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(value)));
|
||||
CellSetModel cellSetModel = new CellSetModel();
|
||||
cellSetModel.addRow(rowModel);
|
||||
Response response = client.put(path.toString(), MIMETYPE_PROTOBUF,
|
||||
cellSetModel.createProtobufOutput());
|
||||
Thread.yield(); // yield for the minicluster threads
|
||||
return response;
|
||||
}
|
||||
|
||||
private void checkValuePB(String table, String row, String column,
|
||||
String value) throws IOException {
|
||||
Response response = getValuePB(table, row, column);
|
||||
assertEquals(response.getCode(), 200);
|
||||
CellSetModel cellSet = new CellSetModel();
|
||||
cellSet.getObjectFromMessage(response.getBody());
|
||||
RowModel rowModel = cellSet.getRows().get(0);
|
||||
CellModel cell = rowModel.getCells().get(0);
|
||||
assertEquals(Bytes.toString(cell.getColumn()), column);
|
||||
assertEquals(Bytes.toString(cell.getValue()), value);
|
||||
}
|
||||
|
||||
public void testSingleCellGetPutXML() throws IOException, JAXBException {
|
||||
Response response = getValueXML(TABLE, ROW_1, COLUMN_1);
|
||||
assertEquals(response.getCode(), 404);
|
||||
|
||||
response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
|
||||
assertEquals(response.getCode(), 200);
|
||||
checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
|
||||
|
||||
response = putValueXML(TABLE, ROW_1, COLUMN_2, VALUE_1);
|
||||
assertEquals(response.getCode(), 200);
|
||||
|
||||
response = deleteValue(TABLE, ROW_1, COLUMN_1);
|
||||
assertEquals(response.getCode(), 200);
|
||||
response = getValueXML(TABLE, ROW_1, COLUMN_1);
|
||||
assertEquals(response.getCode(), 404);
|
||||
checkValueXML(TABLE, ROW_1, COLUMN_2, VALUE_1);
|
||||
|
||||
response = deleteRow(TABLE, ROW_1);
|
||||
assertEquals(response.getCode(), 200);
|
||||
}
|
||||
|
||||
public void testSingleCellGetPutPB() throws IOException, JAXBException {
|
||||
Response response = getValuePB(TABLE, ROW_2, COLUMN_1);
|
||||
assertEquals(response.getCode(), 404);
|
||||
|
||||
response = putValuePB(TABLE, ROW_2, COLUMN_1, VALUE_1);
|
||||
assertEquals(response.getCode(), 200);
|
||||
checkValuePB(TABLE, ROW_2, COLUMN_1, VALUE_1);
|
||||
|
||||
response = putValuePB(TABLE, ROW_2, COLUMN_2, VALUE_1);
|
||||
assertEquals(response.getCode(), 200);
|
||||
response = putValuePB(TABLE, ROW_2, COLUMN_2, VALUE_2);
|
||||
assertEquals(response.getCode(), 200);
|
||||
checkValuePB(TABLE, ROW_2, COLUMN_2, VALUE_2);
|
||||
|
||||
response = deleteRow(TABLE, ROW_2);
|
||||
assertEquals(response.getCode(), 200);
|
||||
}
|
||||
|
||||
public void testSingleCellGetPutBinary() throws IOException {
|
||||
final String path = "/" + TABLE + "/" + ROW_3 + "/" + COLUMN_1;
|
||||
final byte[] body = Bytes.toBytes(VALUE_3);
|
||||
|
||||
Response response = client.put(path, MIMETYPE_BINARY, body);
|
||||
assertEquals(response.getCode(), 200);
|
||||
Thread.yield(); // yield for minicluster threads
|
||||
|
||||
response = client.get(path, MIMETYPE_BINARY);
|
||||
assertEquals(response.getCode(), 200);
|
||||
assertTrue(Bytes.equals(response.getBody(), body));
|
||||
boolean foundTimestampHeader = false;
|
||||
for (Header header: response.getHeaders()) {
|
||||
if (header.getName().equals("X-Timestamp")) {
|
||||
foundTimestampHeader = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
assertTrue(foundTimestampHeader);
|
||||
|
||||
response = deleteRow(TABLE, ROW_3);
|
||||
assertEquals(response.getCode(), 200);
|
||||
}
|
||||
|
||||
public void testSingleCellGetJSON() throws IOException, JAXBException {
|
||||
final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1;
|
||||
|
||||
Response response = client.put(path, MIMETYPE_BINARY,
|
||||
Bytes.toBytes(VALUE_4));
|
||||
assertEquals(response.getCode(), 200);
|
||||
Thread.yield(); // yield for minicluster threads
|
||||
response = client.get(path, MIMETYPE_JSON);
|
||||
assertEquals(response.getCode(), 200);
|
||||
|
||||
response = deleteRow(TABLE, ROW_4);
|
||||
assertEquals(response.getCode(), 200);
|
||||
}
|
||||
|
||||
public void testMultiCellGetPutXML() throws IOException, JAXBException {
|
||||
String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row
|
||||
|
||||
CellSetModel cellSetModel = new CellSetModel();
|
||||
RowModel rowModel = new RowModel(ROW_1);
|
||||
rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1)));
|
||||
rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2)));
|
||||
cellSetModel.addRow(rowModel);
|
||||
rowModel = new RowModel(ROW_2);
|
||||
rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_3)));
|
||||
rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_4)));
|
||||
cellSetModel.addRow(rowModel);
|
||||
StringWriter writer = new StringWriter();
|
||||
marshaller.marshal(cellSetModel, writer);
|
||||
Response response = client.put(path, MIMETYPE_XML,
|
||||
Bytes.toBytes(writer.toString()));
|
||||
Thread.yield(); // yield for the minicluster threads
|
||||
|
||||
// make sure the fake row was not actually created
|
||||
response = client.get(path);
|
||||
assertEquals(response.getCode(), 404);
|
||||
|
||||
// check that all of the values were created
|
||||
checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
|
||||
checkValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2);
|
||||
checkValueXML(TABLE, ROW_2, COLUMN_1, VALUE_3);
|
||||
checkValueXML(TABLE, ROW_2, COLUMN_2, VALUE_4);
|
||||
|
||||
response = deleteRow(TABLE, ROW_1);
|
||||
assertEquals(response.getCode(), 200);
|
||||
response = deleteRow(TABLE, ROW_2);
|
||||
assertEquals(response.getCode(), 200);
|
||||
}
|
||||
|
||||
public void testMultiCellGetPutPB() throws IOException {
|
||||
String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row
|
||||
|
||||
CellSetModel cellSetModel = new CellSetModel();
|
||||
RowModel rowModel = new RowModel(ROW_1);
|
||||
rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1)));
|
||||
rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2)));
|
||||
cellSetModel.addRow(rowModel);
|
||||
rowModel = new RowModel(ROW_2);
|
||||
rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_3)));
|
||||
rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_4)));
|
||||
cellSetModel.addRow(rowModel);
|
||||
Response response = client.put(path, MIMETYPE_PROTOBUF,
|
||||
cellSetModel.createProtobufOutput());
|
||||
Thread.yield(); // yield for the minicluster threads
|
||||
|
||||
// make sure the fake row was not actually created
|
||||
response = client.get(path);
|
||||
assertEquals(response.getCode(), 404);
|
||||
|
||||
// check that all of the values were created
|
||||
checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
|
||||
checkValuePB(TABLE, ROW_1, COLUMN_2, VALUE_2);
|
||||
checkValuePB(TABLE, ROW_2, COLUMN_1, VALUE_3);
|
||||
checkValuePB(TABLE, ROW_2, COLUMN_2, VALUE_4);
|
||||
|
||||
response = deleteRow(TABLE, ROW_1);
|
||||
assertEquals(response.getCode(), 200);
|
||||
response = deleteRow(TABLE, ROW_2);
|
||||
assertEquals(response.getCode(), 200);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,269 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.StringWriter;
|
||||
import java.util.Iterator;
|
||||
import java.util.Random;
|
||||
|
||||
import javax.xml.bind.JAXBContext;
|
||||
import javax.xml.bind.JAXBException;
|
||||
import javax.xml.bind.Marshaller;
|
||||
import javax.xml.bind.Unmarshaller;
|
||||
|
||||
import org.apache.commons.httpclient.Header;
|
||||
import org.apache.hadoop.hbase.HColumnDescriptor;
|
||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
import org.apache.hadoop.hbase.KeyValue;
|
||||
import org.apache.hadoop.hbase.client.HBaseAdmin;
|
||||
import org.apache.hadoop.hbase.client.HTable;
|
||||
import org.apache.hadoop.hbase.client.Put;
|
||||
import org.apache.hadoop.hbase.stargate.client.Client;
|
||||
import org.apache.hadoop.hbase.stargate.client.Cluster;
|
||||
import org.apache.hadoop.hbase.stargate.client.Response;
|
||||
import org.apache.hadoop.hbase.stargate.model.CellModel;
|
||||
import org.apache.hadoop.hbase.stargate.model.CellSetModel;
|
||||
import org.apache.hadoop.hbase.stargate.model.RowModel;
|
||||
import org.apache.hadoop.hbase.stargate.model.ScannerModel;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
public class TestScannerResource extends MiniClusterTestCase {
|
||||
private static final String TABLE = "TestScannerResource";
|
||||
private static final String COLUMN_1 = "a:";
|
||||
private static final String COLUMN_2 = "b:";
|
||||
|
||||
private static int expectedRows1;
|
||||
private static int expectedRows2;
|
||||
|
||||
private Client client;
|
||||
private JAXBContext context;
|
||||
private Marshaller marshaller;
|
||||
private Unmarshaller unmarshaller;
|
||||
private HBaseAdmin admin;
|
||||
|
||||
private int insertData(String tableName, String column, double prob)
|
||||
throws IOException {
|
||||
Random rng = new Random();
|
||||
int count = 0;
|
||||
HTable table = new HTable(conf, tableName);
|
||||
byte[] k = new byte[3];
|
||||
byte [][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(column));
|
||||
for (byte b1 = 'a'; b1 < 'z'; b1++) {
|
||||
for (byte b2 = 'a'; b2 < 'z'; b2++) {
|
||||
for (byte b3 = 'a'; b3 < 'z'; b3++) {
|
||||
if (rng.nextDouble() < prob) {
|
||||
k[0] = b1;
|
||||
k[1] = b2;
|
||||
k[2] = b3;
|
||||
Put put = new Put(k);
|
||||
put.add(famAndQf[0], famAndQf[1], k);
|
||||
table.put(put);
|
||||
count++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
table.flushCommits();
|
||||
return count;
|
||||
}
|
||||
|
||||
public TestScannerResource() throws JAXBException {
|
||||
super();
|
||||
context = JAXBContext.newInstance(
|
||||
CellModel.class,
|
||||
CellSetModel.class,
|
||||
RowModel.class,
|
||||
ScannerModel.class);
|
||||
marshaller = context.createMarshaller();
|
||||
unmarshaller = context.createUnmarshaller();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void setUp() throws Exception {
|
||||
super.setUp();
|
||||
client = new Client(new Cluster().add("localhost", testServletPort));
|
||||
admin = new HBaseAdmin(conf);
|
||||
if (admin.tableExists(TABLE)) {
|
||||
return;
|
||||
}
|
||||
HTableDescriptor htd = new HTableDescriptor(TABLE);
|
||||
htd.addFamily(new HColumnDescriptor(COLUMN_1));
|
||||
htd.addFamily(new HColumnDescriptor(COLUMN_2));
|
||||
admin.createTable(htd);
|
||||
expectedRows1 = insertData(TABLE, COLUMN_1, 1.0);
|
||||
expectedRows2 = insertData(TABLE, COLUMN_2, 0.5);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void tearDown() throws Exception {
|
||||
client.shutdown();
|
||||
super.tearDown();
|
||||
}
|
||||
|
||||
private int countCellSet(CellSetModel model) {
|
||||
int count = 0;
|
||||
Iterator<RowModel> rows = model.getRows().iterator();
|
||||
while (rows.hasNext()) {
|
||||
RowModel row = rows.next();
|
||||
Iterator<CellModel> cells = row.getCells().iterator();
|
||||
while (cells.hasNext()) {
|
||||
cells.next();
|
||||
count++;
|
||||
}
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
public void testSimpleScannerXML() throws IOException, JAXBException {
|
||||
final int BATCH_SIZE = 5;
|
||||
// new scanner
|
||||
ScannerModel model = new ScannerModel();
|
||||
model.setBatch(BATCH_SIZE);
|
||||
model.addColumn(Bytes.toBytes(COLUMN_1));
|
||||
StringWriter writer = new StringWriter();
|
||||
marshaller.marshal(model, writer);
|
||||
byte[] body = Bytes.toBytes(writer.toString());
|
||||
Response response = client.put("/" + TABLE + "/scanner", MIMETYPE_XML,
|
||||
body);
|
||||
assertEquals(response.getCode(), 201);
|
||||
String scannerURI = response.getLocation();
|
||||
assertNotNull(scannerURI);
|
||||
|
||||
// get a cell set
|
||||
response = client.get(scannerURI, MIMETYPE_XML);
|
||||
assertEquals(response.getCode(), 200);
|
||||
CellSetModel cellSet = (CellSetModel)
|
||||
unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
|
||||
// confirm batch size conformance
|
||||
assertEquals(countCellSet(cellSet), BATCH_SIZE);
|
||||
|
||||
// delete the scanner
|
||||
response = client.delete(scannerURI);
|
||||
assertEquals(response.getCode(), 200);
|
||||
}
|
||||
|
||||
public void testSimpleScannerPB() throws IOException {
|
||||
final int BATCH_SIZE = 10;
|
||||
// new scanner
|
||||
ScannerModel model = new ScannerModel();
|
||||
model.setBatch(BATCH_SIZE);
|
||||
model.addColumn(Bytes.toBytes(COLUMN_1));
|
||||
Response response = client.put("/" + TABLE + "/scanner",
|
||||
MIMETYPE_PROTOBUF, model.createProtobufOutput());
|
||||
assertEquals(response.getCode(), 201);
|
||||
String scannerURI = response.getLocation();
|
||||
assertNotNull(scannerURI);
|
||||
|
||||
// get a cell set
|
||||
response = client.get(scannerURI, MIMETYPE_PROTOBUF);
|
||||
assertEquals(response.getCode(), 200);
|
||||
CellSetModel cellSet = new CellSetModel();
|
||||
cellSet.getObjectFromMessage(response.getBody());
|
||||
// confirm batch size conformance
|
||||
assertEquals(countCellSet(cellSet), BATCH_SIZE);
|
||||
|
||||
// delete the scanner
|
||||
response = client.delete(scannerURI);
|
||||
assertEquals(response.getCode(), 200);
|
||||
}
|
||||
|
||||
public void testSimpleScannerBinary() throws IOException {
|
||||
// new scanner
|
||||
ScannerModel model = new ScannerModel();
|
||||
model.setBatch(1);
|
||||
model.addColumn(Bytes.toBytes(COLUMN_1));
|
||||
Response response = client.put("/" + TABLE + "/scanner",
|
||||
MIMETYPE_PROTOBUF, model.createProtobufOutput());
|
||||
assertEquals(response.getCode(), 201);
|
||||
String scannerURI = response.getLocation();
|
||||
assertNotNull(scannerURI);
|
||||
|
||||
// get a cell
|
||||
response = client.get(scannerURI, MIMETYPE_BINARY);
|
||||
assertEquals(response.getCode(), 200);
|
||||
// verify that data was returned
|
||||
assertTrue(response.getBody().length > 0);
|
||||
// verify that the expected X-headers are present
|
||||
boolean foundRowHeader = false, foundColumnHeader = false,
|
||||
foundTimestampHeader = false;
|
||||
for (Header header: response.getHeaders()) {
|
||||
if (header.getName().equals("X-Row")) {
|
||||
foundRowHeader = true;
|
||||
} else if (header.getName().equals("X-Column")) {
|
||||
foundColumnHeader = true;
|
||||
} else if (header.getName().equals("X-Timestamp")) {
|
||||
foundTimestampHeader = true;
|
||||
}
|
||||
}
|
||||
assertTrue(foundRowHeader);
|
||||
assertTrue(foundColumnHeader);
|
||||
assertTrue(foundTimestampHeader);
|
||||
|
||||
// delete the scanner
|
||||
response = client.delete(scannerURI);
|
||||
assertEquals(response.getCode(), 200);
|
||||
}
|
||||
|
||||
private int fullTableScan(ScannerModel model) throws IOException {
|
||||
model.setBatch(100);
|
||||
Response response = client.put("/" + TABLE + "/scanner",
|
||||
MIMETYPE_PROTOBUF, model.createProtobufOutput());
|
||||
assertEquals(response.getCode(), 201);
|
||||
String scannerURI = response.getLocation();
|
||||
assertNotNull(scannerURI);
|
||||
int count = 0;
|
||||
while (true) {
|
||||
response = client.get(scannerURI, MIMETYPE_PROTOBUF);
|
||||
assertTrue(response.getCode() == 200 || response.getCode() == 204);
|
||||
if (response.getCode() == 200) {
|
||||
CellSetModel cellSet = new CellSetModel();
|
||||
cellSet.getObjectFromMessage(response.getBody());
|
||||
Iterator<RowModel> rows = cellSet.getRows().iterator();
|
||||
while (rows.hasNext()) {
|
||||
RowModel row = rows.next();
|
||||
Iterator<CellModel> cells = row.getCells().iterator();
|
||||
while (cells.hasNext()) {
|
||||
cells.next();
|
||||
count++;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
// delete the scanner
|
||||
response = client.delete(scannerURI);
|
||||
assertEquals(response.getCode(), 200);
|
||||
return count;
|
||||
}
|
||||
|
||||
public void testFullTableScan() throws IOException {
|
||||
ScannerModel model = new ScannerModel();
|
||||
model.addColumn(Bytes.toBytes(COLUMN_1));
|
||||
assertEquals(fullTableScan(model), expectedRows1);
|
||||
|
||||
model = new ScannerModel();
|
||||
model.addColumn(Bytes.toBytes(COLUMN_2));
|
||||
assertEquals(fullTableScan(model), expectedRows2);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,137 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.StringWriter;
|
||||
|
||||
import javax.xml.bind.JAXBContext;
|
||||
import javax.xml.bind.JAXBException;
|
||||
|
||||
import org.apache.hadoop.hbase.client.HBaseAdmin;
|
||||
import org.apache.hadoop.hbase.stargate.client.Client;
|
||||
import org.apache.hadoop.hbase.stargate.client.Cluster;
|
||||
import org.apache.hadoop.hbase.stargate.client.Response;
|
||||
import org.apache.hadoop.hbase.stargate.model.ColumnSchemaModel;
|
||||
import org.apache.hadoop.hbase.stargate.model.TableSchemaModel;
|
||||
import org.apache.hadoop.hbase.stargate.model.TestTableSchemaModel;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
public class TestSchemaResource extends MiniClusterTestCase {
|
||||
private Client client;
|
||||
private JAXBContext context;
|
||||
private HBaseAdmin admin;
|
||||
|
||||
private static String TABLE1 = "TestSchemaResource1";
|
||||
private static String TABLE2 = "TestSchemaResource2";
|
||||
|
||||
public TestSchemaResource() throws JAXBException {
|
||||
super();
|
||||
context = JAXBContext.newInstance(
|
||||
ColumnSchemaModel.class,
|
||||
TableSchemaModel.class);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void setUp() throws Exception {
|
||||
admin = new HBaseAdmin(conf);
|
||||
client = new Client(new Cluster().add("localhost", testServletPort));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void tearDown() throws Exception {
|
||||
client.shutdown();
|
||||
super.tearDown();
|
||||
}
|
||||
|
||||
private byte[] toXML(TableSchemaModel model) throws JAXBException {
|
||||
StringWriter writer = new StringWriter();
|
||||
context.createMarshaller().marshal(model, writer);
|
||||
return Bytes.toBytes(writer.toString());
|
||||
}
|
||||
|
||||
private TableSchemaModel fromXML(byte[] content) throws JAXBException {
|
||||
return (TableSchemaModel) context.createUnmarshaller()
|
||||
.unmarshal(new ByteArrayInputStream(content));
|
||||
}
|
||||
|
||||
public void testTableCreateAndDeleteXML()
|
||||
throws IOException, JAXBException {
|
||||
String schemaPath = "/" + TABLE1 + "/schema";
|
||||
TableSchemaModel model;
|
||||
Response response;
|
||||
|
||||
assertFalse(admin.tableExists(TABLE1));
|
||||
|
||||
// create the table
|
||||
model = TestTableSchemaModel.buildTestModel(TABLE1);
|
||||
TestTableSchemaModel.checkModel(model, TABLE1);
|
||||
response = client.put(schemaPath, MIMETYPE_XML, toXML(model));
|
||||
assertEquals(response.getCode(), 201);
|
||||
|
||||
// make sure HBase concurs, and wait for the table to come online
|
||||
admin.enableTable(TABLE1);
|
||||
|
||||
// retrieve the schema and validate it
|
||||
response = client.get(schemaPath, MIMETYPE_XML);
|
||||
assertEquals(response.getCode(), 200);
|
||||
model = fromXML(response.getBody());
|
||||
TestTableSchemaModel.checkModel(model, TABLE1);
|
||||
|
||||
// delete the table
|
||||
client.delete(schemaPath);
|
||||
|
||||
// make sure HBase concurs
|
||||
assertFalse(admin.tableExists(TABLE1));
|
||||
}
|
||||
|
||||
public void testTableCreateAndDeletePB() throws IOException, JAXBException {
|
||||
String schemaPath = "/" + TABLE2 + "/schema";
|
||||
TableSchemaModel model;
|
||||
Response response;
|
||||
|
||||
assertFalse(admin.tableExists(TABLE2));
|
||||
|
||||
// create the table
|
||||
model = TestTableSchemaModel.buildTestModel(TABLE2);
|
||||
TestTableSchemaModel.checkModel(model, TABLE2);
|
||||
response = client.put(schemaPath, Constants.MIMETYPE_PROTOBUF,
|
||||
model.createProtobufOutput());
|
||||
assertEquals(response.getCode(), 201);
|
||||
|
||||
// make sure HBase concurs, and wait for the table to come online
|
||||
admin.enableTable(TABLE2);
|
||||
|
||||
// retrieve the schema and validate it
|
||||
response = client.get(schemaPath, Constants.MIMETYPE_PROTOBUF);
|
||||
assertEquals(response.getCode(), 200);
|
||||
model = new TableSchemaModel();
|
||||
model.getObjectFromMessage(response.getBody());
|
||||
TestTableSchemaModel.checkModel(model, TABLE2);
|
||||
|
||||
// delete the table
|
||||
client.delete(schemaPath);
|
||||
|
||||
// make sure HBase concurs
|
||||
assertFalse(admin.tableExists(TABLE2));
|
||||
}
|
||||
}
|
|
@ -0,0 +1,104 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.IOException;
|
||||
|
||||
import javax.xml.bind.JAXBContext;
|
||||
import javax.xml.bind.JAXBException;
|
||||
|
||||
import org.apache.hadoop.hbase.stargate.client.Client;
|
||||
import org.apache.hadoop.hbase.stargate.client.Cluster;
|
||||
import org.apache.hadoop.hbase.stargate.client.Response;
|
||||
import org.apache.hadoop.hbase.stargate.model.StorageClusterStatusModel;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
public class TestStatusResource extends MiniClusterTestCase {
|
||||
private static final byte[] ROOT_REGION_NAME = Bytes.toBytes("-ROOT-,,0");
|
||||
private static final byte[] META_REGION_NAME = Bytes.toBytes(".META.,,1");
|
||||
|
||||
private Client client;
|
||||
private JAXBContext context;
|
||||
|
||||
public TestStatusResource() throws JAXBException {
|
||||
super();
|
||||
context = JAXBContext.newInstance(
|
||||
StorageClusterStatusModel.class);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void setUp() throws Exception {
|
||||
super.setUp();
|
||||
client = new Client(new Cluster().add("localhost", testServletPort));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void tearDown() throws Exception {
|
||||
client.shutdown();
|
||||
super.tearDown();
|
||||
}
|
||||
|
||||
private void validate(StorageClusterStatusModel model) {
|
||||
assertNotNull(model);
|
||||
assertTrue(model.getRegions() >= 2);
|
||||
assertTrue(model.getRequests() >= 0);
|
||||
// assumes minicluster with two regionservers
|
||||
assertTrue(model.getAverageLoad() >= 1.0);
|
||||
assertNotNull(model.getLiveNodes());
|
||||
assertNotNull(model.getDeadNodes());
|
||||
assertFalse(model.getLiveNodes().isEmpty());
|
||||
boolean foundRoot = false, foundMeta = false;
|
||||
for (StorageClusterStatusModel.Node node: model.getLiveNodes()) {
|
||||
assertNotNull(node.getName());
|
||||
assertTrue(node.getStartCode() > 0L);
|
||||
assertTrue(node.getRequests() >= 0);
|
||||
assertFalse(node.getRegions().isEmpty());
|
||||
for (StorageClusterStatusModel.Node.Region region: node.getRegions()) {
|
||||
if (Bytes.equals(region.getName(), ROOT_REGION_NAME)) {
|
||||
foundRoot = true;
|
||||
} else if (Bytes.equals(region.getName(), META_REGION_NAME)) {
|
||||
foundMeta = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
assertTrue(foundRoot);
|
||||
assertTrue(foundMeta);
|
||||
}
|
||||
|
||||
public void testGetClusterStatusXML() throws IOException, JAXBException {
|
||||
Response response = client.get(Constants.PATH_STATUS_CLUSTER, MIMETYPE_XML);
|
||||
assertEquals(response.getCode(), 200);
|
||||
StorageClusterStatusModel model = (StorageClusterStatusModel)
|
||||
context.createUnmarshaller().unmarshal(
|
||||
new ByteArrayInputStream(response.getBody()));
|
||||
validate(model);
|
||||
}
|
||||
|
||||
public void testGetClusterStatusPB() throws IOException {
|
||||
Response response =
|
||||
client.get(Constants.PATH_STATUS_CLUSTER, MIMETYPE_PROTOBUF);
|
||||
assertEquals(response.getCode(), 200);
|
||||
StorageClusterStatusModel model = new StorageClusterStatusModel();
|
||||
model.getObjectFromMessage(response.getBody());
|
||||
validate(model);
|
||||
}
|
||||
}
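A usage sketch (not in the commit) of the cluster status endpoint this test exercises; the endpoint address and the printed summary are assumptions for illustration:

import java.io.IOException;

import org.apache.hadoop.hbase.stargate.Constants;
import org.apache.hadoop.hbase.stargate.client.Client;
import org.apache.hadoop.hbase.stargate.client.Cluster;
import org.apache.hadoop.hbase.stargate.client.Response;
import org.apache.hadoop.hbase.stargate.model.StorageClusterStatusModel;

public class ClusterStatusSketch {
  public static void main(String[] args) throws IOException {
    Client client = new Client(new Cluster().add("localhost", 8080)); // assumed endpoint
    Response response =
      client.get(Constants.PATH_STATUS_CLUSTER, Constants.MIMETYPE_PROTOBUF);
    if (response.getCode() == 200) {
      StorageClusterStatusModel status = new StorageClusterStatusModel();
      status.getObjectFromMessage(response.getBody());
      // same fields the test validates: region/request counts and node lists
      System.out.println("regions=" + status.getRegions()
        + " requests=" + status.getRequests()
        + " live=" + status.getLiveNodes().size()
        + " dead=" + status.getDeadNodes().size());
    }
    client.shutdown();
  }
}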
|
|
@ -0,0 +1,220 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.IOException;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.util.Iterator;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.xml.bind.JAXBContext;
|
||||
import javax.xml.bind.JAXBException;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.hbase.HColumnDescriptor;
|
||||
import org.apache.hadoop.hbase.HRegionInfo;
|
||||
import org.apache.hadoop.hbase.HServerAddress;
|
||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
import org.apache.hadoop.hbase.KeyValue;
|
||||
import org.apache.hadoop.hbase.client.HBaseAdmin;
|
||||
import org.apache.hadoop.hbase.client.HTable;
|
||||
import org.apache.hadoop.hbase.client.Put;
|
||||
import org.apache.hadoop.hbase.stargate.client.Client;
|
||||
import org.apache.hadoop.hbase.stargate.client.Cluster;
|
||||
import org.apache.hadoop.hbase.stargate.client.Response;
|
||||
import org.apache.hadoop.hbase.stargate.model.TableModel;
|
||||
import org.apache.hadoop.hbase.stargate.model.TableInfoModel;
|
||||
import org.apache.hadoop.hbase.stargate.model.TableListModel;
|
||||
import org.apache.hadoop.hbase.stargate.model.TableRegionModel;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
|
||||
public class TestTableResource extends MiniClusterTestCase {
|
||||
private static final Log LOG =
|
||||
LogFactory.getLog(TestTableResource.class);
|
||||
|
||||
private static String TABLE = "TestTableResource";
|
||||
private static String COLUMN = "test:";
|
||||
private static Map<HRegionInfo,HServerAddress> regionMap;
|
||||
|
||||
private Client client;
|
||||
private JAXBContext context;
|
||||
private HBaseAdmin admin;
|
||||
|
||||
public TestTableResource() throws JAXBException {
|
||||
super();
|
||||
context = JAXBContext.newInstance(
|
||||
TableModel.class,
|
||||
TableInfoModel.class,
|
||||
TableListModel.class,
|
||||
TableRegionModel.class);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void setUp() throws Exception {
|
||||
super.setUp();
|
||||
client = new Client(new Cluster().add("localhost", testServletPort));
|
||||
admin = new HBaseAdmin(conf);
|
||||
if (admin.tableExists(TABLE)) {
|
||||
return;
|
||||
}
|
||||
HTableDescriptor htd = new HTableDescriptor(TABLE);
|
||||
htd.addFamily(new HColumnDescriptor(COLUMN));
|
||||
admin.createTable(htd);
|
||||
HTable table = new HTable(conf, TABLE);
|
||||
byte[] k = new byte[3];
|
||||
byte [][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(COLUMN));
|
||||
for (byte b1 = 'a'; b1 < 'z'; b1++) {
|
||||
for (byte b2 = 'a'; b2 < 'z'; b2++) {
|
||||
for (byte b3 = 'a'; b3 < 'z'; b3++) {
|
||||
k[0] = b1;
|
||||
k[1] = b2;
|
||||
k[2] = b3;
|
||||
Put put = new Put(k);
|
||||
put.add(famAndQf[0], famAndQf[1], k);
|
||||
table.put(put);
|
||||
}
|
||||
}
|
||||
}
|
||||
table.flushCommits();
|
||||
// get the initial layout (should just be one region)
|
||||
Map<HRegionInfo,HServerAddress> m = table.getRegionsInfo();
|
||||
assertEquals(m.size(), 1);
|
||||
// tell the master to split the table
|
||||
admin.split(TABLE);
|
||||
// give some time for the split to happen
|
||||
try {
|
||||
Thread.sleep(15 * 1000);
|
||||
} catch (InterruptedException e) {
|
||||
LOG.warn(StringUtils.stringifyException(e));
|
||||
}
|
||||
// check again
|
||||
m = table.getRegionsInfo();
|
||||
// should have two regions now
|
||||
assertEquals(m.size(), 2);
|
||||
regionMap = m;
|
||||
LOG.info("regions: " + regionMap);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void tearDown() throws Exception {
|
||||
client.shutdown();
|
||||
super.tearDown();
|
||||
}
|
||||
|
||||
private void checkTableList(TableListModel model) {
|
||||
boolean found = false;
|
||||
Iterator<TableModel> tables = model.getTables().iterator();
|
||||
assertTrue(tables.hasNext());
|
||||
while (tables.hasNext()) {
|
||||
TableModel table = tables.next();
|
||||
if (table.getName().equals(TABLE)) {
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
assertTrue(found);
|
||||
}
|
||||
|
||||
public void testTableListText() throws IOException {
|
||||
Response response = client.get("/", MIMETYPE_PLAIN);
|
||||
assertEquals(response.getCode(), 200);
|
||||
}
|
||||
|
||||
public void testTableListXML() throws IOException, JAXBException {
|
||||
Response response = client.get("/", MIMETYPE_XML);
|
||||
assertEquals(response.getCode(), 200);
|
||||
TableListModel model = (TableListModel)
|
||||
context.createUnmarshaller()
|
||||
.unmarshal(new ByteArrayInputStream(response.getBody()));
|
||||
checkTableList(model);
|
||||
}
|
||||
|
||||
public void testTableListJSON() throws IOException {
|
||||
Response response = client.get("/", MIMETYPE_JSON);
|
||||
assertEquals(response.getCode(), 200);
|
||||
}
|
||||
|
||||
public void testTableListPB() throws IOException, JAXBException {
|
||||
Response response = client.get("/", MIMETYPE_PROTOBUF);
|
||||
assertEquals(response.getCode(), 200);
|
||||
TableListModel model = new TableListModel();
|
||||
model.getObjectFromMessage(response.getBody());
|
||||
checkTableList(model);
|
||||
}
|
||||
|
||||
public void checkTableInfo(TableInfoModel model) {
|
||||
assertEquals(model.getName(), TABLE);
|
||||
Iterator<TableRegionModel> regions = model.getRegions().iterator();
|
||||
assertTrue(regions.hasNext());
|
||||
while (regions.hasNext()) {
|
||||
TableRegionModel region = regions.next();
|
||||
boolean found = false;
|
||||
for (Map.Entry<HRegionInfo,HServerAddress> e: regionMap.entrySet()) {
|
||||
HRegionInfo hri = e.getKey();
|
||||
if (hri.getRegionNameAsString().equals(region.getName())) {
|
||||
found = true;
|
||||
byte[] startKey = hri.getStartKey();
|
||||
byte[] endKey = hri.getEndKey();
|
||||
InetSocketAddress sa = e.getValue().getInetSocketAddress();
|
||||
String location = sa.getHostName() + ":" +
|
||||
Integer.valueOf(sa.getPort());
|
||||
assertEquals(hri.getRegionId(), region.getId());
|
||||
assertTrue(Bytes.equals(startKey, region.getStartKey()));
|
||||
assertTrue(Bytes.equals(endKey, region.getEndKey()));
|
||||
assertEquals(location, region.getLocation());
|
||||
break;
|
||||
}
|
||||
}
|
||||
assertTrue(found);
|
||||
}
|
||||
}
|
||||
|
||||
public void testTableInfoText() throws IOException {
|
||||
Response response = client.get("/" + TABLE + "/regions", MIMETYPE_PLAIN);
|
||||
assertEquals(response.getCode(), 200);
|
||||
}
|
||||
|
||||
public void testTableInfoXML() throws IOException, JAXBException {
|
||||
Response response = client.get("/" + TABLE + "/regions", MIMETYPE_XML);
|
||||
assertEquals(response.getCode(), 200);
|
||||
TableInfoModel model = (TableInfoModel)
|
||||
context.createUnmarshaller()
|
||||
.unmarshal(new ByteArrayInputStream(response.getBody()));
|
||||
checkTableInfo(model);
|
||||
}
|
||||
|
||||
public void testTableInfoJSON() throws IOException {
|
||||
Response response = client.get("/" + TABLE + "/regions", MIMETYPE_JSON);
|
||||
assertEquals(response.getCode(), 200);
|
||||
}
|
||||
|
||||
public void testTableInfoPB() throws IOException, JAXBException {
|
||||
Response response =
|
||||
client.get("/" + TABLE + "/regions", MIMETYPE_PROTOBUF);
|
||||
assertEquals(response.getCode(), 200);
|
||||
TableInfoModel model = new TableInfoModel();
|
||||
model.getObjectFromMessage(response.getBody());
|
||||
checkTableInfo(model);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,127 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.IOException;
|
||||
|
||||
import javax.xml.bind.JAXBContext;
|
||||
import javax.xml.bind.JAXBException;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.hbase.stargate.client.Client;
|
||||
import org.apache.hadoop.hbase.stargate.client.Cluster;
|
||||
import org.apache.hadoop.hbase.stargate.client.Response;
|
||||
import org.apache.hadoop.hbase.stargate.model.StorageClusterVersionModel;
|
||||
import org.apache.hadoop.hbase.stargate.model.VersionModel;
|
||||
|
||||
public class TestVersionResource extends MiniClusterTestCase {
|
||||
private static final Log LOG =
|
||||
LogFactory.getLog(TestVersionResource.class);
|
||||
|
||||
private Client client;
|
||||
private JAXBContext context;
|
||||
|
||||
public TestVersionResource() throws JAXBException {
|
||||
super();
|
||||
context = JAXBContext.newInstance(
|
||||
VersionModel.class,
|
||||
StorageClusterVersionModel.class);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void setUp() throws Exception {
|
||||
super.setUp();
|
||||
client = new Client(new Cluster().add("localhost", testServletPort));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void tearDown() throws Exception {
|
||||
client.shutdown();
|
||||
super.tearDown();
|
||||
}
|
||||
|
||||
private static void validate(VersionModel model) {
|
||||
assertNotNull(model);
|
||||
assertNotNull(model.getStargateVersion());
|
||||
assertNotNull(model.getOsVersion());
|
||||
assertNotNull(model.getJvmVersion());
|
||||
assertNotNull(model.getServerVersion());
|
||||
assertNotNull(model.getJerseyVersion());
|
||||
}
|
||||
|
||||
public void testGetStargateVersionText() throws IOException {
|
||||
Response response = client.get(Constants.PATH_VERSION, MIMETYPE_PLAIN);
|
||||
assertTrue(response.getCode() == 200);
|
||||
}
|
||||
|
||||
public void testGetStargateVersionXML() throws IOException, JAXBException {
|
||||
Response response = client.get(Constants.PATH_VERSION, MIMETYPE_XML);
|
||||
assertTrue(response.getCode() == 200);
|
||||
VersionModel model = (VersionModel)
|
||||
context.createUnmarshaller().unmarshal(
|
||||
new ByteArrayInputStream(response.getBody()));
|
||||
validate(model);
|
||||
LOG.info("success retrieving Stargate version as XML");
|
||||
}
|
||||
|
||||
public void testGetStargateVersionJSON() throws IOException {
|
||||
Response response = client.get(Constants.PATH_VERSION, MIMETYPE_JSON);
|
||||
assertTrue(response.getCode() == 200);
|
||||
}
|
||||
|
||||
public void testGetStargateVersionPB() throws IOException {
|
||||
Response response = client.get(Constants.PATH_VERSION, MIMETYPE_PROTOBUF);
|
||||
assertTrue(response.getCode() == 200);
|
||||
VersionModel model = new VersionModel();
|
||||
model.getObjectFromMessage(response.getBody());
|
||||
validate(model);
|
||||
LOG.info("success retrieving Stargate version as protobuf");
|
||||
}
|
||||
|
||||
public void testGetStorageClusterVersionText() throws IOException {
|
||||
Response response =
|
||||
client.get(Constants.PATH_VERSION_CLUSTER, MIMETYPE_PLAIN);
|
||||
assertTrue(response.getCode() == 200);
|
||||
}
|
||||
|
||||
public void testGetStorageClusterVersionXML() throws IOException,
|
||||
JAXBException {
|
||||
Response response =
|
||||
client.get(Constants.PATH_VERSION_CLUSTER, MIMETYPE_XML);
|
||||
assertTrue(response.getCode() == 200);
|
||||
StorageClusterVersionModel clusterVersionModel =
|
||||
(StorageClusterVersionModel)
|
||||
context.createUnmarshaller().unmarshal(
|
||||
new ByteArrayInputStream(response.getBody()));
|
||||
assertNotNull(clusterVersionModel);
|
||||
assertNotNull(clusterVersionModel.getVersion());
|
||||
LOG.info("success retrieving storage cluster version as XML");
|
||||
}
|
||||
|
||||
public void testGetStorageClusterVersionJSON() throws IOException {
|
||||
Response response =
|
||||
client.get(Constants.PATH_VERSION_CLUSTER, MIMETYPE_JSON);
|
||||
assertTrue(response.getCode() == 200);
|
||||
}
|
||||
|
||||
}
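A sketch (not in the commit) of fetching the two version documents tested above as plain text; host and port are assumptions, and the text MIME type is written out literally since only the protobuf constant is referenced by name elsewhere in this patch:

import java.io.IOException;

import org.apache.hadoop.hbase.stargate.Constants;
import org.apache.hadoop.hbase.stargate.client.Client;
import org.apache.hadoop.hbase.stargate.client.Cluster;
import org.apache.hadoop.hbase.stargate.client.Response;
import org.apache.hadoop.hbase.util.Bytes;

public class VersionSketch {
  public static void main(String[] args) throws IOException {
    Client client = new Client(new Cluster().add("localhost", 8080)); // assumed endpoint
    // Stargate's own version, then the backing storage cluster's version
    Response stargate = client.get(Constants.PATH_VERSION, "text/plain");
    Response cluster = client.get(Constants.PATH_VERSION_CLUSTER, "text/plain");
    System.out.println(Bytes.toString(stargate.getBody()));
    System.out.println(Bytes.toString(cluster.getBody()));
    client.shutdown();
  }
}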
|
|
@ -0,0 +1,104 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.model;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.StringReader;
|
||||
import java.io.StringWriter;
|
||||
|
||||
import javax.xml.bind.JAXBContext;
|
||||
import javax.xml.bind.JAXBException;
|
||||
|
||||
import org.apache.hadoop.hbase.util.Base64;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
import junit.framework.TestCase;
|
||||
|
||||
public class TestCellModel extends TestCase {
|
||||
|
||||
private static final long TIMESTAMP = 1245219839331L;
|
||||
private static final byte[] COLUMN = Bytes.toBytes("testcolumn");
|
||||
private static final byte[] VALUE = Bytes.toBytes("testvalue");
|
||||
|
||||
private static final String AS_XML =
|
||||
"<Cell timestamp=\"1245219839331\"" +
|
||||
" column=\"dGVzdGNvbHVtbg==\">" +
|
||||
"dGVzdHZhbHVl</Cell>";
|
||||
|
||||
private static final String AS_PB =
|
||||
"Egp0ZXN0Y29sdW1uGOO6i+eeJCIJdGVzdHZhbHVl";
|
||||
|
||||
private JAXBContext context;
|
||||
|
||||
public TestCellModel() throws JAXBException {
|
||||
super();
|
||||
context = JAXBContext.newInstance(CellModel.class);
|
||||
}
|
||||
|
||||
private CellModel buildTestModel() {
|
||||
CellModel model = new CellModel();
|
||||
model.setColumn(COLUMN);
|
||||
model.setTimestamp(TIMESTAMP);
|
||||
model.setValue(VALUE);
|
||||
return model;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
private String toXML(CellModel model) throws JAXBException {
|
||||
StringWriter writer = new StringWriter();
|
||||
context.createMarshaller().marshal(model, writer);
|
||||
return writer.toString();
|
||||
}
|
||||
|
||||
private CellModel fromXML(String xml) throws JAXBException {
|
||||
return (CellModel)
|
||||
context.createUnmarshaller().unmarshal(new StringReader(xml));
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
private byte[] toPB(CellModel model) {
|
||||
return model.createProtobufOutput();
|
||||
}
|
||||
|
||||
private CellModel fromPB(String pb) throws IOException {
  return (CellModel)
    new CellModel().getObjectFromMessage(Base64.decode(pb));
}
|
||||
|
||||
private void checkModel(CellModel model) {
|
||||
assertTrue(Bytes.equals(model.getColumn(), COLUMN));
|
||||
assertTrue(Bytes.equals(model.getValue(), VALUE));
|
||||
assertTrue(model.hasUserTimestamp());
|
||||
assertEquals(model.getTimestamp(), TIMESTAMP);
|
||||
}
|
||||
|
||||
public void testBuildModel() throws Exception {
|
||||
checkModel(buildTestModel());
|
||||
}
|
||||
|
||||
public void testFromXML() throws Exception {
|
||||
checkModel(fromXML(AS_XML));
|
||||
}
|
||||
|
||||
public void testFromPB() throws Exception {
|
||||
checkModel(fromPB(AS_PB));
|
||||
}
|
||||
}
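One note on the fixture above (added here, not in the commit): the column and value attributes in AS_XML are simply base64 of the raw cell bytes, which can be confirmed with the Base64 utility the test already imports:

import org.apache.hadoop.hbase.util.Base64;
import org.apache.hadoop.hbase.util.Bytes;

public class CellModelXmlNote {
  public static void main(String[] args) {
    // the AS_XML attributes decode back to the COLUMN and VALUE constants
    System.out.println(Bytes.toString(Base64.decode("dGVzdGNvbHVtbg=="))); // testcolumn
    System.out.println(Bytes.toString(Base64.decode("dGVzdHZhbHVl")));     // testvalue
  }
}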
|
|
@ -0,0 +1,154 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.model;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.StringReader;
|
||||
import java.io.StringWriter;
|
||||
import java.util.Iterator;
|
||||
|
||||
import javax.xml.bind.JAXBContext;
|
||||
import javax.xml.bind.JAXBException;
|
||||
|
||||
import org.apache.hadoop.hbase.util.Base64;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
import junit.framework.TestCase;
|
||||
|
||||
public class TestCellSetModel extends TestCase {
|
||||
|
||||
private static final byte[] ROW1 = Bytes.toBytes("testrow1");
|
||||
private static final byte[] COLUMN1 = Bytes.toBytes("testcolumn1");
|
||||
private static final byte[] VALUE1 = Bytes.toBytes("testvalue1");
|
||||
private static final long TIMESTAMP1 = 1245219839331L;
|
||||
private static final byte[] ROW2 = Bytes.toBytes("testrow1");
|
||||
private static final byte[] COLUMN2 = Bytes.toBytes("testcolumn2");
|
||||
private static final byte[] VALUE2 = Bytes.toBytes("testvalue2");
|
||||
private static final long TIMESTAMP2 = 1245239813319L;
|
||||
private static final byte[] COLUMN3 = Bytes.toBytes("testcolumn3");
|
||||
private static final byte[] VALUE3 = Bytes.toBytes("testvalue3");
|
||||
private static final long TIMESTAMP3 = 1245393318192L;
|
||||
|
||||
private static final String AS_XML =
|
||||
"<CellSet>" +
|
||||
"<Row key=\"dGVzdHJvdzE=\">" +
|
||||
"<Cell timestamp=\"1245219839331\" column=\"dGVzdGNvbHVtbjE=\">" +
|
||||
"dGVzdHZhbHVlMQ==</Cell>" +
|
||||
"</Row>" +
|
||||
"<Row key=\"dGVzdHJvdzE=\">" +
|
||||
"<Cell timestamp=\"1245239813319\" column=\"dGVzdGNvbHVtbjI=\">" +
|
||||
"dGVzdHZhbHVlMg==</Cell>" +
|
||||
"<Cell timestamp=\"1245393318192\" column=\"dGVzdGNvbHVtbjM=\">" +
|
||||
"dGVzdHZhbHVlMw==</Cell>" +
|
||||
"</Row>" +
|
||||
"</CellSet>";
|
||||
|
||||
private static final String AS_PB =
|
||||
"CiwKCHRlc3Ryb3cxEiASC3Rlc3Rjb2x1bW4xGOO6i+eeJCIKdGVzdHZhbHVlMQpOCgh0ZXN0cm93" +
|
||||
"MRIgEgt0ZXN0Y29sdW1uMhjHyc7wniQiCnRlc3R2YWx1ZTISIBILdGVzdGNvbHVtbjMYsOLnuZ8k" +
|
||||
"Igp0ZXN0dmFsdWUz";
|
||||
|
||||
private JAXBContext context;
|
||||
|
||||
public TestCellSetModel() throws JAXBException {
|
||||
super();
|
||||
context = JAXBContext.newInstance(
|
||||
CellModel.class,
|
||||
CellSetModel.class,
|
||||
RowModel.class);
|
||||
}
|
||||
|
||||
private CellSetModel buildTestModel() {
|
||||
CellSetModel model = new CellSetModel();
|
||||
RowModel row;
|
||||
row = new RowModel();
|
||||
row.setKey(ROW1);
|
||||
row.addCell(new CellModel(COLUMN1, TIMESTAMP1, VALUE1));
|
||||
model.addRow(row);
|
||||
row = new RowModel();
|
||||
row.setKey(ROW2);
|
||||
row.addCell(new CellModel(COLUMN2, TIMESTAMP2, VALUE2));
|
||||
row.addCell(new CellModel(COLUMN3, TIMESTAMP3, VALUE3));
|
||||
model.addRow(row);
|
||||
return model;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
private String toXML(CellSetModel model) throws JAXBException {
|
||||
StringWriter writer = new StringWriter();
|
||||
context.createMarshaller().marshal(model, writer);
|
||||
return writer.toString();
|
||||
}
|
||||
|
||||
private CellSetModel fromXML(String xml) throws JAXBException {
|
||||
return (CellSetModel)
|
||||
context.createUnmarshaller().unmarshal(new StringReader(xml));
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
private byte[] toPB(CellSetModel model) {
|
||||
return model.createProtobufOutput();
|
||||
}
|
||||
|
||||
private CellSetModel fromPB(String pb) throws IOException {
  return (CellSetModel)
    new CellSetModel().getObjectFromMessage(Base64.decode(pb));
}
|
||||
|
||||
private void checkModel(CellSetModel model) {
|
||||
Iterator<RowModel> rows = model.getRows().iterator();
|
||||
RowModel row = rows.next();
|
||||
assertTrue(Bytes.equals(ROW1, row.getKey()));
|
||||
Iterator<CellModel> cells = row.getCells().iterator();
|
||||
CellModel cell = cells.next();
|
||||
assertTrue(Bytes.equals(COLUMN1, cell.getColumn()));
|
||||
assertTrue(Bytes.equals(VALUE1, cell.getValue()));
|
||||
assertTrue(cell.hasUserTimestamp());
|
||||
assertEquals(cell.getTimestamp(), TIMESTAMP1);
|
||||
assertFalse(cells.hasNext());
|
||||
row = rows.next();
|
||||
assertTrue(Bytes.equals(ROW2, row.getKey()));
|
||||
cells = row.getCells().iterator();
|
||||
cell = cells.next();
|
||||
assertTrue(Bytes.equals(COLUMN2, cell.getColumn()));
|
||||
assertTrue(Bytes.equals(VALUE2, cell.getValue()));
|
||||
assertTrue(cell.hasUserTimestamp());
|
||||
assertEquals(cell.getTimestamp(), TIMESTAMP2);
|
||||
cell = cells.next();
|
||||
assertTrue(Bytes.equals(COLUMN3, cell.getColumn()));
|
||||
assertTrue(Bytes.equals(VALUE3, cell.getValue()));
|
||||
assertTrue(cell.hasUserTimestamp());
|
||||
assertEquals(cell.getTimestamp(), TIMESTAMP3);
|
||||
assertFalse(cells.hasNext());
|
||||
}
|
||||
|
||||
public void testBuildModel() throws Exception {
|
||||
checkModel(buildTestModel());
|
||||
}
|
||||
|
||||
public void testFromXML() throws Exception {
|
||||
checkModel(fromXML(AS_XML));
|
||||
}
|
||||
|
||||
public void testFromPB() throws Exception {
|
||||
checkModel(fromPB(AS_PB));
|
||||
}
|
||||
}
|
|
@ -0,0 +1,102 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.model;
|
||||
|
||||
import java.io.StringReader;
|
||||
import java.io.StringWriter;
|
||||
|
||||
import javax.xml.bind.JAXBContext;
|
||||
import javax.xml.bind.JAXBException;
|
||||
|
||||
import junit.framework.TestCase;
|
||||
|
||||
public class TestColumnSchemaModel extends TestCase {
|
||||
|
||||
protected static final String COLUMN_NAME = "testcolumn";
|
||||
protected static final boolean BLOCKCACHE = true;
|
||||
protected static final int BLOCKSIZE = 16384;
|
||||
protected static final boolean BLOOMFILTER = false;
|
||||
protected static final String COMPRESSION = "GZ";
|
||||
protected static final boolean IN_MEMORY = false;
|
||||
protected static final int TTL = 86400;
|
||||
protected static final int VERSIONS = 1;
|
||||
|
||||
protected static final String AS_XML =
|
||||
"<ColumnSchema name=\"testcolumn\"" +
|
||||
" BLOCKSIZE=\"16384\"" +
|
||||
" BLOOMFILTER=\"false\"" +
|
||||
" BLOCKCACHE=\"true\"" +
|
||||
" COMPRESSION=\"GZ\"" +
|
||||
" VERSIONS=\"1\"" +
|
||||
" TTL=\"86400\"" +
|
||||
" IN_MEMORY=\"false\"/>";
|
||||
|
||||
private JAXBContext context;
|
||||
|
||||
public TestColumnSchemaModel() throws JAXBException {
|
||||
super();
|
||||
context = JAXBContext.newInstance(ColumnSchemaModel.class);
|
||||
}
|
||||
|
||||
protected static ColumnSchemaModel buildTestModel() {
|
||||
ColumnSchemaModel model = new ColumnSchemaModel();
|
||||
model.setName(COLUMN_NAME);
|
||||
model.__setBlockcache(BLOCKCACHE);
|
||||
model.__setBlocksize(BLOCKSIZE);
|
||||
model.__setBloomfilter(BLOOMFILTER);
|
||||
model.__setCompression(COMPRESSION);
|
||||
model.__setInMemory(IN_MEMORY);
|
||||
model.__setTTL(TTL);
|
||||
model.__setVersions(VERSIONS);
|
||||
return model;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
private String toXML(ColumnSchemaModel model) throws JAXBException {
|
||||
StringWriter writer = new StringWriter();
|
||||
context.createMarshaller().marshal(model, writer);
|
||||
return writer.toString();
|
||||
}
|
||||
|
||||
private ColumnSchemaModel fromXML(String xml) throws JAXBException {
|
||||
return (ColumnSchemaModel)
|
||||
context.createUnmarshaller().unmarshal(new StringReader(xml));
|
||||
}
|
||||
|
||||
protected static void checkModel(ColumnSchemaModel model) {
|
||||
assertEquals(model.getName(), COLUMN_NAME);
|
||||
assertEquals(model.__getBlockcache(), BLOCKCACHE);
|
||||
assertEquals(model.__getBlocksize(), BLOCKSIZE);
|
||||
assertEquals(model.__getBloomfilter(), BLOOMFILTER);
|
||||
assertTrue(model.__getCompression().equalsIgnoreCase(COMPRESSION));
|
||||
assertEquals(model.__getInMemory(), IN_MEMORY);
|
||||
assertEquals(model.__getTTL(), TTL);
|
||||
assertEquals(model.__getVersions(), VERSIONS);
|
||||
}
|
||||
|
||||
public void testBuildModel() throws Exception {
|
||||
checkModel(buildTestModel());
|
||||
}
|
||||
|
||||
public void testFromXML() throws Exception {
|
||||
checkModel(fromXML(AS_XML));
|
||||
}
|
||||
}
|
|
@ -0,0 +1,93 @@
|
|||
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate.model;

import java.io.StringReader;
import java.io.StringWriter;
import java.util.Iterator;

import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;

import org.apache.hadoop.hbase.util.Bytes;

import junit.framework.TestCase;

public class TestRowModel extends TestCase {

  private static final byte[] ROW1 = Bytes.toBytes("testrow1");
  private static final byte[] COLUMN1 = Bytes.toBytes("testcolumn1");
  private static final byte[] VALUE1 = Bytes.toBytes("testvalue1");
  private static final long TIMESTAMP1 = 1245219839331L;

  private static final String AS_XML =
    "<Row key=\"dGVzdHJvdzE=\">" +
      "<Cell timestamp=\"1245219839331\" column=\"dGVzdGNvbHVtbjE=\">" +
      "dGVzdHZhbHVlMQ==</Cell>" +
    "</Row>";

  private JAXBContext context;

  public TestRowModel() throws JAXBException {
    super();
    context = JAXBContext.newInstance(
      CellModel.class,
      RowModel.class);
  }

  private RowModel buildTestModel() {
    RowModel model = new RowModel();
    model.setKey(ROW1);
    model.addCell(new CellModel(COLUMN1, TIMESTAMP1, VALUE1));
    return model;
  }

  @SuppressWarnings("unused")
  private String toXML(RowModel model) throws JAXBException {
    StringWriter writer = new StringWriter();
    context.createMarshaller().marshal(model, writer);
    return writer.toString();
  }

  private RowModel fromXML(String xml) throws JAXBException {
    return (RowModel)
      context.createUnmarshaller().unmarshal(new StringReader(xml));
  }

  private void checkModel(RowModel model) {
    assertTrue(Bytes.equals(ROW1, model.getKey()));
    Iterator<CellModel> cells = model.getCells().iterator();
    CellModel cell = cells.next();
    assertTrue(Bytes.equals(COLUMN1, cell.getColumn()));
    assertTrue(Bytes.equals(VALUE1, cell.getValue()));
    assertTrue(cell.hasUserTimestamp());
    assertEquals(cell.getTimestamp(), TIMESTAMP1);
    assertFalse(cells.hasNext());
  }

  public void testBuildModel() throws Exception {
    checkModel(buildTestModel());
  }

  public void testFromXML() throws Exception {
    checkModel(fromXML(AS_XML));
  }
}
|
|
@ -0,0 +1,126 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.model;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.StringReader;
|
||||
import java.io.StringWriter;
|
||||
|
||||
import javax.xml.bind.JAXBContext;
|
||||
import javax.xml.bind.JAXBException;
|
||||
|
||||
import org.apache.hadoop.hbase.util.Base64;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
import junit.framework.TestCase;
|
||||
|
||||
public class TestScannerModel extends TestCase {
|
||||
private static final byte[] START_ROW = Bytes.toBytes("abracadabra");
|
||||
private static final byte[] END_ROW = Bytes.toBytes("zzyzx");
|
||||
private static final byte[] COLUMN1 = Bytes.toBytes("column1");
|
||||
private static final byte[] COLUMN2 = Bytes.toBytes("column2:foo");
|
||||
private static final long START_TIME = 1245219839331L;
|
||||
private static final long END_TIME = 1245393318192L;
|
||||
private static final int BATCH = 100;
|
||||
|
||||
private static final String AS_XML =
|
||||
"<Scanner startTime=\"1245219839331\"" +
|
||||
" startRow=\"YWJyYWNhZGFicmE=\"" +
|
||||
" endTime=\"1245393318192\"" +
|
||||
" endRow=\"enp5eng=\"" +
|
||||
" column=\"Y29sdW1uMQ== Y29sdW1uMjpmb28=\"" +
|
||||
" batch=\"100\"/>";
|
||||
|
||||
private static final String AS_PB =
|
||||
"CgthYnJhY2FkYWJyYRIFenp5engaB2NvbHVtbjEaC2NvbHVtbjI6Zm9vIGQo47qL554kMLDi57mf" +
|
||||
"JA==";
|
||||
|
||||
private JAXBContext context;
|
||||
|
||||
public TestScannerModel() throws JAXBException {
|
||||
super();
|
||||
context = JAXBContext.newInstance(ScannerModel.class);
|
||||
}
|
||||
|
||||
private ScannerModel buildTestModel() {
|
||||
ScannerModel model = new ScannerModel();
|
||||
model.setStartRow(START_ROW);
|
||||
model.setEndRow(END_ROW);
|
||||
model.addColumn(COLUMN1);
|
||||
model.addColumn(COLUMN2);
|
||||
model.setStartTime(START_TIME);
|
||||
model.setEndTime(END_TIME);
|
||||
model.setBatch(BATCH);
|
||||
return model;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
private String toXML(ScannerModel model) throws JAXBException {
|
||||
StringWriter writer = new StringWriter();
|
||||
context.createMarshaller().marshal(model, writer);
|
||||
return writer.toString();
|
||||
}
|
||||
|
||||
private ScannerModel fromXML(String xml) throws JAXBException {
|
||||
return (ScannerModel)
|
||||
context.createUnmarshaller().unmarshal(new StringReader(xml));
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
private byte[] toPB(ScannerModel model) {
|
||||
return model.createProtobufOutput();
|
||||
}
|
||||
|
||||
private ScannerModel fromPB(String pb) throws IOException {
  return (ScannerModel)
    new ScannerModel().getObjectFromMessage(Base64.decode(pb));
}
|
||||
|
||||
private void checkModel(ScannerModel model) {
|
||||
assertTrue(Bytes.equals(model.getStartRow(), START_ROW));
|
||||
assertTrue(Bytes.equals(model.getEndRow(), END_ROW));
|
||||
boolean foundCol1 = false, foundCol2 = false;
|
||||
for (byte[] column: model.getColumns()) {
|
||||
if (Bytes.equals(column, COLUMN1)) {
|
||||
foundCol1 = true;
|
||||
} else if (Bytes.equals(column, COLUMN2)) {
|
||||
foundCol2 = true;
|
||||
}
|
||||
}
|
||||
assertTrue(foundCol1);
|
||||
assertTrue(foundCol2);
|
||||
assertEquals(model.getStartTime(), START_TIME);
|
||||
assertEquals(model.getEndTime(), END_TIME);
|
||||
assertEquals(model.getBatch(), BATCH);
|
||||
}
|
||||
|
||||
public void testBuildModel() throws Exception {
|
||||
checkModel(buildTestModel());
|
||||
}
|
||||
|
||||
public void testFromXML() throws Exception {
|
||||
checkModel(fromXML(AS_XML));
|
||||
}
|
||||
|
||||
public void testFromPB() throws Exception {
|
||||
checkModel(fromPB(AS_PB));
|
||||
}
|
||||
}
|
|
@ -0,0 +1,131 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.model;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.StringReader;
|
||||
import java.io.StringWriter;
|
||||
import java.util.Iterator;
|
||||
|
||||
import javax.xml.bind.JAXBContext;
|
||||
import javax.xml.bind.JAXBException;
|
||||
|
||||
import org.apache.hadoop.hbase.util.Base64;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
import junit.framework.TestCase;
|
||||
|
||||
public class TestStorageClusterStatusModel extends TestCase {
|
||||
|
||||
private static final String AS_XML =
|
||||
"<ClusterStatus requests=\"0\" regions=\"2\" averageLoad=\"1.0\">" +
|
||||
"<LiveNodes>" +
|
||||
"<Node startCode=\"1245219839331\" requests=\"0\" name=\"test1\">" +
|
||||
"<Region name=\"LVJPT1QtLCww\"/>" +
|
||||
"</Node>" +
|
||||
"<Node startCode=\"1245239331198\" requests=\"0\" name=\"test2\">" +
|
||||
"<Region name=\"Lk1FVEEuLCwxMjQ2MDAwMDQzNzI0\"/>" +
|
||||
"</Node>" +
|
||||
"</LiveNodes>" +
|
||||
"<DeadNodes/>" +
|
||||
"</ClusterStatus>";
|
||||
|
||||
private static final String AS_PB =
|
||||
"ChsKBXRlc3QxEAAaCS1ST09ULSwsMCDjuovnniQKJwoFdGVzdDIQABoVLk1FVEEuLCwxMjQ2MDAw" +
|
||||
"MDQzNzI0IP6SsfCeJBgCIAApAAAAAAAA8D8=";
|
||||
|
||||
private JAXBContext context;
|
||||
|
||||
public TestStorageClusterStatusModel() throws JAXBException {
|
||||
super();
|
||||
context = JAXBContext.newInstance(StorageClusterStatusModel.class);
|
||||
}
|
||||
|
||||
private StorageClusterStatusModel buildTestModel() {
|
||||
StorageClusterStatusModel model = new StorageClusterStatusModel();
|
||||
model.setRegions(2);
|
||||
model.setRequests(0);
|
||||
model.setAverageLoad(1.0);
|
||||
model.addLiveNode("test1", 1245219839331L)
|
||||
.addRegion(Bytes.toBytes("-ROOT-,,0"));
|
||||
model.addLiveNode("test2", 1245239331198L)
|
||||
.addRegion(Bytes.toBytes(".META.,,1246000043724"));
|
||||
return model;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
private String toXML(StorageClusterStatusModel model) throws JAXBException {
|
||||
StringWriter writer = new StringWriter();
|
||||
context.createMarshaller().marshal(model, writer);
|
||||
return writer.toString();
|
||||
}
|
||||
|
||||
private StorageClusterStatusModel fromXML(String xml) throws JAXBException {
|
||||
return (StorageClusterStatusModel)
|
||||
context.createUnmarshaller().unmarshal(new StringReader(xml));
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
private byte[] toPB(StorageClusterStatusModel model) {
|
||||
return model.createProtobufOutput();
|
||||
}
|
||||
|
||||
private StorageClusterStatusModel fromPB(String pb) throws IOException {
  return (StorageClusterStatusModel)
    new StorageClusterStatusModel().getObjectFromMessage(Base64.decode(pb));
}
|
||||
|
||||
private void checkModel(StorageClusterStatusModel model) {
|
||||
assertEquals(model.getRegions(), 2);
|
||||
assertEquals(model.getRequests(), 0);
|
||||
assertEquals(model.getAverageLoad(), 1.0);
|
||||
Iterator<StorageClusterStatusModel.Node> nodes =
|
||||
model.getLiveNodes().iterator();
|
||||
StorageClusterStatusModel.Node node = nodes.next();
|
||||
assertEquals(node.getName(), "test1");
|
||||
assertEquals(node.getStartCode(), 1245219839331L);
|
||||
Iterator<StorageClusterStatusModel.Node.Region> regions =
|
||||
node.getRegions().iterator();
|
||||
StorageClusterStatusModel.Node.Region region = regions.next();
|
||||
assertTrue(Bytes.toString(region.getName()).equals("-ROOT-,,0"));
|
||||
assertFalse(regions.hasNext());
|
||||
node = nodes.next();
|
||||
assertEquals(node.getName(), "test2");
|
||||
assertEquals(node.getStartCode(), 1245239331198L);
|
||||
regions = node.getRegions().iterator();
|
||||
region = regions.next();
|
||||
assertEquals(Bytes.toString(region.getName()), ".META.,,1246000043724");
|
||||
assertFalse(regions.hasNext());
|
||||
assertFalse(nodes.hasNext());
|
||||
}
|
||||
|
||||
public void testBuildModel() throws Exception {
|
||||
checkModel(buildTestModel());
|
||||
}
|
||||
|
||||
public void testFromXML() throws Exception {
|
||||
checkModel(fromXML(AS_XML));
|
||||
}
|
||||
|
||||
public void testFromPB() throws Exception {
|
||||
checkModel(fromPB(AS_PB));
|
||||
}
|
||||
}
|
|
@ -0,0 +1,73 @@
|
|||
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate.model;

import java.io.StringReader;
import java.io.StringWriter;

import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;

import junit.framework.TestCase;

public class TestStorageClusterVersionModel extends TestCase {
  private static final String VERSION = "0.0.1-testing";

  private static final String AS_XML =
    "<ClusterVersion>" + VERSION + "</ClusterVersion>";

  private JAXBContext context;

  public TestStorageClusterVersionModel() throws JAXBException {
    super();
    context = JAXBContext.newInstance(StorageClusterVersionModel.class);
  }

  private StorageClusterVersionModel buildTestModel() {
    StorageClusterVersionModel model = new StorageClusterVersionModel();
    model.setVersion(VERSION);
    return model;
  }

  @SuppressWarnings("unused")
  private String toXML(StorageClusterVersionModel model) throws JAXBException {
    StringWriter writer = new StringWriter();
    context.createMarshaller().marshal(model, writer);
    return writer.toString();
  }

  private StorageClusterVersionModel fromXML(String xml) throws JAXBException {
    return (StorageClusterVersionModel)
      context.createUnmarshaller().unmarshal(new StringReader(xml));
  }

  private void checkModel(StorageClusterVersionModel model) {
    assertEquals(model.getVersion(), VERSION);
  }

  public void testBuildModel() throws Exception {
    checkModel(buildTestModel());
  }

  public void testFromXML() throws Exception {
    checkModel(fromXML(AS_XML));
  }
}
|
|
@ -0,0 +1,116 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.model;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.StringReader;
|
||||
import java.io.StringWriter;
|
||||
import java.util.Iterator;
|
||||
|
||||
import javax.xml.bind.JAXBContext;
|
||||
import javax.xml.bind.JAXBException;
|
||||
|
||||
import org.apache.hadoop.hbase.util.Base64;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
import junit.framework.TestCase;
|
||||
|
||||
public class TestTableInfoModel extends TestCase {
|
||||
private static final String TABLE = "testtable";
|
||||
private static final byte[] START_KEY = Bytes.toBytes("abracadbra");
|
||||
private static final byte[] END_KEY = Bytes.toBytes("zzyzx");
|
||||
private static final long ID = 8731042424L;
|
||||
private static final String LOCATION = "testhost:9876";
|
||||
|
||||
private static final String AS_XML =
|
||||
"<TableInfo name=\"testtable\">" +
|
||||
"<Region location=\"testhost:9876\"" +
|
||||
" endKey=\"enp5eng=\"" +
|
||||
" startKey=\"YWJyYWNhZGJyYQ==\"" +
|
||||
" id=\"8731042424\"" +
|
||||
" name=\"testtable,abracadbra,8731042424\"/>" +
|
||||
"</TableInfo>";
|
||||
|
||||
private static final String AS_PB =
|
||||
"Cgl0ZXN0dGFibGUSSQofdGVzdHRhYmxlLGFicmFjYWRicmEsODczMTA0MjQyNBIKYWJyYWNhZGJy" +
|
||||
"YRoFenp5engg+MSkwyAqDXRlc3Rob3N0Ojk4NzY=";
|
||||
|
||||
private JAXBContext context;
|
||||
|
||||
public TestTableInfoModel() throws JAXBException {
|
||||
super();
|
||||
context = JAXBContext.newInstance(
|
||||
TableInfoModel.class,
|
||||
TableRegionModel.class);
|
||||
}
|
||||
|
||||
private TableInfoModel buildTestModel() {
|
||||
TableInfoModel model = new TableInfoModel();
|
||||
model.setName(TABLE);
|
||||
model.add(new TableRegionModel(TABLE, ID, START_KEY, END_KEY, LOCATION));
|
||||
return model;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
private String toXML(TableInfoModel model) throws JAXBException {
|
||||
StringWriter writer = new StringWriter();
|
||||
context.createMarshaller().marshal(model, writer);
|
||||
return writer.toString();
|
||||
}
|
||||
|
||||
private TableInfoModel fromXML(String xml) throws JAXBException {
|
||||
return (TableInfoModel)
|
||||
context.createUnmarshaller().unmarshal(new StringReader(xml));
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
private byte[] toPB(TableInfoModel model) {
|
||||
return model.createProtobufOutput();
|
||||
}
|
||||
|
||||
private TableInfoModel fromPB(String pb) throws IOException {
  return (TableInfoModel)
    new TableInfoModel().getObjectFromMessage(Base64.decode(pb));
}
|
||||
|
||||
private void checkModel(TableInfoModel model) {
|
||||
assertEquals(model.getName(), TABLE);
|
||||
Iterator<TableRegionModel> regions = model.getRegions().iterator();
|
||||
TableRegionModel region = regions.next();
|
||||
assertTrue(Bytes.equals(region.getStartKey(), START_KEY));
|
||||
assertTrue(Bytes.equals(region.getEndKey(), END_KEY));
|
||||
assertEquals(region.getId(), ID);
|
||||
assertEquals(region.getLocation(), LOCATION);
|
||||
assertFalse(regions.hasNext());
|
||||
}
|
||||
|
||||
public void testBuildModel() throws Exception {
|
||||
checkModel(buildTestModel());
|
||||
}
|
||||
|
||||
public void testFromXML() throws Exception {
|
||||
checkModel(fromXML(AS_XML));
|
||||
}
|
||||
|
||||
public void testFromPB() throws Exception {
|
||||
checkModel(fromPB(AS_PB));
|
||||
}
|
||||
}
|
|
@ -0,0 +1,107 @@
|
|||
/*
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.stargate.model;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.StringReader;
|
||||
import java.io.StringWriter;
|
||||
import java.util.Iterator;
|
||||
|
||||
import javax.xml.bind.JAXBContext;
|
||||
import javax.xml.bind.JAXBException;
|
||||
|
||||
import org.apache.hadoop.hbase.util.Base64;
|
||||
|
||||
import junit.framework.TestCase;
|
||||
|
||||
public class TestTableListModel extends TestCase {
|
||||
private static final String TABLE1 = "table1";
|
||||
private static final String TABLE2 = "table2";
|
||||
private static final String TABLE3 = "table3";
|
||||
|
||||
private static final String AS_XML =
|
||||
"<TableList><table name=\"table1\"/><table name=\"table2\"/>" +
|
||||
"<table name=\"table3\"/></TableList>";
|
||||
|
||||
private static final String AS_PB = "CgZ0YWJsZTEKBnRhYmxlMgoGdGFibGUz";
|
||||
|
||||
private JAXBContext context;
|
||||
|
||||
public TestTableListModel() throws JAXBException {
|
||||
super();
|
||||
context = JAXBContext.newInstance(
|
||||
TableListModel.class,
|
||||
TableModel.class);
|
||||
}
|
||||
|
||||
private TableListModel buildTestModel() {
|
||||
TableListModel model = new TableListModel();
|
||||
model.add(new TableModel(TABLE1));
|
||||
model.add(new TableModel(TABLE2));
|
||||
model.add(new TableModel(TABLE3));
|
||||
return model;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
private String toXML(TableListModel model) throws JAXBException {
|
||||
StringWriter writer = new StringWriter();
|
||||
context.createMarshaller().marshal(model, writer);
|
||||
return writer.toString();
|
||||
}
|
||||
|
||||
private TableListModel fromXML(String xml) throws JAXBException {
|
||||
return (TableListModel)
|
||||
context.createUnmarshaller().unmarshal(new StringReader(xml));
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
private byte[] toPB(TableListModel model) {
|
||||
return model.createProtobufOutput();
|
||||
}
|
||||
|
||||
private TableListModel fromPB(String pb) throws IOException {
  return (TableListModel)
    new TableListModel().getObjectFromMessage(Base64.decode(pb));
}
|
||||
|
||||
private void checkModel(TableListModel model) {
|
||||
Iterator<TableModel> tables = model.getTables().iterator();
|
||||
TableModel table = tables.next();
|
||||
assertEquals(table.getName(), TABLE1);
|
||||
table = tables.next();
|
||||
assertEquals(table.getName(), TABLE2);
|
||||
table = tables.next();
|
||||
assertEquals(table.getName(), TABLE3);
|
||||
assertFalse(tables.hasNext());
|
||||
}
|
||||
|
||||
public void testBuildModel() throws Exception {
|
||||
checkModel(buildTestModel());
|
||||
}
|
||||
|
||||
public void testFromXML() throws Exception {
|
||||
checkModel(fromXML(AS_XML));
|
||||
}
|
||||
|
||||
public void testFromPB() throws Exception {
|
||||
checkModel(fromPB(AS_PB));
|
||||
}
|
||||
}
|
|
@ -0,0 +1,88 @@
|
|||
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate.model;

import java.io.StringReader;
import java.io.StringWriter;

import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;

import org.apache.hadoop.hbase.util.Bytes;

import junit.framework.TestCase;

public class TestTableRegionModel extends TestCase {
  private static final String TABLE = "testtable";
  private static final byte[] START_KEY = Bytes.toBytes("abracadbra");
  private static final byte[] END_KEY = Bytes.toBytes("zzyzx");
  private static final long ID = 8731042424L;
  private static final String LOCATION = "testhost:9876";

  private static final String AS_XML =
    "<Region location=\"testhost:9876\"" +
      " endKey=\"enp5eng=\"" +
      " startKey=\"YWJyYWNhZGJyYQ==\"" +
      " id=\"8731042424\"" +
      " name=\"testtable,abracadbra,8731042424\"/>";

  private JAXBContext context;

  public TestTableRegionModel() throws JAXBException {
    super();
    context = JAXBContext.newInstance(TableRegionModel.class);
  }

  private TableRegionModel buildTestModel() {
    TableRegionModel model =
      new TableRegionModel(TABLE, ID, START_KEY, END_KEY, LOCATION);
    return model;
  }

  @SuppressWarnings("unused")
  private String toXML(TableRegionModel model) throws JAXBException {
    StringWriter writer = new StringWriter();
    context.createMarshaller().marshal(model, writer);
    return writer.toString();
  }

  private TableRegionModel fromXML(String xml) throws JAXBException {
    return (TableRegionModel)
      context.createUnmarshaller().unmarshal(new StringReader(xml));
  }

  private void checkModel(TableRegionModel model) {
    assertTrue(Bytes.equals(model.getStartKey(), START_KEY));
    assertTrue(Bytes.equals(model.getEndKey(), END_KEY));
    assertEquals(model.getId(), ID);
    assertEquals(model.getLocation(), LOCATION);
    assertEquals(model.getName(),
      TABLE + "," + Bytes.toString(START_KEY) + "," + Long.toString(ID));
  }

  public void testBuildModel() throws Exception {
    checkModel(buildTestModel());
  }

  public void testFromXML() throws Exception {
    checkModel(fromXML(AS_XML));
  }
}
|
|
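The startKey and endKey attributes in the AS_XML constant above are simply the Base64 encodings of the raw row keys "abracadbra" and "zzyzx". A minimal sketch that reproduces those attribute values; java.util.Base64 is used here only as a convenient stand-in, while the other model tests in this patch use org.apache.hadoop.hbase.util.Base64:

// Sketch: confirms the Base64-encoded key attributes hard-coded in AS_XML.
// java.util.Base64 stands in for the org.apache.hadoop.hbase.util.Base64
// helper that the stargate tests themselves import.
import java.util.Base64;

public class RegionKeyEncodingCheck {
  public static void main(String[] args) {
    // Prints YWJyYWNhZGJyYQ== (startKey) and enp5eng= (endKey).
    System.out.println(Base64.getEncoder().encodeToString("abracadbra".getBytes()));
    System.out.println(Base64.getEncoder().encodeToString("zzyzx".getBytes()));
  }
}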
@@ -0,0 +1,133 @@
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate.model;

import java.io.IOException;
import java.io.StringReader;
import java.io.StringWriter;
import java.util.Iterator;

import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;

import org.apache.hadoop.hbase.util.Base64;

import junit.framework.TestCase;

public class TestTableSchemaModel extends TestCase {

  public static final String TABLE_NAME = "testTable";
  private static final boolean IN_MEMORY = false;
  private static final boolean IS_META = false;
  private static final boolean IS_ROOT = false;
  private static final boolean READONLY = false;

  private static final String AS_XML =
    "<TableSchema name=\"testTable\"" +
      " IS_META=\"false\"" +
      " IS_ROOT=\"false\"" +
      " READONLY=\"false\"" +
      " IN_MEMORY=\"false\">" +
      TestColumnSchemaModel.AS_XML +
    "</TableSchema>";

  private static final String AS_PB =
    "Cgl0ZXN0VGFibGUSEAoHSVNfTUVUQRIFZmFsc2USEAoHSVNfUk9PVBIFZmFsc2USEQoIUkVBRE9O" +
    "TFkSBWZhbHNlEhIKCUlOX01FTU9SWRIFZmFsc2UamAEKCnRlc3Rjb2x1bW4SEgoJQkxPQ0tTSVpF" +
    "EgUxNjM4NBIUCgtCTE9PTUZJTFRFUhIFZmFsc2USEgoKQkxPQ0tDQUNIRRIEdHJ1ZRIRCgtDT01Q" +
    "UkVTU0lPThICZ3oSDQoIVkVSU0lPTlMSATESDAoDVFRMEgU4NjQwMBISCglJTl9NRU1PUlkSBWZh" +
    "bHNlGICjBSABKgJneiAAKAA=";

  private JAXBContext context;

  public TestTableSchemaModel() throws JAXBException {
    super();
    context = JAXBContext.newInstance(
      ColumnSchemaModel.class,
      TableSchemaModel.class);
  }

  public static TableSchemaModel buildTestModel() {
    return buildTestModel(TABLE_NAME);
  }

  public static TableSchemaModel buildTestModel(String name) {
    TableSchemaModel model = new TableSchemaModel();
    model.setName(name);
    model.__setInMemory(IN_MEMORY);
    model.__setIsMeta(IS_META);
    model.__setIsRoot(IS_ROOT);
    model.__setReadOnly(READONLY);
    model.addColumnFamily(TestColumnSchemaModel.buildTestModel());
    return model;
  }

  @SuppressWarnings("unused")
  private String toXML(TableSchemaModel model) throws JAXBException {
    StringWriter writer = new StringWriter();
    context.createMarshaller().marshal(model, writer);
    return writer.toString();
  }

  private TableSchemaModel fromXML(String xml) throws JAXBException {
    return (TableSchemaModel)
      context.createUnmarshaller().unmarshal(new StringReader(xml));
  }

  @SuppressWarnings("unused")
  private byte[] toPB(TableSchemaModel model) {
    return model.createProtobufOutput();
  }

  private TableSchemaModel fromPB(String pb) throws IOException {
    // Decode the argument rather than the AS_PB constant so the helper
    // works for any Base64-encoded protobuf message passed to it.
    return (TableSchemaModel)
      new TableSchemaModel().getObjectFromMessage(Base64.decode(pb));
  }

  public static void checkModel(TableSchemaModel model) {
    checkModel(model, TABLE_NAME);
  }

  public static void checkModel(TableSchemaModel model, String tableName) {
    assertEquals(model.getName(), tableName);
    assertEquals(model.__getInMemory(), IN_MEMORY);
    assertEquals(model.__getIsMeta(), IS_META);
    assertEquals(model.__getIsRoot(), IS_ROOT);
    assertEquals(model.__getReadOnly(), READONLY);
    Iterator<ColumnSchemaModel> families = model.getColumns().iterator();
    assertTrue(families.hasNext());
    ColumnSchemaModel family = families.next();
    TestColumnSchemaModel.checkModel(family);
    assertFalse(families.hasNext());
  }

  public void testBuildModel() throws Exception {
    checkModel(buildTestModel());
  }

  public void testFromXML() throws Exception {
    checkModel(fromXML(AS_XML));
  }

  public void testFromPB() throws Exception {
    checkModel(fromPB(AS_PB));
  }
}
@@ -0,0 +1,112 @@
/*
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.stargate.model;

import java.io.IOException;
import java.io.StringReader;
import java.io.StringWriter;

import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;

import org.apache.hadoop.hbase.util.Base64;

import junit.framework.TestCase;

public class TestVersionModel extends TestCase {
  private static final String STARGATE_VERSION = "0.0.1";
  private static final String OS_VERSION =
    "Linux 2.6.18-128.1.6.el5.centos.plusxen amd64";
  private static final String JVM_VERSION =
    "Sun Microsystems Inc. 1.6.0_13-11.3-b02";
  private static final String JETTY_VERSION = "6.1.14";
  private static final String JERSEY_VERSION = "1.1.0-ea";

  private static final String AS_XML =
    "<Version Stargate=\"" + STARGATE_VERSION + "\"" +
      " OS=\"" + OS_VERSION + "\"" +
      " JVM=\"" + JVM_VERSION + "\"" +
      " Server=\"" + JETTY_VERSION + "\"" +
      " Jersey=\"" + JERSEY_VERSION + "\"/>";

  private static final String AS_PB =
    "CgUwLjAuMRInU3VuIE1pY3Jvc3lzdGVtcyBJbmMuIDEuNi4wXzEzLTExLjMtYjAyGi1MaW51eCAy" +
    "LjYuMTgtMTI4LjEuNi5lbDUuY2VudG9zLnBsdXN4ZW4gYW1kNjQiBjYuMS4xNCoIMS4xLjAtZWE=";

  private JAXBContext context;

  public TestVersionModel() throws JAXBException {
    super();
    context = JAXBContext.newInstance(VersionModel.class);
  }

  private VersionModel buildTestModel() {
    VersionModel model = new VersionModel();
    model.setStargateVersion(STARGATE_VERSION);
    model.setOsVersion(OS_VERSION);
    model.setJvmVersion(JVM_VERSION);
    model.setServerVersion(JETTY_VERSION);
    model.setJerseyVersion(JERSEY_VERSION);
    return model;
  }

  @SuppressWarnings("unused")
  private String toXML(VersionModel model) throws JAXBException {
    StringWriter writer = new StringWriter();
    context.createMarshaller().marshal(model, writer);
    return writer.toString();
  }

  private VersionModel fromXML(String xml) throws JAXBException {
    return (VersionModel)
      context.createUnmarshaller().unmarshal(new StringReader(xml));
  }

  @SuppressWarnings("unused")
  private byte[] toPB(VersionModel model) {
    return model.createProtobufOutput();
  }

  private VersionModel fromPB(String pb) throws IOException {
    // Decode the argument rather than the AS_PB constant so the helper
    // works for any Base64-encoded protobuf message passed to it.
    return (VersionModel)
      new VersionModel().getObjectFromMessage(Base64.decode(pb));
  }

  private void checkModel(VersionModel model) {
    assertEquals(model.getStargateVersion(), STARGATE_VERSION);
    assertEquals(model.getOsVersion(), OS_VERSION);
    assertEquals(model.getJvmVersion(), JVM_VERSION);
    assertEquals(model.getServerVersion(), JETTY_VERSION);
    assertEquals(model.getJerseyVersion(), JERSEY_VERSION);
  }

  public void testBuildModel() throws Exception {
    checkModel(buildTestModel());
  }

  public void testFromXML() throws Exception {
    checkModel(fromXML(AS_XML));
  }

  public void testFromPB() throws Exception {
    checkModel(fromPB(AS_PB));
  }
}
@@ -0,0 +1,14 @@
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
dataDir=${hbase.tmp.dir}/zookeeper
# the port at which the clients will connect
clientPort=21810

server.0=localhost:2888:3888
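The limits in this zoo.cfg are expressed in ticks, so the effective timeouts follow from tickTime. A small sketch of that arithmetic (standard ZooKeeper semantics, not something this file defines):

// Sketch: timeouts implied by the zoo.cfg values above.
public class ZkTimeoutArithmetic {
  public static void main(String[] args) {
    int tickTimeMs = 2000; // tickTime
    int initLimit = 10;    // ticks allowed for the initial sync phase
    int syncLimit = 5;     // ticks allowed between a request and its ack
    System.out.println("initLimit window: " + initLimit * tickTimeMs + " ms"); // 20000 ms
    System.out.println("syncLimit window: " + syncLimit * tickTimeMs + " ms"); // 10000 ms
  }
}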