HBASE-6553 Remove Avro Gateway

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1374486 13f79535-47bb-0310-9956-ffa450edef68
Author: Michael Stack
Date: 2012-08-18 00:40:43 +00:00
parent b7f7c4ac09
commit 28e176de82
6 changed files with 0 additions and 2003 deletions


@@ -1,609 +0,0 @@
{
"protocol" : "HBase",
"namespace" : "org.apache.hadoop.hbase.avro.generated",
"types" : [ {
"type" : "record",
"name" : "AServerAddress",
"fields" : [ {
"name" : "hostname",
"type" : "string"
}, {
"name" : "inetSocketAddress",
"type" : "string"
}, {
"name" : "port",
"type" : "int"
} ]
}, {
"type" : "record",
"name" : "ARegionLoad",
"fields" : [ {
"name" : "memStoreSizeMB",
"type" : "int"
}, {
"name" : "name",
"type" : "bytes"
}, {
"name" : "storefileIndexSizeMB",
"type" : "int"
}, {
"name" : "storefiles",
"type" : "int"
}, {
"name" : "storefileSizeMB",
"type" : "int"
}, {
"name" : "stores",
"type" : "int"
} ]
}, {
"type" : "record",
"name" : "AServerLoad",
"fields" : [ {
"name" : "load",
"type" : "int"
}, {
"name" : "maxHeapMB",
"type" : "int"
}, {
"name" : "memStoreSizeInMB",
"type" : "int"
}, {
"name" : "numberOfRegions",
"type" : "int"
}, {
"name" : "numberOfRequests",
"type" : "int"
}, {
"name" : "regionsLoad",
"type" : {
"type" : "array",
"items" : "ARegionLoad"
}
}, {
"name" : "storefileIndexSizeInMB",
"type" : "int"
}, {
"name" : "storefiles",
"type" : "int"
}, {
"name" : "storefileSizeInMB",
"type" : "int"
}, {
"name" : "usedHeapMB",
"type" : "int"
} ]
}, {
"type" : "record",
"name" : "AServerInfo",
"fields" : [ {
"name" : "infoPort",
"type" : "int"
}, {
"name" : "load",
"type" : "AServerLoad"
}, {
"name" : "serverAddress",
"type" : "AServerAddress"
}, {
"name" : "serverName",
"type" : "string"
}, {
"name" : "startCode",
"type" : "long"
} ]
}, {
"type" : "record",
"name" : "AClusterStatus",
"fields" : [ {
"name" : "averageLoad",
"type" : "double"
}, {
"name" : "deadServerNames",
"type" : {
"type" : "array",
"items" : "string"
}
}, {
"name" : "deadServers",
"type" : "int"
}, {
"name" : "hbaseVersion",
"type" : "string"
}, {
"name" : "regionsCount",
"type" : "int"
}, {
"name" : "requestsCount",
"type" : "int"
}, {
"name" : "serverInfos",
"type" : {
"type" : "array",
"items" : "AServerInfo"
}
}, {
"name" : "servers",
"type" : "int"
} ]
}, {
"type" : "enum",
"name" : "ACompressionAlgorithm",
"symbols" : [ "LZO", "GZ", "NONE" ]
}, {
"type" : "record",
"name" : "AFamilyDescriptor",
"fields" : [ {
"name" : "name",
"type" : "bytes"
}, {
"name" : "compression",
"type" : [ "ACompressionAlgorithm", "null" ]
}, {
"name" : "maxVersions",
"type" : [ "int", "null" ]
}, {
"name" : "blocksize",
"type" : [ "int", "null" ]
}, {
"name" : "inMemory",
"type" : [ "boolean", "null" ]
}, {
"name" : "timeToLive",
"type" : [ "int", "null" ]
}, {
"name" : "blockCacheEnabled",
"type" : [ "boolean", "null" ]
} ]
}, {
"type" : "record",
"name" : "ATableDescriptor",
"fields" : [ {
"name" : "name",
"type" : "bytes"
}, {
"name" : "families",
"type" : [ {
"type" : "array",
"items" : "AFamilyDescriptor"
}, "null" ]
}, {
"name" : "maxFileSize",
"type" : [ "long", "null" ]
}, {
"name" : "memStoreFlushSize",
"type" : [ "long", "null" ]
}, {
"name" : "rootRegion",
"type" : [ "boolean", "null" ]
}, {
"name" : "metaRegion",
"type" : [ "boolean", "null" ]
}, {
"name" : "metaTable",
"type" : [ "boolean", "null" ]
}, {
"name" : "readOnly",
"type" : [ "boolean", "null" ]
}, {
"name" : "deferredLogFlush",
"type" : [ "boolean", "null" ]
} ]
}, {
"type" : "record",
"name" : "AColumn",
"fields" : [ {
"name" : "family",
"type" : "bytes"
}, {
"name" : "qualifier",
"type" : [ "bytes", "null" ]
} ]
}, {
"type" : "record",
"name" : "ATimeRange",
"fields" : [ {
"name" : "minStamp",
"type" : "long"
}, {
"name" : "maxStamp",
"type" : "long"
} ]
}, {
"type" : "record",
"name" : "AGet",
"fields" : [ {
"name" : "row",
"type" : "bytes"
}, {
"name" : "columns",
"type" : [ {
"type" : "array",
"items" : "AColumn"
}, "null" ]
}, {
"name" : "timestamp",
"type" : [ "long", "null" ]
}, {
"name" : "timerange",
"type" : [ "ATimeRange", "null" ]
}, {
"name" : "maxVersions",
"type" : [ "int", "null" ]
} ]
}, {
"type" : "record",
"name" : "AResultEntry",
"fields" : [ {
"name" : "family",
"type" : "bytes"
}, {
"name" : "qualifier",
"type" : "bytes"
}, {
"name" : "value",
"type" : "bytes"
}, {
"name" : "timestamp",
"type" : "long"
} ]
}, {
"type" : "record",
"name" : "AResult",
"fields" : [ {
"name" : "row",
"type" : "bytes"
}, {
"name" : "entries",
"type" : {
"type" : "array",
"items" : "AResultEntry"
}
} ]
}, {
"type" : "record",
"name" : "AColumnValue",
"fields" : [ {
"name" : "family",
"type" : "bytes"
}, {
"name" : "qualifier",
"type" : "bytes"
}, {
"name" : "value",
"type" : "bytes"
}, {
"name" : "timestamp",
"type" : [ "long", "null" ]
} ]
}, {
"type" : "record",
"name" : "APut",
"fields" : [ {
"name" : "row",
"type" : "bytes"
}, {
"name" : "columnValues",
"type" : {
"type" : "array",
"items" : "AColumnValue"
}
} ]
}, {
"type" : "record",
"name" : "ADelete",
"fields" : [ {
"name" : "row",
"type" : "bytes"
}, {
"name" : "columns",
"type" : [ {
"type" : "array",
"items" : "AColumn"
}, "null" ]
} ]
}, {
"type" : "record",
"name" : "AScan",
"fields" : [ {
"name" : "startRow",
"type" : [ "bytes", "null" ]
}, {
"name" : "stopRow",
"type" : [ "bytes", "null" ]
}, {
"name" : "columns",
"type" : [ {
"type" : "array",
"items" : "AColumn"
}, "null" ]
}, {
"name" : "timestamp",
"type" : [ "long", "null" ]
}, {
"name" : "timerange",
"type" : [ "ATimeRange", "null" ]
}, {
"name" : "maxVersions",
"type" : [ "int", "null" ]
} ]
}, {
"type" : "error",
"name" : "AIOError",
"fields" : [ {
"name" : "message",
"type" : "string"
} ]
}, {
"type" : "error",
"name" : "AIllegalArgument",
"fields" : [ {
"name" : "message",
"type" : "string"
} ]
}, {
"type" : "error",
"name" : "ATableExists",
"fields" : [ {
"name" : "message",
"type" : "string"
} ]
}, {
"type" : "error",
"name" : "AMasterNotRunning",
"fields" : [ {
"name" : "message",
"type" : "string"
} ]
} ],
"messages" : {
"getHBaseVersion" : {
"request" : [ ],
"response" : "string",
"errors" : [ "AIOError" ]
},
"getClusterStatus" : {
"request" : [ ],
"response" : "AClusterStatus",
"errors" : [ "AIOError" ]
},
"listTables" : {
"request" : [ ],
"response" : {
"type" : "array",
"items" : "ATableDescriptor"
},
"errors" : [ "AIOError" ]
},
"describeTable" : {
"request" : [ {
"name" : "table",
"type" : "bytes"
} ],
"response" : "ATableDescriptor",
"errors" : [ "AIOError" ]
},
"isTableEnabled" : {
"request" : [ {
"name" : "table",
"type" : "bytes"
} ],
"response" : "boolean",
"errors" : [ "AIOError" ]
},
"tableExists" : {
"request" : [ {
"name" : "table",
"type" : "bytes"
} ],
"response" : "boolean",
"errors" : [ "AIOError" ]
},
"describeFamily" : {
"request" : [ {
"name" : "table",
"type" : "bytes"
}, {
"name" : "family",
"type" : "bytes"
} ],
"response" : "AFamilyDescriptor",
"errors" : [ "AIOError" ]
},
"createTable" : {
"request" : [ {
"name" : "table",
"type" : "ATableDescriptor"
} ],
"response" : "null",
"errors" : [ "AIOError", "AIllegalArgument", "ATableExists", "AMasterNotRunning" ]
},
"deleteTable" : {
"request" : [ {
"name" : "table",
"type" : "bytes"
} ],
"response" : "null",
"errors" : [ "AIOError" ]
},
"modifyTable" : {
"request" : [ {
"name" : "table",
"type" : "bytes"
}, {
"name" : "tableDescriptor",
"type" : "ATableDescriptor"
} ],
"response" : "null",
"errors" : [ "AIOError" ]
},
"enableTable" : {
"request" : [ {
"name" : "table",
"type" : "bytes"
} ],
"response" : "null",
"errors" : [ "AIOError" ]
},
"disableTable" : {
"request" : [ {
"name" : "table",
"type" : "bytes"
} ],
"response" : "null",
"errors" : [ "AIOError" ]
},
"flush" : {
"request" : [ {
"name" : "table",
"type" : "bytes"
} ],
"response" : "null",
"errors" : [ "AIOError" ]
},
"split" : {
"request" : [ {
"name" : "table",
"type" : "bytes"
} ],
"response" : "null",
"errors" : [ "AIOError" ]
},
"addFamily" : {
"request" : [ {
"name" : "table",
"type" : "bytes"
}, {
"name" : "family",
"type" : "AFamilyDescriptor"
} ],
"response" : "null",
"errors" : [ "AIOError" ]
},
"deleteFamily" : {
"request" : [ {
"name" : "table",
"type" : "bytes"
}, {
"name" : "family",
"type" : "bytes"
} ],
"response" : "null",
"errors" : [ "AIOError" ]
},
"modifyFamily" : {
"request" : [ {
"name" : "table",
"type" : "bytes"
}, {
"name" : "familyName",
"type" : "bytes"
}, {
"name" : "familyDescriptor",
"type" : "AFamilyDescriptor"
} ],
"response" : "null",
"errors" : [ "AIOError" ]
},
"get" : {
"request" : [ {
"name" : "table",
"type" : "bytes"
}, {
"name" : "get",
"type" : "AGet"
} ],
"response" : "AResult",
"errors" : [ "AIOError" ]
},
"exists" : {
"request" : [ {
"name" : "table",
"type" : "bytes"
}, {
"name" : "get",
"type" : "AGet"
} ],
"response" : "boolean",
"errors" : [ "AIOError" ]
},
"put" : {
"request" : [ {
"name" : "table",
"type" : "bytes"
}, {
"name" : "put",
"type" : "APut"
} ],
"response" : "null",
"errors" : [ "AIOError" ]
},
"delete" : {
"request" : [ {
"name" : "table",
"type" : "bytes"
}, {
"name" : "delete",
"type" : "ADelete"
} ],
"response" : "null",
"errors" : [ "AIOError" ]
},
"incrementColumnValue" : {
"request" : [ {
"name" : "table",
"type" : "bytes"
}, {
"name" : "row",
"type" : "bytes"
}, {
"name" : "family",
"type" : "bytes"
}, {
"name" : "qualifier",
"type" : "bytes"
}, {
"name" : "amount",
"type" : "long"
}, {
"name" : "writeToWAL",
"type" : "boolean"
} ],
"response" : "long",
"errors" : [ "AIOError" ]
},
"scannerOpen" : {
"request" : [ {
"name" : "table",
"type" : "bytes"
}, {
"name" : "scan",
"type" : "AScan"
} ],
"response" : "int",
"errors" : [ "AIOError" ]
},
"scannerClose" : {
"request" : [ {
"name" : "scannerId",
"type" : "int"
} ],
"response" : "null",
"errors" : [ "AIOError", "AIllegalArgument" ]
},
"scannerGetRows" : {
"request" : [ {
"name" : "scannerId",
"type" : "int"
}, {
"name" : "numberOfRows",
"type" : "int"
} ],
"response" : {
"type" : "array",
"items" : "AResult"
},
"errors" : [ "AIOError", "AIllegalArgument" ]
}
}
}
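
For reference, this is how a client consumed the protocol above before its removal: a minimal sketch using the Avro 1.4-era avro-ipc client classes and the generated HBase interface. The localhost URL and default port 9090 come from the package documentation below; the class name AvroClientExample is illustrative.

import java.net.URL;
import org.apache.avro.ipc.HttpTransceiver;
import org.apache.avro.ipc.specific.SpecificRequestor;
import org.apache.hadoop.hbase.avro.generated.HBase;

public class AvroClientExample {
  public static void main(String[] args) throws Exception {
    // Open an HTTP transceiver against the gateway (default port 9090).
    HttpTransceiver transceiver = new HttpTransceiver(new URL("http://localhost:9090"));
    // Obtain a dynamic proxy implementing the generated HBase protocol.
    HBase hbase = SpecificRequestor.getClient(HBase.class, transceiver);
    System.out.println("HBase version: " + hbase.getHBaseVersion());
    transceiver.close();
  }
}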


@@ -1,618 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.avro;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.HashMap;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericArray;
import org.apache.avro.generic.GenericData;
import org.apache.avro.ipc.HttpServer;
import org.apache.avro.ipc.specific.SpecificResponder;
import org.apache.avro.util.Utf8;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.avro.generated.AClusterStatus;
import org.apache.hadoop.hbase.avro.generated.ADelete;
import org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor;
import org.apache.hadoop.hbase.avro.generated.AGet;
import org.apache.hadoop.hbase.avro.generated.AIOError;
import org.apache.hadoop.hbase.avro.generated.AIllegalArgument;
import org.apache.hadoop.hbase.avro.generated.AMasterNotRunning;
import org.apache.hadoop.hbase.avro.generated.APut;
import org.apache.hadoop.hbase.avro.generated.AResult;
import org.apache.hadoop.hbase.avro.generated.AScan;
import org.apache.hadoop.hbase.avro.generated.ATableDescriptor;
import org.apache.hadoop.hbase.avro.generated.ATableExists;
import org.apache.hadoop.hbase.avro.generated.HBase;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
/**
* Start an Avro server
*/
@Deprecated
@InterfaceAudience.Private
public class AvroServer {
/**
* The HBaseImpl is a glue object that connects Avro RPC calls to the
* HBase client API, primarily defined in the HBaseAdmin and HTable classes.
*/
public static class HBaseImpl implements HBase {
//
// PROPERTIES
//
protected Configuration conf = null;
protected HBaseAdmin admin = null;
protected HTablePool htablePool = null;
protected final Log LOG = LogFactory.getLog(this.getClass().getName());
// nextScannerId and scannerMap are used to manage scanner state
protected int nextScannerId = 0;
protected HashMap<Integer, ResultScanner> scannerMap = null;
//
// UTILITY METHODS
//
/**
* Assigns a unique ID to the scanner and adds the mapping to an internal
* hash-map.
*
* @param scanner
* @return integer scanner id
*/
protected synchronized int addScanner(ResultScanner scanner) {
int id = nextScannerId++;
scannerMap.put(id, scanner);
return id;
}
/**
* Returns the scanner associated with the specified ID.
*
* @param id
* @return a Scanner, or null if ID was invalid.
*/
protected synchronized ResultScanner getScanner(int id) {
return scannerMap.get(id);
}
/**
* Removes the scanner associated with the specified ID from the internal
* id->scanner hash-map.
*
* @param id
* @return a Scanner, or null if ID was invalid.
*/
protected synchronized ResultScanner removeScanner(int id) {
return scannerMap.remove(id);
}
//
// CTOR METHODS
//
// TODO(hammer): figure out appropriate setting of maxSize for htablePool
/**
* Constructs an HBaseImpl object.
* @throws IOException
*/
HBaseImpl() throws IOException {
this(HBaseConfiguration.create());
}
HBaseImpl(final Configuration c) throws IOException {
conf = c;
admin = new HBaseAdmin(conf);
htablePool = new HTablePool(conf, 10);
scannerMap = new HashMap<Integer, ResultScanner>();
}
//
// SERVICE METHODS
//
// TODO(hammer): Investigate use of the Command design pattern
//
// Cluster metadata
//
public Utf8 getHBaseVersion() throws AIOError {
try {
return new Utf8(admin.getClusterStatus().getHBaseVersion());
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
public AClusterStatus getClusterStatus() throws AIOError {
try {
return AvroUtil.csToACS(admin.getClusterStatus());
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
public GenericArray<ATableDescriptor> listTables() throws AIOError {
try {
HTableDescriptor[] tables = admin.listTables();
Schema atdSchema = Schema.createArray(ATableDescriptor.SCHEMA$);
GenericData.Array<ATableDescriptor> result = null;
result = new GenericData.Array<ATableDescriptor>(tables.length, atdSchema);
for (HTableDescriptor table : tables) {
result.add(AvroUtil.htdToATD(table));
}
return result;
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
//
// Table metadata
//
// TODO(hammer): Handle the case where the table does not exist explicitly?
public ATableDescriptor describeTable(ByteBuffer table) throws AIOError {
try {
return AvroUtil.htdToATD(admin.getTableDescriptor(Bytes.toBytes(table)));
} catch (TableNotFoundException e) {
return null;
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
public boolean isTableEnabled(ByteBuffer table) throws AIOError {
try {
return admin.isTableEnabled(Bytes.toBytes(table));
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
public boolean tableExists(ByteBuffer table) throws AIOError {
try {
return admin.tableExists(Bytes.toBytes(table));
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
//
// Family metadata
//
// TODO(hammer): Handle the case where the family does not exist explicitly?
public AFamilyDescriptor describeFamily(ByteBuffer table, ByteBuffer family) throws AIOError {
try {
HTableDescriptor htd = admin.getTableDescriptor(Bytes.toBytes(table));
return AvroUtil.hcdToAFD(htd.getFamily(Bytes.toBytes(family)));
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
//
// Table admin
//
public Void createTable(ATableDescriptor table) throws AIOError,
AIllegalArgument,
ATableExists,
AMasterNotRunning {
try {
admin.createTable(AvroUtil.atdToHTD(table));
return null;
} catch (IllegalArgumentException e) {
AIllegalArgument iae = new AIllegalArgument();
iae.message = new Utf8(e.getMessage());
throw iae;
} catch (TableExistsException e) {
ATableExists tee = new ATableExists();
tee.message = new Utf8(e.getMessage());
throw tee;
} catch (MasterNotRunningException e) {
AMasterNotRunning mnre = new AMasterNotRunning();
mnre.message = new Utf8(e.getMessage());
throw mnre;
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
// Note that disabling, flushing and major compacting .META. must be done by the client
// TODO(hammer): more selective cache dirtying than flush?
public Void deleteTable(ByteBuffer table) throws AIOError {
try {
admin.deleteTable(Bytes.toBytes(table));
return null;
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
// NB: Asynchronous operation
public Void modifyTable(ByteBuffer tableName, ATableDescriptor tableDescriptor) throws AIOError {
try {
admin.modifyTable(Bytes.toBytes(tableName),
AvroUtil.atdToHTD(tableDescriptor));
return null;
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
public Void enableTable(ByteBuffer table) throws AIOError {
try {
admin.enableTable(Bytes.toBytes(table));
return null;
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
public Void disableTable(ByteBuffer table) throws AIOError {
try {
admin.disableTable(Bytes.toBytes(table));
return null;
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
// NB: Asynchronous operation
public Void flush(ByteBuffer table) throws AIOError {
try {
admin.flush(Bytes.toBytes(table));
return null;
} catch (InterruptedException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
// NB: Asynchronous operation
public Void split(ByteBuffer table) throws AIOError {
try {
admin.split(Bytes.toBytes(table));
return null;
} catch (InterruptedException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
//
// Family admin
//
public Void addFamily(ByteBuffer table, AFamilyDescriptor family) throws AIOError {
try {
admin.addColumn(Bytes.toBytes(table),
AvroUtil.afdToHCD(family));
return null;
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
// NB: Asynchronous operation
public Void deleteFamily(ByteBuffer table, ByteBuffer family) throws AIOError {
try {
admin.deleteColumn(Bytes.toBytes(table), Bytes.toBytes(family));
return null;
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
// NB: Asynchronous operation
public Void modifyFamily(ByteBuffer table, ByteBuffer familyName, AFamilyDescriptor familyDescriptor) throws AIOError {
try {
admin.modifyColumn(Bytes.toBytes(table), AvroUtil.afdToHCD(familyDescriptor));
return null;
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
//
// Single-row DML
//
// TODO(hammer): Java with statement for htablepool concision?
// TODO(hammer): Can Get have timestamp and timerange simultaneously?
// TODO(hammer): Do I need to catch the RuntimeException of getTable?
// TODO(hammer): Handle gets with no results
// TODO(hammer): Uses exists(Get) to ensure columns exist
public AResult get(ByteBuffer table, AGet aget) throws AIOError {
HTableInterface htable = htablePool.getTable(Bytes.toBytes(table));
try {
return AvroUtil.resultToAResult(htable.get(AvroUtil.agetToGet(aget)));
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
} finally {
try {
htablePool.putTable(htable);
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
}
public boolean exists(ByteBuffer table, AGet aget) throws AIOError {
HTableInterface htable = htablePool.getTable(Bytes.toBytes(table));
try {
return htable.exists(AvroUtil.agetToGet(aget));
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
} finally {
try {
htablePool.putTable(htable);
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
}
public Void put(ByteBuffer table, APut aput) throws AIOError {
HTableInterface htable = htablePool.getTable(Bytes.toBytes(table));
try {
htable.put(AvroUtil.aputToPut(aput));
return null;
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
} finally {
try {
htablePool.putTable(htable);
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
}
public Void delete(ByteBuffer table, ADelete adelete) throws AIOError {
HTableInterface htable = htablePool.getTable(Bytes.toBytes(table));
try {
htable.delete(AvroUtil.adeleteToDelete(adelete));
return null;
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
} finally {
try {
htablePool.putTable(htable);
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
}
public long incrementColumnValue(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, long amount, boolean writeToWAL) throws AIOError {
HTableInterface htable = htablePool.getTable(Bytes.toBytes(table));
try {
return htable.incrementColumnValue(Bytes.toBytes(row), Bytes.toBytes(family), Bytes.toBytes(qualifier), amount, writeToWAL);
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
} finally {
try {
htablePool.putTable(htable);
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
}
//
// Multi-row DML
//
public int scannerOpen(ByteBuffer table, AScan ascan) throws AIOError {
HTableInterface htable = htablePool.getTable(Bytes.toBytes(table));
try {
Scan scan = AvroUtil.ascanToScan(ascan);
return addScanner(htable.getScanner(scan));
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
} finally {
try {
htablePool.putTable(htable);
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
}
public Void scannerClose(int scannerId) throws AIOError, AIllegalArgument {
try {
ResultScanner scanner = getScanner(scannerId);
if (scanner == null) {
AIllegalArgument aie = new AIllegalArgument();
aie.message = new Utf8("scanner ID is invalid: " + scannerId);
throw aie;
}
scanner.close();
removeScanner(scannerId);
return null;
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
public GenericArray<AResult> scannerGetRows(int scannerId, int numberOfRows) throws AIOError, AIllegalArgument {
try {
ResultScanner scanner = getScanner(scannerId);
if (scanner == null) {
AIllegalArgument aie = new AIllegalArgument();
aie.message = new Utf8("scanner ID is invalid: " + scannerId);
throw aie;
}
return AvroUtil.resultsToAResults(scanner.next(numberOfRows));
} catch (IOException e) {
AIOError ioe = new AIOError();
ioe.message = new Utf8(e.getMessage());
throw ioe;
}
}
}
//
// MAIN PROGRAM
//
private static void printUsageAndExit() {
printUsageAndExit(null);
}
private static void printUsageAndExit(final String message) {
if (message != null) {
System.err.println(message);
}
System.out.println("Usage: java org.apache.hadoop.hbase.avro.AvroServer " +
"--help | [--port=PORT] start");
System.out.println("Arguments:");
System.out.println(" start Start Avro server");
System.out.println(" stop Stop Avro server");
System.out.println("Options:");
System.out.println(" port Port to listen on. Default: 9090");
System.out.println(" help Print this message and exit");
System.exit(0);
}
protected static void doMain(final String[] args) throws Exception {
if (args.length < 1) {
printUsageAndExit();
}
int port = 9090;
final String portArgKey = "--port=";
for (String cmd: args) {
if (cmd.startsWith(portArgKey)) {
port = Integer.parseInt(cmd.substring(portArgKey.length()));
continue;
} else if (cmd.equals("--help") || cmd.equals("-h")) {
printUsageAndExit();
} else if (cmd.equals("start")) {
continue;
} else if (cmd.equals("stop")) {
printUsageAndExit("To shutdown the Avro server run " +
"bin/hbase-daemon.sh stop avro or send a kill signal to " +
"the Avro server pid");
}
// Print out usage if we get to here.
printUsageAndExit();
}
Log LOG = LogFactory.getLog("AvroServer");
LOG.info("starting HBase Avro server on port " + Integer.toString(port));
SpecificResponder r = new SpecificResponder(HBase.class, new HBaseImpl());
HttpServer server = new HttpServer(r, port);
server.start();
server.join();
}
// TODO(hammer): Look at Cassandra's daemonization and integration with JSVC
// TODO(hammer): Don't eat it after a single exception
// TODO(hammer): Figure out why we do doMain()
// TODO(hammer): Figure out if we want String[] or String [] syntax
public static void main(String[] args) throws Exception {
doMain(args);
}
}
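
The scanner messages implemented above keep scanner state on the server and hand the client an integer ID. Below is a hedged client-side sketch of that open/fetch/close lifecycle, reusing the proxy-construction pattern from the earlier client example; the table name and batch size are illustrative.

import java.nio.ByteBuffer;
import org.apache.avro.generic.GenericArray;
import org.apache.hadoop.hbase.avro.generated.AResult;
import org.apache.hadoop.hbase.avro.generated.AScan;
import org.apache.hadoop.hbase.avro.generated.HBase;
import org.apache.hadoop.hbase.util.Bytes;

public class ScannerLifecycleExample {
  static void scanAll(HBase hbase) throws Exception {
    ByteBuffer table = ByteBuffer.wrap(Bytes.toBytes("tableA"));
    // scannerOpen stores a ResultScanner in the server's scannerMap
    // and returns the ID it was registered under.
    int scannerId = hbase.scannerOpen(table, new AScan());
    try {
      // Fetch rows in batches until the scanner is exhausted.
      GenericArray<AResult> batch = hbase.scannerGetRows(scannerId, 100);
      while (batch.size() > 0) {
        batch = hbase.scannerGetRows(scannerId, 100);
      }
    } finally {
      // scannerClose closes the server-side scanner and removes its map entry.
      hbase.scannerClose(scannerId);
    }
  }
}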


@@ -1,417 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.avro;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.List;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericArray;
import org.apache.avro.generic.GenericData;
import org.apache.avro.util.Utf8;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.avro.generated.AClusterStatus;
import org.apache.hadoop.hbase.avro.generated.AColumn;
import org.apache.hadoop.hbase.avro.generated.AColumnValue;
import org.apache.hadoop.hbase.avro.generated.ACompressionAlgorithm;
import org.apache.hadoop.hbase.avro.generated.ADelete;
import org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor;
import org.apache.hadoop.hbase.avro.generated.AGet;
import org.apache.hadoop.hbase.avro.generated.AIllegalArgument;
import org.apache.hadoop.hbase.avro.generated.APut;
import org.apache.hadoop.hbase.avro.generated.ARegionLoad;
import org.apache.hadoop.hbase.avro.generated.AResult;
import org.apache.hadoop.hbase.avro.generated.AResultEntry;
import org.apache.hadoop.hbase.avro.generated.AScan;
import org.apache.hadoop.hbase.avro.generated.AServerAddress;
import org.apache.hadoop.hbase.avro.generated.AServerInfo;
import org.apache.hadoop.hbase.avro.generated.AServerLoad;
import org.apache.hadoop.hbase.avro.generated.ATableDescriptor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.util.Bytes;
@Deprecated
@InterfaceAudience.Private
public class AvroUtil {
//
// Cluster metadata
//
static public AServerAddress hsaToASA(HServerAddress hsa) throws IOException {
AServerAddress asa = new AServerAddress();
asa.hostname = new Utf8(hsa.getHostname());
asa.inetSocketAddress = new Utf8(hsa.getInetSocketAddress().toString());
asa.port = hsa.getPort();
return asa;
}
static public ARegionLoad hrlToARL(RegionLoad rl) throws IOException {
ARegionLoad arl = new ARegionLoad();
arl.memStoreSizeMB = rl.getMemStoreSizeMB();
arl.name = ByteBuffer.wrap(rl.getName());
arl.storefileIndexSizeMB = rl.getStorefileIndexSizeMB();
arl.storefiles = rl.getStorefiles();
arl.storefileSizeMB = rl.getStorefileSizeMB();
arl.stores = rl.getStores();
return arl;
}
static public AServerLoad hslToASL(ServerLoad sl) throws IOException {
AServerLoad asl = new AServerLoad();
asl.load = sl.getLoad();
asl.maxHeapMB = sl.getMaxHeapMB();
asl.memStoreSizeInMB = sl.getMemstoreSizeInMB();
asl.numberOfRegions = sl.getNumberOfRegions();
asl.numberOfRequests = sl.getNumberOfRequests();
Collection<RegionLoad> regionLoads = sl.getRegionsLoad().values();
Schema s = Schema.createArray(ARegionLoad.SCHEMA$);
GenericData.Array<ARegionLoad> aregionLoads = null;
if (regionLoads != null) {
aregionLoads = new GenericData.Array<ARegionLoad>(regionLoads.size(), s);
for (RegionLoad rl : regionLoads) {
aregionLoads.add(hrlToARL(rl));
}
} else {
aregionLoads = new GenericData.Array<ARegionLoad>(0, s);
}
asl.regionsLoad = aregionLoads;
asl.storefileIndexSizeInMB = sl.getStorefileIndexSizeInMB();
asl.storefiles = sl.getStorefiles();
asl.storefileSizeInMB = sl.getStorefileSizeInMB();
asl.usedHeapMB = sl.getUsedHeapMB();
return asl;
}
static public AServerInfo hsiToASI(ServerName sn, ServerLoad sl) throws IOException {
AServerInfo asi = new AServerInfo();
asi.infoPort = -1;
asi.load = hslToASL(sl);
asi.serverAddress = hsaToASA(new HServerAddress(sn.getHostname(), sn.getPort()));
asi.serverName = new Utf8(sn.toString());
asi.startCode = sn.getStartcode();
return asi;
}
static public AClusterStatus csToACS(ClusterStatus cs) throws IOException {
AClusterStatus acs = new AClusterStatus();
acs.averageLoad = cs.getAverageLoad();
Collection<ServerName> deadServerNames = cs.getDeadServerNames();
Schema stringArraySchema = Schema.createArray(Schema.create(Schema.Type.STRING));
GenericData.Array<CharSequence> adeadServerNames = null;
if (deadServerNames != null) {
adeadServerNames = new GenericData.Array<CharSequence>(deadServerNames.size(), stringArraySchema);
for (ServerName deadServerName : deadServerNames) {
adeadServerNames.add(new Utf8(deadServerName.toString()));
}
} else {
adeadServerNames = new GenericData.Array<CharSequence>(0, stringArraySchema);
}
acs.deadServerNames = adeadServerNames;
acs.deadServers = cs.getDeadServers();
acs.hbaseVersion = new Utf8(cs.getHBaseVersion());
acs.regionsCount = cs.getRegionsCount();
acs.requestsCount = cs.getRequestsCount();
Collection<ServerName> hserverInfos = cs.getServers();
Schema s = Schema.createArray(AServerInfo.SCHEMA$);
GenericData.Array<AServerInfo> aserverInfos = null;
if (hserverInfos != null) {
aserverInfos = new GenericData.Array<AServerInfo>(hserverInfos.size(), s);
for (ServerName hsi : hserverInfos) {
aserverInfos.add(hsiToASI(hsi, cs.getLoad(hsi)));
}
} else {
aserverInfos = new GenericData.Array<AServerInfo>(0, s);
}
acs.serverInfos = aserverInfos;
acs.servers = cs.getServers().size();
return acs;
}
//
// Table metadata
//
static public ATableDescriptor htdToATD(HTableDescriptor table) throws IOException {
ATableDescriptor atd = new ATableDescriptor();
atd.name = ByteBuffer.wrap(table.getName());
Collection<HColumnDescriptor> families = table.getFamilies();
Schema afdSchema = Schema.createArray(AFamilyDescriptor.SCHEMA$);
GenericData.Array<AFamilyDescriptor> afamilies = null;
if (families.size() > 0) {
afamilies = new GenericData.Array<AFamilyDescriptor>(families.size(), afdSchema);
for (HColumnDescriptor hcd : families) {
AFamilyDescriptor afamily = hcdToAFD(hcd);
afamilies.add(afamily);
}
} else {
afamilies = new GenericData.Array<AFamilyDescriptor>(0, afdSchema);
}
atd.families = afamilies;
atd.maxFileSize = table.getMaxFileSize();
atd.memStoreFlushSize = table.getMemStoreFlushSize();
atd.rootRegion = table.isRootRegion();
atd.metaRegion = table.isMetaRegion();
atd.metaTable = table.isMetaTable();
atd.readOnly = table.isReadOnly();
atd.deferredLogFlush = table.isDeferredLogFlush();
return atd;
}
static public HTableDescriptor atdToHTD(ATableDescriptor atd) throws IOException, AIllegalArgument {
HTableDescriptor htd = new HTableDescriptor(Bytes.toBytes(atd.name));
if (atd.families != null && atd.families.size() > 0) {
for (AFamilyDescriptor afd : atd.families) {
htd.addFamily(afdToHCD(afd));
}
}
if (atd.maxFileSize != null) {
htd.setMaxFileSize(atd.maxFileSize);
}
if (atd.memStoreFlushSize != null) {
htd.setMemStoreFlushSize(atd.memStoreFlushSize);
}
if (atd.readOnly != null) {
htd.setReadOnly(atd.readOnly);
}
if (atd.deferredLogFlush != null) {
htd.setDeferredLogFlush(atd.deferredLogFlush);
}
if (atd.rootRegion != null || atd.metaRegion != null || atd.metaTable != null) {
AIllegalArgument aie = new AIllegalArgument();
aie.message = new Utf8("Can't set root or meta flag on create table.");
throw aie;
}
return htd;
}
//
// Family metadata
//
static public AFamilyDescriptor hcdToAFD(HColumnDescriptor hcd) throws IOException {
AFamilyDescriptor afamily = new AFamilyDescriptor();
afamily.name = ByteBuffer.wrap(hcd.getName());
String compressionAlgorithm = hcd.getCompressionType().getName();
if (compressionAlgorithm.equals("LZO")) {
afamily.compression = ACompressionAlgorithm.LZO;
} else if (compressionAlgorithm.equals("GZ")) {
afamily.compression = ACompressionAlgorithm.GZ;
} else {
afamily.compression = ACompressionAlgorithm.NONE;
}
afamily.maxVersions = hcd.getMaxVersions();
afamily.blocksize = hcd.getBlocksize();
afamily.inMemory = hcd.isInMemory();
afamily.timeToLive = hcd.getTimeToLive();
afamily.blockCacheEnabled = hcd.isBlockCacheEnabled();
return afamily;
}
static public HColumnDescriptor afdToHCD(AFamilyDescriptor afd) throws IOException {
HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes(afd.name));
ACompressionAlgorithm compressionAlgorithm = afd.compression;
if (compressionAlgorithm == ACompressionAlgorithm.LZO) {
hcd.setCompressionType(Compression.Algorithm.LZO);
} else if (compressionAlgorithm == ACompressionAlgorithm.GZ) {
hcd.setCompressionType(Compression.Algorithm.GZ);
} else {
hcd.setCompressionType(Compression.Algorithm.NONE);
}
if (afd.maxVersions != null) {
hcd.setMaxVersions(afd.maxVersions);
}
if (afd.blocksize != null) {
hcd.setBlocksize(afd.blocksize);
}
if (afd.inMemory != null) {
hcd.setInMemory(afd.inMemory);
}
if (afd.timeToLive != null) {
hcd.setTimeToLive(afd.timeToLive);
}
if (afd.blockCacheEnabled != null) {
hcd.setBlockCacheEnabled(afd.blockCacheEnabled);
}
return hcd;
}
//
// Single-Row DML (Get)
//
// TODO(hammer): More concise idiom than if not null assign?
static public Get agetToGet(AGet aget) throws IOException {
Get get = new Get(Bytes.toBytes(aget.row));
if (aget.columns != null) {
for (AColumn acolumn : aget.columns) {
if (acolumn.qualifier != null) {
get.addColumn(Bytes.toBytes(acolumn.family), Bytes.toBytes(acolumn.qualifier));
} else {
get.addFamily(Bytes.toBytes(acolumn.family));
}
}
}
if (aget.timestamp != null) {
get.setTimeStamp(aget.timestamp);
}
if (aget.timerange != null) {
get.setTimeRange(aget.timerange.minStamp, aget.timerange.maxStamp);
}
if (aget.maxVersions != null) {
get.setMaxVersions(aget.maxVersions);
}
return get;
}
// TODO(hammer): Pick one: Timestamp or TimeStamp
static public AResult resultToAResult(Result result) {
AResult aresult = new AResult();
byte[] row = result.getRow();
aresult.row = ByteBuffer.wrap(row != null ? row : new byte[1]);
Schema s = Schema.createArray(AResultEntry.SCHEMA$);
GenericData.Array<AResultEntry> entries = null;
List<KeyValue> resultKeyValues = result.list();
if (resultKeyValues != null && resultKeyValues.size() > 0) {
entries = new GenericData.Array<AResultEntry>(resultKeyValues.size(), s);
for (KeyValue resultKeyValue : resultKeyValues) {
AResultEntry entry = new AResultEntry();
entry.family = ByteBuffer.wrap(resultKeyValue.getFamily());
entry.qualifier = ByteBuffer.wrap(resultKeyValue.getQualifier());
entry.value = ByteBuffer.wrap(resultKeyValue.getValue());
entry.timestamp = resultKeyValue.getTimestamp();
entries.add(entry);
}
} else {
entries = new GenericData.Array<AResultEntry>(0, s);
}
aresult.entries = entries;
return aresult;
}
//
// Single-Row DML (Put)
//
static public Put aputToPut(APut aput) throws IOException {
Put put = new Put(Bytes.toBytes(aput.row));
for (AColumnValue acv : aput.columnValues) {
if (acv.timestamp != null) {
put.add(Bytes.toBytes(acv.family),
Bytes.toBytes(acv.qualifier),
acv.timestamp,
Bytes.toBytes(acv.value));
} else {
put.add(Bytes.toBytes(acv.family),
Bytes.toBytes(acv.qualifier),
Bytes.toBytes(acv.value));
}
}
return put;
}
//
// Single-Row DML (Delete)
//
static public Delete adeleteToDelete(ADelete adelete) throws IOException {
Delete delete = new Delete(Bytes.toBytes(adelete.row));
if (adelete.columns != null) {
for (AColumn acolumn : adelete.columns) {
if (acolumn.qualifier != null) {
delete.deleteColumns(Bytes.toBytes(acolumn.family), Bytes.toBytes(acolumn.qualifier));
} else {
delete.deleteFamily(Bytes.toBytes(acolumn.family));
}
}
}
return delete;
}
//
// Multi-row DML (Scan)
//
static public Scan ascanToScan(AScan ascan) throws IOException {
Scan scan = new Scan();
if (ascan.startRow != null) {
scan.setStartRow(Bytes.toBytes(ascan.startRow));
}
if (ascan.stopRow != null) {
scan.setStopRow(Bytes.toBytes(ascan.stopRow));
}
if (ascan.columns != null) {
for (AColumn acolumn : ascan.columns) {
if (acolumn.qualifier != null) {
scan.addColumn(Bytes.toBytes(acolumn.family), Bytes.toBytes(acolumn.qualifier));
} else {
scan.addFamily(Bytes.toBytes(acolumn.family));
}
}
}
if (ascan.timestamp != null) {
scan.setTimeStamp(ascan.timestamp);
}
if (ascan.timerange != null) {
scan.setTimeRange(ascan.timerange.minStamp, ascan.timerange.maxStamp);
}
if (ascan.maxVersions != null) {
scan.setMaxVersions(ascan.maxVersions);
}
return scan;
}
// TODO(hammer): Better to return null or empty array?
static public GenericArray<AResult> resultsToAResults(Result[] results) {
Schema s = Schema.createArray(AResult.SCHEMA$);
GenericData.Array<AResult> aresults = null;
if (results != null && results.length > 0) {
aresults = new GenericData.Array<AResult>(results.length, s);
for (Result result : results) {
aresults.add(resultToAResult(result));
}
} else {
aresults = new GenericData.Array<AResult>(0, s);
}
return aresults;
}
}
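
To make the conversion direction concrete, here is a small sketch that builds an APut on the Avro side and converts it with aputToPut. It mirrors the construction pattern used in TestAvroServer below; the row, family, qualifier, and value names are illustrative.

import java.nio.ByteBuffer;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericArray;
import org.apache.avro.generic.GenericData;
import org.apache.hadoop.hbase.avro.generated.AColumnValue;
import org.apache.hadoop.hbase.avro.generated.APut;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class AvroUtilExample {
  static Put buildPut() throws Exception {
    APut aput = new APut();
    aput.row = ByteBuffer.wrap(Bytes.toBytes("RowA"));
    Schema cvSchema = Schema.createArray(AColumnValue.SCHEMA$);
    GenericArray<AColumnValue> columnValues =
        new GenericData.Array<AColumnValue>(1, cvSchema);
    AColumnValue cv = new AColumnValue();
    cv.family = ByteBuffer.wrap(Bytes.toBytes("FamilyA"));
    cv.qualifier = ByteBuffer.wrap(Bytes.toBytes("QualifierA"));
    cv.value = ByteBuffer.wrap(Bytes.toBytes("ValueA"));
    // timestamp is left null, so aputToPut takes the three-argument add() path.
    columnValues.add(cv);
    aput.columnValues = columnValues;
    // Convert the Avro record into a native client Put.
    return AvroUtil.aputToPut(aput);
  }
}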


@@ -1,72 +0,0 @@
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
<html>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<head />
<body bgcolor="white">
Provides an HBase <a href="http://avro.apache.org">Avro</a> service.
This directory contains an Avro interface definition file for an HBase RPC
service and a Java server implementation.
This server has been deprecated.
<h2><a name="whatisavro">What is Avro?</a></h2>
<p>Avro is a data serialization and RPC system. For more, see the
<a href="http://avro.apache.org/docs/current/spec.html">current specification</a>.
</p>
<h2><a name="description">Description</a></h2>
<p>The <a href="generated/HBase.html">HBase API</a> is defined in the
file hbase.avdl. A server-side implementation of the API is in
<code>org.apache.hadoop.hbase.avro.AvroServer</code>. The generated interfaces,
types, and RPC utility files are checked into SVN under the
<code>org.apache.hadoop.hbase.avro.generated</code> package.
</p>
<p>The files were generated by running the commands:
<pre>
java -jar avro-tools-1.4.1.jar idl hbase.avdl hbase.avpr
java -jar avro-tools-1.4.1.jar compile protocol hbase.avpr $HBASE_HOME/src/main/java
</pre>
</p>
<p>The 'avro-tools-x.y.z.jar' jarfile is an Avro utility distributed as
part of the Avro package. Additionally, language-specific runtime
libraries are part of the Avro package. A version of the
Java runtime is listed as a dependency in Maven.
</p>
<p>To start AvroServer, use:
<pre>
./bin/hbase avro start [--port=PORT]
</pre>
The default port is 9090.
</p>
<p>To stop, use:
<pre>
./bin/hbase-daemon.sh stop avro
</pre>
</p>
</body>
</html>


@@ -1,239 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.avro;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.nio.ByteBuffer;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericArray;
import org.apache.avro.generic.GenericData;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.avro.generated.AColumn;
import org.apache.hadoop.hbase.avro.generated.AColumnValue;
import org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor;
import org.apache.hadoop.hbase.avro.generated.AGet;
import org.apache.hadoop.hbase.avro.generated.APut;
import org.apache.hadoop.hbase.avro.generated.ATableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
* Unit testing for AvroServer.HBaseImpl, a part of the
* org.apache.hadoop.hbase.avro package.
*/
@Category(MediumTests.class)
public class TestAvroServer {
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
// Static names for tables, columns, rows, and values
// TODO(hammer): Better style to define these in test method?
private static ByteBuffer tableAname = ByteBuffer.wrap(Bytes.toBytes("tableA"));
private static ByteBuffer tableBname = ByteBuffer.wrap(Bytes.toBytes("tableB"));
private static ByteBuffer familyAname = ByteBuffer.wrap(Bytes.toBytes("FamilyA"));
private static ByteBuffer qualifierAname = ByteBuffer.wrap(Bytes.toBytes("QualifierA"));
private static ByteBuffer rowAname = ByteBuffer.wrap(Bytes.toBytes("RowA"));
private static ByteBuffer valueA = ByteBuffer.wrap(Bytes.toBytes("ValueA"));
/**
* @throws java.lang.Exception
*/
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.startMiniCluster();
}
/**
* @throws java.lang.Exception
*/
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
/**
* @throws java.lang.Exception
*/
@Before
public void setUp() throws Exception {
// Nothing to do.
}
/**
* @throws java.lang.Exception
*/
@After
public void tearDown() throws Exception {
// Nothing to do.
}
/**
* Tests for creating, enabling, disabling, modifying, and deleting tables.
*
* @throws Exception
*/
@Test (timeout=300000)
public void testTableAdminAndMetadata() throws Exception {
AvroServer.HBaseImpl impl =
new AvroServer.HBaseImpl(TEST_UTIL.getConfiguration());
assertEquals(impl.listTables().size(), 0);
ATableDescriptor tableA = new ATableDescriptor();
tableA.name = tableAname;
impl.createTable(tableA);
assertEquals(impl.listTables().size(), 1);
assertTrue(impl.isTableEnabled(tableAname));
assertTrue(impl.tableExists(tableAname));
ATableDescriptor tableB = new ATableDescriptor();
tableB.name = tableBname;
impl.createTable(tableB);
assertEquals(impl.listTables().size(), 2);
impl.disableTable(tableBname);
assertFalse(impl.isTableEnabled(tableBname));
impl.deleteTable(tableBname);
assertEquals(impl.listTables().size(), 1);
impl.disableTable(tableAname);
assertFalse(impl.isTableEnabled(tableAname));
long oldMaxFileSize = impl.describeTable(tableAname).maxFileSize;
tableA.maxFileSize = 123456L;
impl.modifyTable(tableAname, tableA);
// It can take a while for the change to take effect. Wait here a while.
while(impl.describeTable(tableAname).maxFileSize == oldMaxFileSize) {
Threads.sleep(100);
}
assertEquals(123456L, (long) impl.describeTable(tableAname).maxFileSize);
/* DISABLED FOR NOW TILL WE HAVE BETTER DISABLE/ENABLE
impl.enableTable(tableAname);
assertTrue(impl.isTableEnabled(tableAname));
impl.disableTable(tableAname);
*/
impl.deleteTable(tableAname);
}
/**
* Tests for creating, modifying, and deleting column families.
*
* @throws Exception
*/
@Test
public void testFamilyAdminAndMetadata() throws Exception {
AvroServer.HBaseImpl impl =
new AvroServer.HBaseImpl(TEST_UTIL.getConfiguration());
ATableDescriptor tableA = new ATableDescriptor();
tableA.name = tableAname;
AFamilyDescriptor familyA = new AFamilyDescriptor();
familyA.name = familyAname;
Schema familyArraySchema = Schema.createArray(AFamilyDescriptor.SCHEMA$);
GenericArray<AFamilyDescriptor> families = new GenericData.Array<AFamilyDescriptor>(1, familyArraySchema);
families.add(familyA);
tableA.families = families;
impl.createTable(tableA);
assertEquals(impl.describeTable(tableAname).families.size(), 1);
impl.disableTable(tableAname);
assertFalse(impl.isTableEnabled(tableAname));
familyA.maxVersions = 123456;
impl.modifyFamily(tableAname, familyAname, familyA);
assertEquals((int) impl.describeFamily(tableAname, familyAname).maxVersions, 123456);
impl.deleteFamily(tableAname, familyAname);
assertEquals(impl.describeTable(tableAname).families.size(), 0);
impl.deleteTable(tableAname);
}
/**
* Tests for adding, reading, and deleting data.
*
* @throws Exception
*/
@Test
public void testDML() throws Exception {
AvroServer.HBaseImpl impl =
new AvroServer.HBaseImpl(TEST_UTIL.getConfiguration());
ATableDescriptor tableA = new ATableDescriptor();
tableA.name = tableAname;
AFamilyDescriptor familyA = new AFamilyDescriptor();
familyA.name = familyAname;
Schema familyArraySchema = Schema.createArray(AFamilyDescriptor.SCHEMA$);
GenericArray<AFamilyDescriptor> families = new GenericData.Array<AFamilyDescriptor>(1, familyArraySchema);
families.add(familyA);
tableA.families = families;
impl.createTable(tableA);
assertEquals(impl.describeTable(tableAname).families.size(), 1);
AGet getA = new AGet();
getA.row = rowAname;
Schema columnsSchema = Schema.createArray(AColumn.SCHEMA$);
GenericArray<AColumn> columns = new GenericData.Array<AColumn>(1, columnsSchema);
AColumn column = new AColumn();
column.family = familyAname;
column.qualifier = qualifierAname;
columns.add(column);
getA.columns = columns;
assertFalse(impl.exists(tableAname, getA));
APut putA = new APut();
putA.row = rowAname;
Schema columnValuesSchema = Schema.createArray(AColumnValue.SCHEMA$);
GenericArray<AColumnValue> columnValues = new GenericData.Array<AColumnValue>(1, columnValuesSchema);
AColumnValue acv = new AColumnValue();
acv.family = familyAname;
acv.qualifier = qualifierAname;
acv.value = valueA;
columnValues.add(acv);
putA.columnValues = columnValues;
impl.put(tableAname, putA);
assertTrue(impl.exists(tableAname, getA));
assertEquals(impl.get(tableAname, getA).entries.size(), 1);
impl.disableTable(tableAname);
impl.deleteTable(tableAname);
}
@org.junit.Rule
public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}


@@ -1,48 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.avro;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.avro.generated.AResult;
import org.apache.hadoop.hbase.client.Result;
import org.junit.Assert;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
@Category(SmallTests.class)
public class TestAvroUtil {
@Test
public void testGetEmpty() {
Result result = Mockito.mock(Result.class);
Mockito.when(result.getRow()).thenReturn(null);
// A Get on a row that does not exist returns a Result
// whose row is null.
AResult aresult = AvroUtil.resultToAResult(result);
Assert.assertNotNull(aresult);
}
@org.junit.Rule
public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}