HBASE-4336 Convert source tree into maven modules; part2

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1342858 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2012-05-26 06:07:07 +00:00
parent 390f32d79f
commit c484c37ec9
26 changed files with 0 additions and 2837 deletions

[image: 15 KiB]

[image: 1.1 KiB]

[image: 9.2 KiB]

[image: 1.7 KiB]

[image: 33 KiB]

[image: 56 KiB]

[image: 203 KiB]

@@ -1,21 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ResourceBundle properties file for RowCounter MR job
CounterGroupName= RowCounter
ROWS.name= Rows

@@ -1,21 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ResourceBundle properties file for RowCounter MR job
CounterGroupName= RowCounter
ROWS.name= Rows
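The bundle keys above supply display names for the MapReduce job's counter enum ("RowCounter" for the group, "Rows" for the ROWS counter). A minimal sketch of a mapper driving such a counter, assuming the HBase TableMapper API; the class and enum names here are illustrative:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.io.NullWritable;

// Illustrative mapper in the style of RowCounter: the ResourceBundle above
// gives the Counters enum its human-readable names in job output.
public class RowCounterMapperSketch
    extends TableMapper<ImmutableBytesWritable, NullWritable> {

  public static enum Counters { ROWS }

  @Override
  protected void map(ImmutableBytesWritable row, Result value, Context context)
      throws IOException, InterruptedException {
    // One tick per row handed to the mapper by the table scan.
    context.getCounter(Counters.ROWS).increment(1);
  }
}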

@@ -1,412 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// NOTE: The "required" and "optional" keywords for the service methods are purely for documentation
namespace java org.apache.hadoop.hbase.thrift2.generated
namespace cpp apache.hadoop.hbase.thrift2
namespace rb Apache.Hadoop.Hbase.Thrift2
namespace py hbase
namespace perl Hbase
struct TTimeRange {
1: required i64 minStamp,
2: required i64 maxStamp
}
/**
* Addresses a single cell or multiple cells
* in an HBase table by column family and optionally
* a column qualifier and timestamp
*/
struct TColumn {
1: required binary family,
2: optional binary qualifier,
3: optional i64 timestamp
}
/**
* Represents a single cell and its value.
*/
struct TColumnValue {
1: required binary family,
2: required binary qualifier,
3: required binary value,
4: optional i64 timestamp
}
/**
* Represents a single cell and the amount to increment it by
*/
struct TColumnIncrement {
1: required binary family,
2: required binary qualifier,
3: optional i64 amount = 1
}
/**
* if no Result is found, row and columnValues will not be set.
*/
struct TResult {
1: optional binary row,
2: required list<TColumnValue> columnValues
}
/**
* Specify type of delete:
* - DELETE_COLUMN means exactly one version will be removed,
* - DELETE_COLUMNS means previous versions will also be removed.
*/
enum TDeleteType {
DELETE_COLUMN = 0,
DELETE_COLUMNS = 1
}
/**
* Used to perform Get operations on a single row.
*
* The scope can be further narrowed down by specifying a list of
* columns or column families.
*
* To get everything for a row, instantiate a Get object with just the row to get.
* To further define the scope of what to get, you can add a timestamp or time range
* with an optional maximum number of versions to return.
*
* If you specify both a time range and a timestamp, the range is ignored.
* Timestamps on TColumns are ignored.
*
* TODO: Filter, Locks
*/
struct TGet {
1: required binary row,
2: optional list<TColumn> columns,
3: optional i64 timestamp,
4: optional TTimeRange timeRange,
5: optional i32 maxVersions,
}
/**
* Used to perform Put operations for a single row.
*
* Add column values to this object and they will be written to the row.
* You can provide a default timestamp if the column values
* don't have one. If you don't provide a default timestamp
* the current time is inserted.
*
* You can also specify whether this Put should be written
* to the write-ahead log (WAL). It defaults to true.
*/
struct TPut {
1: required binary row,
2: required list<TColumnValue> columnValues
3: optional i64 timestamp,
4: optional bool writeToWal = 1
}
/**
* Used to perform Delete operations on a single row.
*
* The scope can be further narrowed down by specifying a list of
* columns or column families as TColumns.
*
* Specifying only a family in a TColumn will delete the whole family.
* If a timestamp is specified, all versions with a timestamp less than
* or equal to it will be deleted. If no timestamp is specified, the
* current time will be used.
*
* Specifying a family and a column qualifier in a TColumn will delete only
* this qualifier. If a timestamp is specified, only versions equal
* to this timestamp will be deleted. If no timestamp is specified, the
* most recent version will be deleted. To delete all previous versions,
* specify the DELETE_COLUMNS TDeleteType.
*
* The top-level timestamp is only used if a complete row should be deleted
* (i.e. no columns are passed). If it is specified, it works the same way
* as if you had added a TColumn for every column family with this timestamp
* (i.e. all versions older than or equal to it, in all column families, will be deleted).
*
*/
struct TDelete {
1: required binary row,
2: optional list<TColumn> columns,
3: optional i64 timestamp,
4: optional TDeleteType deleteType = 1,
5: optional bool writeToWal = 1
}
/**
* Used to perform Increment operations for a single row.
*
* You can specify whether this Increment should be written
* to the write-ahead log (WAL). It defaults to true.
*/
struct TIncrement {
1: required binary row,
2: required list<TColumnIncrement> columns,
3: optional bool writeToWal = 1
}
/**
* Any timestamps in the columns are ignored, use timeRange to select by timestamp.
* Max versions defaults to 1.
*/
struct TScan {
1: optional binary startRow,
2: optional binary stopRow,
3: optional list<TColumn> columns
4: optional i32 caching,
5: optional i32 maxVersions=1,
6: optional TTimeRange timeRange,
}
//
// Exceptions
//
/**
* A TIOError exception signals that an error occurred communicating
* with the HBase master or an HBase region server. It is also used to return
* more general HBase error conditions.
*/
exception TIOError {
1: optional string message
}
/**
* A TIllegalArgument exception indicates an illegal or invalid
* argument was passed into a procedure.
*/
exception TIllegalArgument {
1: optional string message
}
service THBaseService {
/**
* Test for the existence of columns in the table, as specified in the TGet.
*
* @return true if the specified TGet matches one or more keys, false if not
*/
bool exists(
/** the table to check on */
1: required binary table,
/** the TGet to check for */
2: required TGet get
) throws (1:TIOError io)
/**
* Method for getting data from a row.
*
* If the row cannot be found, an empty Result is returned.
* This can be checked via the row field of the TResult, which will be unset.
*
* @return the result
*/
TResult get(
/** the table to get from */
1: required binary table,
/** the TGet to fetch */
2: required TGet get
) throws (1: TIOError io)
/**
* Method for getting multiple rows.
*
* If a row cannot be found, there will be a null
* value in the result list for that TGet at the
* same position.
*
* The Results are therefore in the same order as the TGets.
*/
list<TResult> getMultiple(
/** the table to get from */
1: required binary table,
/** a list of TGets to fetch, the Result list
will have the Results at corresponding positions
or null if there was an error */
2: required list<TGet> gets
) throws (1: TIOError io)
/**
* Commit a TPut to a table.
*/
void put(
/** the table to put data in */
1: required binary table,
/** the TPut to put */
2: required TPut put
) throws (1: TIOError io)
/**
* Atomically checks if a row/family/qualifier value matches the expected
* value. If it does, it adds the TPut.
*
* @return true if the new put was executed, false otherwise
*/
bool checkAndPut(
/** to check in and put to */
1: required binary table,
/** row to check */
2: required binary row,
/** column family to check */
3: required binary family,
/** column qualifier to check */
4: required binary qualifier,
/** the expected value, if not provided the
check is for the non-existence of the
column in question */
5: binary value,
/** the TPut to put if the check succeeds */
6: required TPut put
) throws (1: TIOError io)
/**
* Commit a List of Puts to the table.
*/
void putMultiple(
/** the table to put data in */
1: required binary table,
/** a list of TPuts to commit */
2: required list<TPut> puts
) throws (1: TIOError io)
/**
* Deletes as specified by the TDelete.
*
* Note: "delete" is a reserved keyword and cannot be used in Thrift
* thus the inconsistent naming scheme from the other functions.
*/
void deleteSingle(
/** the table to delete from */
1: required binary table,
/** the TDelete to delete */
2: required TDelete deleteSingle
) throws (1: TIOError io)
/**
* Bulk commit a List of TDeletes to the table.
*
* This returns a list of TDeletes that were not
* executed. So if everything succeeds you'll
* receive an empty list.
*/
list<TDelete> deleteMultiple(
/** the table to delete from */
1: required binary table,
/** list of TDeletes to delete */
2: required list<TDelete> deletes
) throws (1: TIOError io)
/**
* Atomically checks if a row/family/qualifier value matches the expected
* value. If it does, it adds the delete.
*
* @return true if the new delete was executed, false otherwise
*/
bool checkAndDelete(
/** to check in and delete from */
1: required binary table,
/** row to check */
2: required binary row,
/** column family to check */
3: required binary family,
/** column qualifier to check */
4: required binary qualifier,
/** the expected value, if not provided the
check is for the non-existence of the
column in question */
5: binary value,
/** the TDelete to execute if the check succeeds */
6: required TDelete deleteSingle
) throws (1: TIOError io)
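/**
* Increments cells in a row, as specified by the TIncrement.
*
* @return the updated cell values as a TResult
*/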
TResult increment(
/** the table to increment the value on */
1: required binary table,
/** the TIncrement to increment */
2: required TIncrement increment
) throws (1: TIOError io)
/**
* Get a Scanner for the provided TScan object.
*
* @return Scanner Id to be used with other scanner procedures
*/
i32 openScanner(
/** the table to get the Scanner for */
1: required binary table,
/** the scan object to get a Scanner for */
2: required TScan scan,
) throws (1: TIOError io)
/**
* Grabs multiple rows from a Scanner.
*
* @return Between zero and numRows TResults
*/
list<TResult> getScannerRows(
/** the Id of the Scanner to return rows from. This is an Id returned from the openScanner function. */
1: required i32 scannerId,
/** number of rows to return */
2: i32 numRows = 1
) throws (
1: TIOError io,
/** if the scannerId is invalid */
2: TIllegalArgument ia
)
/**
* Closes the scanner. Should be called if you need to close
* the Scanner before all results are read.
*
* Exhausted scanners are closed automatically.
*/
void closeScanner(
/** the Id of the Scanner to close **/
1: required i32 scannerId
) throws (
1: TIOError io,
/** if the scannerId is invalid */
2: TIllegalArgument ia
)
}
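For orientation, here is a minimal sketch of calling this service from Java once the IDL above has been run through the Thrift compiler. The host, port, table and row names, and the plain-socket/binary-protocol transport are assumptions about the deployment, not something this file prescribes:

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.thrift2.generated.TGet;
import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
import org.apache.hadoop.hbase.thrift2.generated.TResult;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class ThriftClientSketch {
  public static void main(String[] args) throws Exception {
    // Assumes a thrift2 server listening on localhost:9090 speaking the
    // binary protocol over an unframed socket; adjust to your deployment.
    TTransport transport = new TSocket("localhost", 9090);
    transport.open();
    THBaseService.Client client =
        new THBaseService.Client(new TBinaryProtocol(transport));

    // A TGet with only the row key set fetches the whole row.
    TGet get = new TGet();
    get.setRow("myrow".getBytes("UTF-8"));
    TResult result = client.get(ByteBuffer.wrap("mytable".getBytes("UTF-8")), get);

    // Per the TResult doc above, an unset row field means the row was not found.
    System.out.println(result.isSetRow() ? result : "row not found");
    transport.close();
  }
}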

@@ -1,68 +0,0 @@
<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml"/>
<xsl:template match="configuration">
<!--
/**
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
This stylesheet is used to make an HTML version of hbase-default.xml.
-->
<section xml:id="hbase_default_configurations"
version="5.0" xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns:m="http://www.w3.org/1998/Math/MathML"
xmlns:html="http://www.w3.org/1999/xhtml"
xmlns:db="http://docbook.org/ns/docbook">
<title>HBase Default Configuration</title>
<para>
</para>
<glossary xmlns='http://docbook.org/ns/docbook' xml:id="hbase.default.configuration">
<title>HBase Default Configuration</title>
<para>
The documentation below is generated using the default hbase configuration file,
<filename>hbase-default.xml</filename>, as source.
</para>
<xsl:for-each select="property">
<xsl:if test="not(@skipInDoc)">
<glossentry>
<xsl:attribute name="id">
<xsl:value-of select="name" />
</xsl:attribute>
<glossterm>
<varname><xsl:value-of select="name"/></varname>
</glossterm>
<glossdef>
<para><xsl:value-of select="description"/></para>
<para>Default: <varname><xsl:value-of select="value"/></varname></para>
</glossdef>
</glossentry>
</xsl:if>
</xsl:for-each>
</glossary>
</section>
</xsl:template>
</xsl:stylesheet>
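To illustrate what the template emits: a property such as the following from hbase-default.xml (the key and value are real for this era; the description is abbreviated here) becomes the glossary entry shown beneath it.

<property>
  <name>hbase.tmp.dir</name>
  <value>/tmp/hbase-${user.name}</value>
  <description>Temporary directory on the local filesystem.</description>
</property>

<glossentry id="hbase.tmp.dir">
  <glossterm><varname>hbase.tmp.dir</varname></glossterm>
  <glossdef>
    <para>Temporary directory on the local filesystem.</para>
    <para>Default: <varname>/tmp/hbase-${user.name}</varname></para>
  </glossdef>
</glossentry>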

@@ -1,208 +0,0 @@
/*
* Copyright (c) 2001, 2003, 2010 The FreeBSD Documentation Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD: doc/share/misc/docbook.css,v 1.15 2010/03/20 04:15:01 hrs Exp $
*/
BODY ADDRESS {
line-height: 1.3;
margin: .6em 0;
}
BODY BLOCKQUOTE {
margin-top: .75em;
line-height: 1.5;
margin-bottom: .75em;
}
HTML BODY {
margin: 1em 8% 1em 10%;
line-height: 1.2;
}
.LEGALNOTICE {
font-size: small;
font-variant: small-caps;
}
BODY DIV {
margin: 0;
}
DL {
margin: .8em 0;
line-height: 1.2;
}
BODY FORM {
margin: .6em 0;
}
H1, H2, H3, H4, H5, H6,
DIV.EXAMPLE P B,
.QUESTION,
DIV.TABLE P B,
DIV.PROCEDURE P B {
color: #990000;
}
BODY H1, BODY H2, BODY H3, BODY H4, BODY H5, BODY H6 {
line-height: 1.3;
margin-left: 0;
}
BODY H1, BODY H2 {
margin: .8em 0 0 -4%;
}
BODY H3, BODY H4 {
margin: .8em 0 0 -3%;
}
BODY H5 {
margin: .8em 0 0 -2%;
}
BODY H6 {
margin: .8em 0 0 -1%;
}
BODY HR {
margin: .6em;
border-width: 0 0 1px 0;
border-style: solid;
border-color: #cecece;
}
BODY IMG.NAVHEADER {
margin: 0 0 0 -4%;
}
OL {
margin: 0 0 0 5%;
line-height: 1.2;
}
BODY PRE {
margin: .75em 0;
line-height: 1.0;
font-family: monospace;
}
BODY TD, BODY TH {
line-height: 1.2;
}
UL, BODY DIR, BODY MENU {
margin: 0 0 0 5%;
line-height: 1.2;
}
HTML {
margin: 0;
padding: 0;
}
BODY P B.APPLICATION {
color: #000000;
}
.FILENAME {
color: #007a00;
}
.GUIMENU, .GUIMENUITEM, .GUISUBMENU,
.GUILABEL, .INTERFACE,
.SHORTCUT, .SHORTCUT .KEYCAP {
font-weight: bold;
}
.GUIBUTTON {
background-color: #CFCFCF;
padding: 2px;
}
.ACCEL {
background-color: #F0F0F0;
text-decoration: underline;
}
.SCREEN {
padding: 1ex;
}
.PROGRAMLISTING {
padding: 1ex;
background-color: #eee;
border: 1px solid #ccc;
}
@media screen { /* hide from IE3 */
a[href]:hover { background: #ffa }
}
BLOCKQUOTE.NOTE {
color: #222;
background: #eee;
border: 1px solid #ccc;
padding: 0.4em 0.4em;
width: 85%;
}
BLOCKQUOTE.TIP {
color: #004F00;
background: #d8ecd6;
border: 1px solid green;
padding: 0.2em 2em;
width: 85%;
}
BLOCKQUOTE.IMPORTANT {
font-style:italic;
border: 1px solid #a00;
border-left: 12px solid #c00;
padding: 0.1em 1em;
}
BLOCKQUOTE.WARNING {
color: #9F1313;
background: #f8e8e8;
border: 1px solid #e59595;
padding: 0.2em 2em;
width: 85%;
}
.EXAMPLE {
background: #fefde6;
border: 1px solid #f1bb16;
margin: 1em 0;
padding: 0.2em 2em;
width: 90%;
}
.INFORMALTABLE TABLE.CALSTABLE TR TD {
padding-left: 1em;
padding-right: 1em;
}

@@ -1,127 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
a.externalLink, a.externalLink:link, a.externalLink:visited, a.externalLink:active, a.externalLink:hover {
background: none;
padding-right: 0;
}
/*
body ul {
list-style-type: square;
}
*/
#downloadbox {
float: right;
margin: 0 10px 20px 20px;
padding: 5px;
border: 1px solid #999;
background-color: #eee;
}
#downloadbox h5 {
color: #000;
margin: 0;
border-bottom: 1px solid #aaaaaa;
font-size: smaller;
padding: 0;
}
#downloadbox p {
margin-top: 1em;
margin-bottom: 0;
}
#downloadbox ul {
margin-top: 0;
margin-bottom: 1em;
list-style-type: disc;
}
#downloadbox li {
font-size: smaller;
}
/*
h4 {
padding: 0;
border: none;
color: #000;
margin: 0;
font-size: larger;
font-weight: bold;
}
*/
#banner {
background: none;
}
#banner img {
padding: 10px;
margin: auto;
display: block;
background: none;
float: center;
height:;
}
#breadcrumbs {
background-image: url();
}
#footer {
border-top: 0px;
}
.frontpagebox {
float: left;
text-align: center;
width: 15em;
margin-left: 0.5em;
margin-right: 0.5em;
margin-top: 2em;
}
.headline {
font-size: 120%;
font-weight: bold;
padding-top: 1px;
padding-bottom: 5px;
background-image: url(../images/breadcrumbs.jpg);
background-repeat: repeat-x;
}
.section {
padding-bottom: 0;
padding-top: 0;
}
/*
#leftColumn {
display: none !important
}
#bodyColumn {
margin-left: 1.5em;
}
*/

@@ -1,57 +0,0 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl"?>
<rdf:RDF xml:lang="en"
xmlns="http://usefulinc.com/ns/doap#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:asfext="http://projects.apache.org/ns/asfext#"
xmlns:foaf="http://xmlns.com/foaf/0.1/">
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<Project rdf:about="http://hbase.apache.org">
<created>2012-04-14</created>
<license rdf:resource="http://usefulinc.com/doap/licenses/asl20" />
<name>Apache HBase</name>
<homepage rdf:resource="http://hbase.apache.org" />
<asfext:pmc rdf:resource="http://hbase.apache.org" />
<shortdesc>Apache HBase software is the Hadoop database. Think of it as a distributed, scalable, big data store.</shortdesc>
<description>Use Apache HBase software when you need random, realtime read/write access to your Big Data. This project's goal is the hosting of very large tables -- billions of rows X millions of columns -- atop clusters of commodity hardware. HBase is an open-source, distributed, versioned, column-oriented store modeled after Google's Bigtable: A Distributed Storage System for Structured Data by Chang et al. Just as Bigtable leverages the distributed data storage provided by the Google File System, HBase provides Bigtable-like capabilities on top of Hadoop and HDFS. </description>
<bug-database rdf:resource="http://issues.apache.org/jira/browse/HBASE" />
<mailing-list rdf:resource="http://hbase.apache.org/mail-lists.html" />
<download-page rdf:resource="http://www.apache.org/dyn/closer.cgi/hbase/" />
<programming-language>Java</programming-language>
<category rdf:resource="http://projects.apache.org/category/database" />
<release>
<Version>
<name>Apache HBase 0.92.1</name>
<created>2012-03-19</created>
<revision>0.92.1</revision>
</Version>
</release>
<repository>
<SVNRepository>
<location rdf:resource="http://svn.apache.org/repos/asf/hbase"/>
<browse rdf:resource="http://svn.apache.org/viewvc/hbase"/>
</SVNRepository>
</repository>
<maintainer>
<foaf:Person>
<foaf:name>Apache HBase PMC</foaf:name>
<foaf:mbox rdf:resource="mailto:dev@hbase.apache.org"/>
</foaf:Person>
</maintainer>
</Project>
</rdf:RDF>

@@ -1,41 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 15.1.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
width="792px" height="612px" viewBox="0 0 792 612" enable-background="new 0 0 792 612" xml:space="preserve">
<path fill="#BA160C" d="M233.586,371.672h-9.895v-51.583h9.895V371.672L233.586,371.672z M223.691,307.6v-19.465h9.895V307.6
H223.691z M223.691,371.672h-9.896v-32.117h-63.584v32.117h-19.466v-83.537h19.466v31.954h55.128h8.457h9.896V371.672
L223.691,371.672z M223.691,288.135h-9.896V307.6h9.896V288.135z"/>
<path fill="#BA160C" d="M335.939,329.334c6.812,4.218,10.219,10.652,10.219,19.303c0,6.272-2,11.571-6.002,15.897
c-4.325,4.758-10.165,7.137-17.519,7.137h-28.629v-19.465h28.629c2.812,0,4.218-2.109,4.218-6.327c0-4.216-1.406-6.325-4.218-6.325
h-28.629v-19.303h27.17c2.811,0,4.217-2.109,4.217-6.327c0-4.216-1.406-6.326-4.217-6.326h-27.17v-19.464h27.17
c7.353,0,13.192,2.379,17.519,7.137c3.892,4.325,5.839,9.625,5.839,15.896C344.536,318.954,341.67,325.009,335.939,329.334z
M294.008,371.672h-52.312v-51.42h19.466h5.259h27.588v19.303h-32.847v12.652h32.847V371.672L294.008,371.672z M294.008,307.599
h-32.847v0h-19.466v-19.465h52.312V307.599z"/>
<path fill="#878888" d="M355.123,266.419v-8.92h14.532v-5.353c0-1.932-0.644-2.899-1.933-2.899h-12.6v-8.919h12.6
c3.223,0,5.836,1.164,7.842,3.494c2.007,2.33,3.011,5.104,3.011,8.325v26.463h-8.921v-12.19H355.123L355.123,266.419z
M473.726,278.61h-29.587c-3.469,0-6.417-1.152-8.845-3.458c-2.429-2.304-3.642-5.191-3.642-8.659v-14.049
c0-3.47,1.213-6.356,3.642-8.662c2.428-2.304,5.376-3.455,8.845-3.455h29.587v8.919h-29.587c-2.378,0-3.567,1.066-3.567,3.197
v14.049c0,2.131,1.189,3.196,3.567,3.196h29.587V278.61L473.726,278.61z M567.609,278.61h-8.996v-14.718h-22.895v14.718h-8.92
v-38.282h8.92v14.644h22.895v-14.644h8.996V278.61L567.609,278.61z M661.494,249.247h-31.889v5.725h29.807v8.92h-29.807v5.797
h31.814v8.92h-40.735v-38.282h40.809V249.247z M355.123,240.328v8.919h-12.674c-1.239,0-1.858,0.967-1.858,2.899v5.353h5.575h2.435
h6.522v8.92h-6.522h-2.435h-5.575v12.19h-8.92v-26.463c0-3.221,1.004-5.996,3.011-8.325c2.006-2.33,4.596-3.494,7.768-3.494H355.123
L355.123,240.328z M254.661,266.122v-8.92h13.083c1.288,0,1.933-1.313,1.933-3.939c0-2.676-0.645-4.015-1.933-4.015h-13.083v-8.919
h13.083c3.32,0,5.995,1.363,8.028,4.088c1.883,2.478,2.825,5.425,2.825,8.846c0,3.419-0.942,6.342-2.825,8.771
c-2.033,2.725-4.708,4.088-8.028,4.088H254.661z M177.649,278.61h-8.92v-12.19h-14.532v-8.92h14.532v-5.353
c0-1.932-0.644-2.899-1.932-2.899h-12.6v-8.919h12.6c3.222,0,5.835,1.164,7.842,3.494c2.007,2.33,3.01,5.104,3.01,8.325V278.61
L177.649,278.61z M254.661,240.328v8.919h-15.016v7.954h15.016v8.92h-15.016v12.488h-8.92v-38.282H254.661z M154.198,266.419h-7.604
h-1.354h-5.575v12.19h-8.92v-26.463c0-3.221,1.004-5.996,3.01-8.325c2.007-2.33,4.597-3.494,7.768-3.494h12.674v8.919h-12.674
c-1.239,0-1.858,0.967-1.858,2.899v5.353h5.575h1.354h7.604V266.419z"/>
<path fill="#BA160C" d="M456.325,371.672H436.86V345.07h-31.094h-0.618v-19.466h0.618h31.094v-11.68
c0-4.216-1.406-6.324-4.218-6.324h-27.494v-19.465h27.494c7.03,0,12.733,2.541,17.114,7.623c4.379,5.083,6.569,11.139,6.569,18.167
V371.672z M405.148,345.07h-19.547h-12.165v26.602h-19.466v-57.748c0-7.028,2.19-13.083,6.569-18.167
c4.379-5.083,10.03-7.623,16.952-7.623h27.656V307.6h-27.656c-2.704,0-4.055,2.108-4.055,6.324v11.68h12.165h19.547V345.07z"/>
<path fill="#BA160C" d="M564.329,345.88c0,7.03-2.109,13.031-6.327,18.006c-4.541,5.19-10.273,7.786-17.193,7.786h-72.02v-19.465
h72.02c2.704,0,4.055-2.109,4.055-6.327c0-4.216-1.352-6.325-4.055-6.325h-52.394c-6.92,0-12.652-2.596-17.193-7.787
c-4.327-4.865-6.49-10.813-6.49-17.843c0-7.028,2.218-13.083,6.651-18.167c4.434-5.083,10.112-7.623,17.032-7.623h72.021v19.464
h-72.021c-2.703,0-4.055,2.109-4.055,6.326c0,4.109,1.352,6.164,4.055,6.164h52.394c6.92,0,12.652,2.596,17.193,7.787
C562.22,332.85,564.329,338.852,564.329,345.88z"/>
<polygon fill="#BA160C" points="661.494,307.599 591.906,307.599 591.906,320.089 656.952,320.089 656.952,339.555 591.906,339.555
591.906,352.207 661.331,352.207 661.331,371.672 572.44,371.672 572.44,288.135 661.494,288.135 "/>
</svg>


@@ -1,544 +0,0 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
#*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*#
<!-- Generated by Apache Maven Doxia at $dateFormat.format( $currentDate ) -->
#macro ( link $href $name $target $img $position $alt $border $width $height )
#set ( $linkTitle = ' title="' + $name + '"' )
#if( $target )
#set ( $linkTarget = ' target="' + $target + '"' )
#else
#set ( $linkTarget = "" )
#end
#if ( ( $href.toLowerCase().startsWith("http") || $href.toLowerCase().startsWith("https") ) )
#set ( $linkClass = ' class="externalLink"' )
#else
#set ( $linkClass = "" )
#end
#if ( $img )
#if ( $position == "left" )
<a href="$href"$linkClass$linkTarget$linkTitle>#image($img $alt $border $width $height)$name</a>
#else
<a href="$href"$linkClass$linkTarget$linkTitle>$name #image($img $alt $border $width $height)</a>
#end
#else
<a href="$href"$linkClass$linkTarget$linkTitle>$name</a>
#end
#end
##
#macro ( image $img $alt $border $width $height )
#if( $img )
#if ( ! ( $img.toLowerCase().startsWith("http") || $img.toLowerCase().startsWith("https") ) )
#set ( $imgSrc = $PathTool.calculateLink( $img, $relativePath ) )
#set ( $imgSrc = $imgSrc.replaceAll( "\\", "/" ) )
#set ( $imgSrc = ' src="' + $imgSrc + '"' )
#else
#set ( $imgSrc = ' src="' + $img + '"' )
#end
#if( $alt )
#set ( $imgAlt = ' alt="' + $alt + '"' )
#else
#set ( $imgAlt = ' alt=""' )
#end
#if( $border )
#set ( $imgBorder = ' border="' + $border + '"' )
#else
#set ( $imgBorder = "" )
#end
#if( $width )
#set ( $imgWidth = ' width="' + $width + '"' )
#else
#set ( $imgWidth = "" )
#end
#if( $height )
#set ( $imgHeight = ' height="' + $height + '"' )
#else
#set ( $imgHeight = "" )
#end
<img class="imageLink"$imgSrc$imgAlt$imgBorder$imgWidth$imgHeight/>
#end
#end
#macro ( banner $banner $id )
#if ( $banner )
#if( $banner.href )
<a href="$banner.href" id="$id"#if( $banner.alt ) title="$banner.alt"#end>
#else
<div id="$id">
#end
##
#if( $banner.src )
#set ( $src = $banner.src )
#if ( ! ( $src.toLowerCase().startsWith("http") || $src.toLowerCase().startsWith("https") ) )
#set ( $src = $PathTool.calculateLink( $src, $relativePath ) )
#set ( $src = $src.replaceAll( "\\", "/" ) )
#end
#if ( $banner.alt )
#set ( $alt = $banner.alt )
#else
#set ( $alt = $banner.name )
#end
<img src="$src" alt="$alt" />
#else
$banner.name
#end
##
#if( $banner.href )
</a>
#else
</div>
#end
#end
#end
##
#macro ( links $links )
#set ( $counter = 0 )
#foreach( $item in $links )
#set ( $counter = $counter + 1 )
#set ( $currentItemHref = $PathTool.calculateLink( $item.href, $relativePath ) )
#set ( $currentItemHref = $currentItemHref.replaceAll( "\\", "/" ) )
#link( $currentItemHref $item.name $item.target $item.img $item.position $item.alt $item.border $item.width $item.height )
#if ( $links.size() > $counter )
|
#end
#end
#end
##
#macro ( breadcrumbs $breadcrumbs )
#set ( $counter = 0 )
#foreach( $item in $breadcrumbs )
#set ( $counter = $counter + 1 )
#set ( $currentItemHref = $PathTool.calculateLink( $item.href, $relativePath ) )
#set ( $currentItemHref = $currentItemHref.replaceAll( "\\", "/" ) )
##
#if ( $currentItemHref == $alignedFileName || $currentItemHref == "" )
$item.name
#else
#link( $currentItemHref $item.name $item.target $item.img $item.position $item.alt $item.border $item.width $item.height )
#end
#if ( $breadcrumbs.size() > $counter )
&gt;
#end
#end
#end
##
#macro ( displayTree $display $item )
#if ( $item && $item.items && $item.items.size() > 0 )
#foreach( $subitem in $item.items )
#set ( $subitemHref = $PathTool.calculateLink( $subitem.href, $relativePath ) )
#set ( $subitemHref = $subitemHref.replaceAll( "\\", "/" ) )
#if ( $alignedFileName == $subitemHref )
#set ( $display = true )
#end
##
#displayTree( $display $subitem )
#end
#end
#end
##
#macro ( menuItem $item )
#set ( $collapse = "none" )
#set ( $currentItemHref = $PathTool.calculateLink( $item.href, $relativePath ) )
#set ( $currentItemHref = $currentItemHref.replaceAll( "\\", "/" ) )
##
#if ( $item && $item.items && $item.items.size() > 0 )
#if ( $item.collapse == false )
#set ( $collapse = "expanded" )
#else
## By default collapsed
#set ( $collapse = "collapsed" )
#end
##
#set ( $display = false )
#displayTree( $display $item )
##
#if ( $alignedFileName == $currentItemHref || $display )
#set ( $collapse = "expanded" )
#end
#end
<li class="$collapse">
#if ( $item.img )
#if ( $item.position == "left" )
#if ( $alignedFileName == $currentItemHref )
<strong>#image($item.img $item.alt $item.border $item.width $item.height) $item.name</strong>
#else
#link($currentItemHref $item.name $item.target $item.img $item.position $item.alt $item.border $item.width $item.height)
#end
#else
#if ( $alignedFileName == $currentItemHref )
<strong>$item.name #image($item.img $item.alt $item.border $item.width $item.height)</strong>
#else
#link($currentItemHref $item.name $item.target $item.img $item.position $item.alt $item.border $item.width $item.height)
#end
#end
#else
#if ( $alignedFileName == $currentItemHref )
<strong>$item.name</strong>
#else
#link( $currentItemHref $item.name $item.target $item.img $item.position $item.alt $item.border $item.width $item.height )
#end
#end
#if ( $item && $item.items && $item.items.size() > 0 )
#if ( $collapse == "expanded" )
<ul>
#foreach( $subitem in $item.items )
#menuItem( $subitem )
#end
</ul>
#end
#end
</li>
#end
##
#macro ( mainMenu $menus )
#foreach( $menu in $menus )
#if ( $menu.name )
#if ( $menu.img )
#if( $menu.position )
#set ( $position = $menu.position )
#else
#set ( $position = "left" )
#end
##
#if ( ! ( $menu.img.toLowerCase().startsWith("http") || $menu.img.toLowerCase().startsWith("https") ) )
#set ( $src = $PathTool.calculateLink( $menu.img, $relativePath ) )
#set ( $src = $src.replaceAll( "\\", "/" ) )
#set ( $src = ' src="' + $src + '"' )
#else
#set ( $src = ' src="' + $menu.img + '"' )
#end
##
#if( $menu.alt )
#set ( $alt = ' alt="' + $menu.alt + '"' )
#else
#set ( $alt = ' alt="' + $menu.name + '"' )
#end
##
#if( $menu.border )
#set ( $border = ' border="' + $menu.border + '"' )
#else
#set ( $border = ' border="0"' )
#end
##
#if( $menu.width )
#set ( $width = ' width="' + $menu.width + '"' )
#else
#set ( $width = "" )
#end
#if( $menu.height )
#set ( $height = ' height="' + $menu.height + '"' )
#else
#set ( $height = "" )
#end
##
#set ( $img = '<img class="imageLink"' + $src + $alt + $border + $width + $height + "/>" )
##
#if ( $position == "left" )
<h5>$img $menu.name</h5>
#else
<h5>$menu.name $img</h5>
#end
#else
<h5>$menu.name</h5>
#end
#end
#if ( $menu.items && $menu.items.size() > 0 )
<ul>
#foreach( $item in $menu.items )
#menuItem( $item )
#end
</ul>
#end
#end
#end
##
#macro ( copyright )
#if ( $project )
#if ( ${project.organization} && ${project.organization.name} )
#set ( $period = "" )
#else
#set ( $period = "." )
#end
##
#set ( $currentYear = ${currentDate.year} + 1900 )
##
#if ( ${project.inceptionYear} && ( ${project.inceptionYear} != ${currentYear.toString()} ) )
${project.inceptionYear}-${currentYear}${period}
#else
${currentYear}${period}
#end
##
#if ( ${project.organization} )
#if ( ${project.organization.name} && ${project.organization.url} )
<a href="$project.organization.url">${project.organization.name}</a>.
#elseif ( ${project.organization.name} )
${project.organization.name}.
#end
#end
#end
#end
##
#macro ( publishDate $position $publishDate $version )
#if ( $publishDate && $publishDate.format )
#set ( $format = $publishDate.format )
#else
#set ( $format = "yyyy-MM-dd" )
#end
##
$dateFormat.applyPattern( $format )
##
#set ( $dateToday = $dateFormat.format( $currentDate ) )
##
#if ( $publishDate && $publishDate.position )
#set ( $datePosition = $publishDate.position )
#else
#set ( $datePosition = "left" )
#end
##
#if ( $version )
#if ( $version.position )
#set ( $versionPosition = $version.position )
#else
#set ( $versionPosition = "left" )
#end
#else
#set ( $version = "" )
#set ( $versionPosition = "left" )
#end
##
#set ( $breadcrumbs = $decoration.body.breadcrumbs )
#set ( $links = $decoration.body.links )
#if ( $datePosition.equalsIgnoreCase( "right" ) && $links && $links.size() > 0 )
#set ( $prefix = "&nbsp;|" )
#else
#set ( $prefix = "" )
#end
##
#if ( $datePosition.equalsIgnoreCase( $position ) )
#if ( ( $datePosition.equalsIgnoreCase( "right" ) ) || ( $datePosition.equalsIgnoreCase( "bottom" ) ) )
$prefix <span id="publishDate">$i18n.getString( "site-renderer", $locale, "template.lastpublished" ): $dateToday</span>
#if ( $versionPosition.equalsIgnoreCase( $position ) )
&nbsp;| <span id="projectVersion">$i18n.getString( "site-renderer", $locale, "template.version" ): ${project.version}</span>
#end
#elseif ( ( $datePosition.equalsIgnoreCase( "navigation-bottom" ) ) || ( $datePosition.equalsIgnoreCase( "navigation-top" ) ) )
<div id="lastPublished">
<span id="publishDate">$i18n.getString( "site-renderer", $locale, "template.lastpublished" ): $dateToday</span>
#if ( $versionPosition.equalsIgnoreCase( $position ) )
&nbsp;| <span id="projectVersion">$i18n.getString( "site-renderer", $locale, "template.version" ): ${project.version}</span>
#end
</div>
#elseif ( $datePosition.equalsIgnoreCase("left") )
<div class="xleft">
<span id="publishDate">$i18n.getString( "site-renderer", $locale, "template.lastpublished" ): $dateToday</span>
#if ( $versionPosition.equalsIgnoreCase( $position ) )
&nbsp;| <span id="projectVersion">$i18n.getString( "site-renderer", $locale, "template.version" ): ${project.version}</span>
#end
#if ( $breadcrumbs && $breadcrumbs.size() > 0 )
| #breadcrumbs( $breadcrumbs )
#end
</div>
#end
#elseif ( $versionPosition.equalsIgnoreCase( $position ) )
#if ( ( $versionPosition.equalsIgnoreCase( "right" ) ) || ( $versionPosition.equalsIgnoreCase( "bottom" ) ) )
$prefix <span id="projectVersion">$i18n.getString( "site-renderer", $locale, "template.version" ): ${project.version}</span>
#elseif ( ( $versionPosition.equalsIgnoreCase( "navigation-bottom" ) ) || ( $versionPosition.equalsIgnoreCase( "navigation-top" ) ) )
<div id="lastPublished">
<span id="projectVersion">$i18n.getString( "site-renderer", $locale, "template.version" ): ${project.version}</span>
</div>
#elseif ( $versionPosition.equalsIgnoreCase("left") )
<div class="xleft">
<span id="projectVersion">$i18n.getString( "site-renderer", $locale, "template.version" ): ${project.version}</span>
#if ( $breadcrumbs && $breadcrumbs.size() > 0 )
| #breadcrumbs( $breadcrumbs )
#end
</div>
#end
#elseif ( $position.equalsIgnoreCase( "left" ) )
#if ( $breadcrumbs && $breadcrumbs.size() > 0 )
<div class="xleft">
#breadcrumbs( $breadcrumbs )
</div>
#end
#end
#end
##
#macro ( poweredByLogo $poweredBy )
#if( $poweredBy )
#foreach ($item in $poweredBy)
#if( $item.href )
#set ( $href = $PathTool.calculateLink( $item.href, $relativePath ) )
#set ( $href = $href.replaceAll( "\\", "/" ) )
#else
#set ( $href="http://maven.apache.org/" )
#end
##
#if( $item.name )
#set ( $name = $item.name )
#else
#set ( $name = $i18n.getString( "site-renderer", $locale, "template.builtby" ) )
#set ( $name = "${name} Maven" )
#end
##
#if( $item.img )
#set ( $img = $item.img )
#else
#set ( $img = "images/logos/maven-feather.png" )
#end
##
#if ( ! ( $img.toLowerCase().startsWith("http") || $img.toLowerCase().startsWith("https") ) )
#set ( $img = $PathTool.calculateLink( $img, $relativePath ) )
#set ( $img = $img.replaceAll( "\\", "/" ) )
#end
##
#if( $item.alt )
#set ( $alt = ' alt="' + $item.alt + '"' )
#else
#set ( $alt = ' alt="' + $name + '"' )
#end
##
#if( $item.border )
#set ( $border = ' border="' + $item.border + '"' )
#else
#set ( $border = "" )
#end
##
#if( $item.width )
#set ( $width = ' width="' + $item.width + '"' )
#else
#set ( $width = "" )
#end
#if( $item.height )
#set ( $height = ' height="' + $item.height + '"' )
#else
#set ( $height = "" )
#end
##
<a href="$href" title="$name" class="poweredBy">
<img class="poweredBy" $alt src="$img" $border $width $height />
</a>
#end
#if( $poweredBy.isEmpty() )
<a href="http://maven.apache.org/" title="$i18n.getString( "site-renderer", $locale, "template.builtby" ) Maven" class="poweredBy">
<img class="poweredBy" alt="$i18n.getString( "site-renderer", $locale, "template.builtby" ) Maven" src="$relativePath/images/logos/maven-feather.png" />
</a>
#end
#else
<a href="http://maven.apache.org/" title="$i18n.getString( "site-renderer", $locale, "template.builtby" ) Maven" class="poweredBy">
<img class="poweredBy" alt="$i18n.getString( "site-renderer", $locale, "template.builtby" ) Maven" src="$relativePath/images/logos/maven-feather.png" />
</a>
#end
#end
##
<html xmlns="http://www.w3.org/1999/xhtml"#if ( $locale ) xml:lang="$locale.language" lang="$locale.language"#end>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=${outputEncoding}" />
<title>$title</title>
<style type="text/css" media="all">
@import url("$relativePath/css/maven-base.css");
@import url("$relativePath/css/maven-theme.css");
@import url("$relativePath/css/site.css");
</style>
<link rel="stylesheet" href="$relativePath/css/print.css" type="text/css" media="print" />
<link rel="shortcut icon" href="/images/favicon.ico" />
#foreach( $author in $authors )
<meta name="author" content="$author" />
#end
#if ( $dateCreation )
<meta name="Date-Creation-yyyymmdd" content="$dateCreation" />
#end
#if ( $dateRevision )
<meta name="Date-Revision-yyyymmdd" content="$dateRevision" />
#end
#if ( $locale )
<meta http-equiv="Content-Language" content="$locale.language" />
#end
#if ( $decoration.body.head )
#foreach( $item in $decoration.body.head.getChildren() )
## Workaround for DOXIA-150 due to a non-desired behaviour in p-u
## @see org.codehaus.plexus.util.xml.Xpp3Dom#toString()
## @see org.codehaus.plexus.util.xml.Xpp3Dom#toUnescapedString()
#set ( $documentHeader = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" )
#set ( $documentHeader = $documentHeader.replaceAll( "\\", "" ) )
#if ( $item.name == "script" )
$StringUtils.replace( $item.toUnescapedString(), $documentHeader, "" )
#else
$StringUtils.replace( $item.toString(), $documentHeader, "" )
#end
#end
#end
## $headContent
<!--Google Analytics-->
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-30210968-1']);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
</head>
<body class="composite">
<div id="banner">
#banner( $decoration.bannerLeft "bannerLeft" )
<!-- Commented out since we do not use it. St.Ack 20110906
-->
## #banner( $decoration.bannerRight "bannerRight" )
<div class="clear">
<hr/>
</div>
</div>
<div id="breadcrumbs">
<div class="xright" style="padding-left: 8px; margin-top: -4px;">
<form method="GET" action="http://search-hadoop.com/">
<input type="text" style="width: 192px; height: 15px; font-size: inherit; border: 1px solid darkgray" name="q" value="Search wiki, mailing lists & more" onfocus="this.value=''"/>
<input type="hidden" name="fc_project" value="HBase"/>
<button style="height: 20px; width: 60px;">Search</button>
</form>
</div>
<div class="clear">
<hr/>
</div>
</div>
<div id="leftColumn">
<div id="navcolumn">
#publishDate( "navigation-top" $decoration.publishDate $decoration.version )
#mainMenu( $decoration.body.menus )
#poweredByLogo( $decoration.poweredBy )
#publishDate( "navigation-bottom" $decoration.publishDate $decoration.version )
</div>
</div>
<div id="bodyColumn">
<div id="contentBox">
$bodyContent
</div>
</div>
<div class="clear">
<hr/>
</div>
<div id="footer">
<div class="xright"> #publishDate( "right" $decoration.publishDate $decoration.version )&nbsp;| Copyright &#169;#copyright()All Rights Reserved. </div>
<div class="clear">
<hr/>
</div>
</div>
</body>
</html>

@@ -1,68 +0,0 @@
<?xml version="1.0" encoding="ISO-8859-1"?>
<!--
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-->
<project xmlns="http://maven.apache.org/DECORATION/1.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/DECORATION/1.0.0 http://maven.apache.org/xsd/decoration-1.0.0.xsd">
<bannerLeft>
<name>HBase</name>
<src>images/hbase_logo.png</src>
<href>http://hbase.apache.org/</href>
</bannerLeft>
<bannerRight />
<version position="right" />
<publishDate position="right" />
<body>
<menu name="HBase Project">
<item name="Overview" href="index.html"/>
<item name="License" href="license.html" />
<item name="Downloads" href="http://www.apache.org/dyn/closer.cgi/hbase/" />
<item name="Release Notes" href="https://issues.apache.org/jira/browse/HBASE?report=com.atlassian.jira.plugin.system.project:changelog-panel#selectedTab=com.atlassian.jira.plugin.system.project%3Achangelog-panel" />
<item name="Issue Tracking" href="issue-tracking.html" />
<item name="Mailing Lists" href="mail-lists.html" />
<item name="Source Repository" href="source-repository.html" />
<item name="ReviewBoard" href="https://reviews.apache.org"/>
<item name="Team" href="team-list.html" />
<item name="Sponsors" href="sponsors.html" />
<item name="Blog" href="http://blogs.apache.org/hbase/" />
</menu>
<menu name="Documentation">
<item name="Getting Started" href="book/quickstart.html" />
<item name="API" href="apidocs/index.html" />
<item name="X-Ref" href="xref/index.html" />
<item name="Ref Guide (multi-page)" href="book/book.html" />
<item name="Ref Guide (single-page)" href="book.html" />
<item name="FAQ" href="book/faq.html" />
<item name="Videos/Presentations" href="book.html#other.info" />
<item name="Wiki" href="http://wiki.apache.org/hadoop/Hbase" />
<item name="ACID Semantics" href="acid-semantics.html" />
<item name="Bulk Loads" href="book.html#arch.bulk.load" />
<item name="Metrics" href="metrics.html" />
<item name="HBase on Windows" href="cygwin.html" />
<item name="Cluster replication" href="replication.html" />
</menu>
</body>
<skin>
<groupId>org.apache.maven.skins</groupId>
<artifactId>maven-stylus-skin</artifactId>
</skin>
</project>

@@ -1,232 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright 2010 The Apache Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!DOCTYPE document PUBLIC "-//APACHE//DTD Documentation V2.0//EN"
"http://forrest.apache.org/dtd/document-v20.dtd">
<document xmlns="http://maven.apache.org/XDOC/2.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd">
<properties>
<title>
HBase ACID Properties
</title>
</properties>
<body>
<section name="About this Document">
<p>HBase is not an ACID-compliant database. However, it does guarantee certain specific
properties.</p>
<p>This specification enumerates the ACID properties of HBase.</p>
</section>
<section name="Definitions">
<p>For the sake of common vocabulary, we define the following terms:</p>
<dl>
<dt>Atomicity</dt>
<dd>an operation is atomic if it either completes entirely or not at all</dd>
<dt>Consistency</dt>
<dd>
all actions cause the table to transition from one valid state directly to another
(e.g. a row will not disappear during an update, etc.)
</dd>
<dt>Isolation</dt>
<dd>
an operation is isolated if it appears to complete independently of any other concurrent transaction
</dd>
<dt>Durability</dt>
<dd>any update that reports &quot;successful&quot; to the client will not be lost</dd>
<dt>Visibility</dt>
<dd>an update is considered visible if any subsequent read will see the update as having been committed</dd>
</dl>
<p>
The terms <em>must</em> and <em>may</em> are used as specified by RFC 2119.
In short, the word &quot;must&quot; implies that, if some case exists where the statement
is not true, it is a bug. The word &quot;may&quot; implies that, even if the guarantee
is provided in a current release, users should not rely on it.
</p>
</section>
<section name="APIs to consider">
<ul>
<li>Read APIs
<ul>
<li>get</li>
<li>scan</li>
</ul>
</li>
<li>Write APIs</li>
<ul>
<li>put</li>
<li>batch put</li>
<li>delete</li>
</ul>
<li>Combination (read-modify-write) APIs</li>
<ul>
<li>incrementColumnValue</li>
<li>checkAndPut</li>
</ul>
</ul>
</section>
<section name="Guarantees Provided">
<section name="Atomicity">
<ol>
<li>All mutations are atomic within a row. Any put will either wholly succeed or wholly fail.[3]</li>
<ol>
<li>An operation that returns a &quot;success&quot; code has completely succeeded.</li>
<li>An operation that returns a &quot;failure&quot; code has completely failed.</li>
<li>An operation that times out may have succeeded and may have failed. However,
it will not have partially succeeded or failed.</li>
</ol>
<li> This is true even if the mutation crosses multiple column families within a row.</li>
<li> APIs that mutate several rows will _not_ be atomic across the multiple rows.
For example, a multiput that operates on rows 'a','b', and 'c' may return having
mutated some but not all of the rows. In such cases, these APIs will return a list
of success codes, each of which may be succeeded, failed, or timed out as described above.</li>
<li> The checkAndPut API happens atomically like the typical compareAndSet (CAS) operation
found in many hardware architectures.</li>
<li> Mutations are seen to happen in a well-defined order for each row, with no
interleaving. For example, if one writer issues the mutation &quot;a=1,b=1,c=1&quot; and
another writer issues the mutation &quot;a=2,b=2,c=2&quot;, the row must either
be &quot;a=1,b=1,c=1&quot; or &quot;a=2,b=2,c=2&quot; and must <em>not</em> be something
like &quot;a=1,b=2,c=1&quot;.</li>
<ol>
<li>Please note that this is not true _across rows_ for multirow batch mutations.</li>
</ol>
</ol>
</section>
<section name="Consistency and Isolation">
<ol>
<li>All rows returned via any access API will consist of a complete row that existed at
some point in the table's history.</li>
<li>This is true across column families - i.e. a get of a full row that occurs concurrently
with some mutations 1,2,3,4,5 will return a complete row that existed at some point in time
between mutation i and i+1 for some i between 1 and 5.</li>
<li>The state of a row will only move forward through the history of edits to it.</li>
</ol>
<section name="Consistency of Scans">
<p>
A scan is <strong>not</strong> a consistent view of a table. Scans do
<strong>not</strong> exhibit <em>snapshot isolation</em>.
</p>
<p>
Rather, scans have the following properties:
</p>
<ol>
<li>
Any row returned by the scan will be a consistent view (i.e. that version
of the complete row existed at some point in time) [1]
</li>
<li>
A scan will always reflect a view of the data <em>at least as new as</em>
the beginning of the scan. This satisfies the visibility guarantees
enumerated below.</li>
<ol>
<li>For example, if client A writes data X and then communicates via a side
channel to client B, any scans started by client B will contain data at least
as new as X.</li>
<li>A scan _must_ reflect all mutations committed prior to the construction
of the scanner, and _may_ reflect some mutations committed subsequent to the
construction of the scanner.</li>
<li>Scans must include <em>all</em> data written prior to the scan (except in
the case where data is subsequently mutated, in which case it _may_ reflect
the mutation)</li>
</ol>
</ol>
<p>
Those familiar with relational databases will recognize this isolation level as &quot;read committed&quot;.
</p>
<p>
Please note that the guarantees listed above regarding scanner consistency
are referring to &quot;transaction commit time&quot;, not the &quot;timestamp&quot;
field of each cell. That is to say, a scanner started at time <em>t</em> may see edits
with a timestamp value greater than <em>t</em>, if those edits were committed with a
&quot;forward dated&quot; timestamp before the scanner was constructed.
</p>
</section>
</section>
<section name="Visibility">
<ol>
<li> When a client receives a &quot;success&quot; response for any mutation, that
mutation is immediately visible to both that client and any client with whom it
later communicates through side channels. [3]</li>
<li> A row must never exhibit so-called &quot;time-travel&quot; properties. That
is to say, if a series of mutations moves a row sequentially through a series of
states, any sequence of concurrent reads will return a subsequence of those states.</li>
<ol>
<li>For example, if a row's cells are mutated using the &quot;incrementColumnValue&quot;
API, a client must never see the value of any cell decrease.</li>
<li>This is true regardless of which read API is used to read back the mutation.</li>
</ol>
<li> Any version of a cell that has been returned to a read operation is guaranteed to
be durably stored.</li>
</ol>
</section>
<section name="Durability">
<ol>
<li> All visible data is also durable data. That is to say, a read will never return
data that has not been made durable on disk[2]</li>
<li> Any operation that returns a &quot;success&quot; code (e.g. does not throw an exception)
will be made durable.[3]</li>
<li> Any operation that returns a &quot;failure&quot; code will not be made durable
(subject to the Atomicity guarantees above)</li>
<li> All reasonable failure scenarios will not affect any of the guarantees of this document.</li>
</ol>
</section>
<section name="Tunability">
<p>All of the above guarantees must be possible within HBase. For users who would like to trade
off some guarantees for performance, HBase may offer several tuning options. For example:</p>
<ul>
<li>Visibility may be tuned on a per-read basis to allow stale reads or time travel.</li>
<li>Durability may be tuned to only flush data to disk on a periodic basis</li>
</ul>
</section>
</section>
<section name="More Information">
<p>
For more information, see the <a href="book.html#client">client architecture</a> or <a href="book.html#datamodel">data model</a> sections in the HBase Reference Guide.
</p>
</section>
<section name="Footnotes">
<p>[1] A consistent view is not guaranteed intra-row scanning -- i.e. fetching a portion of
a row in one RPC then going back to fetch another portion of the row in a subsequent RPC.
Intra-row scanning happens when you set a limit on how many values to return per Scan#next
(See <a href="http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Scan.html#setBatch(int)">Scan#setBatch(int)</a>).
</p>
<p>[2] In the context of HBase, &quot;durably on disk&quot; implies an hflush() call on the transaction
log. This does not actually imply an fsync() to magnetic media, but rather just that the data has been
written to the OS cache on all replicas of the log. In the case of a full datacenter power loss, it is
possible that the edits are not truly durable.</p>
<p>[3] Puts will either wholly succeed or wholly fail, provided that they are actually sent
to the RegionServer. If the write buffer is used, Puts will not be sent until the write buffer is filled
or it is explicitly flushed.</p>
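<p>
In client terms, the write buffer behavior described in [3] looks like the
following minimal sketch (table and column names are illustrative):
</p>
<pre>
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteBufferExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "test");
    table.setAutoFlush(false); // enable the client-side write buffer

    Put put = new Put(Bytes.toBytes("row1"));
    put.add(Bytes.toBytes("data"), Bytes.toBytes("1"), Bytes.toBytes("value1"));
    table.put(put); // buffered: not yet sent to the RegionServer

    table.flushCommits(); // explicit flush: the Put is actually sent now
    table.close();
  }
}
</pre>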
</section>
</body>
</document>


@ -1,31 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright 2010 The Apache Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<document xmlns="http://maven.apache.org/XDOC/2.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd">
<properties>
<title>
Bulk Loads in HBase
</title>
</properties>
<body>
<p>This page has been retired. The contents have been moved to the
<a href="http://hbase.apache.org/book.html#arch.bulk.load">Bulk Loading</a> section
in the Reference Guide.
</p>
</body>
</document>


@ -1,242 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright 2010 The Apache Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<document xmlns="http://maven.apache.org/XDOC/2.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd">
<properties>
<title>Installing HBase on Windows using Cygwin</title>
</properties>
<body>
<section name="Introduction">
<p><a title="HBase project" href="http://hbase.apache.org" target="_blank">HBase</a> is a distributed, column-oriented store, modeled after Google's <a title="Google's BigTable" href="http://research.google.com/archive/bigtable.html" target="_blank">BigTable</a>. HBase is built on top of <a title="Hadoop project" href="http://hadoop.apache.org">Hadoop</a> for its <a title="Hadoop MapReduce project" href="http://hadoop.apache.org/mapreduce" target="_blank">MapReduce </a>and <a title="Hadoop DFS project" href="http://hadoop.apache.org/hdfs">distributed file system</a> implementation. All these projects are open-source and part of the <a title="The Apache Software Foundation" href="http://www.apache.org/" target="_blank">Apache Software Foundation</a>.</p>
<p style="text-align: justify; ">As being distributed, large scale platforms, the Hadoop and HBase projects mainly focus on <em><strong>*nix</strong></em><strong> environments</strong> for production installations. However, being developed in <strong>Java</strong>, both projects are fully <strong>portable</strong> across platforms and, hence, also to the <strong>Windows operating system</strong>. For ease of development the projects rely on <a title="Cygwin site" href="http://www.cygwin.com/" target="_blank">Cygwin</a> to have a *nix-like environment on Windows to run the shell scripts.</p>
</section>
<section name="Purpose">
<p style="text-align: justify; ">This document explains the <strong>intricacies of running HBase on Windows using Cygwin</strong> as an all-in-one single-node installation for testing and development. The HBase <a title="HBase Overview" href="http://hbase.apache.org/apidocs/overview-summary.html#overview_description" target="_blank">Overview</a> and <a title="HBase QuickStart" href="http://hbase.apache.org/book/quickstart.html" target="_blank">QuickStart</a> guides on the other hand go a long way in explaning how to setup <a title="HBase project" href="http://hadoop.apache.org/hbase" target="_blank">HBase</a> in more complex deployment scenario's.</p>
</section>
<section name="Installation">
<p style="text-align: justify; ">For running HBase on Windows, 3 technologies are required: <strong>Java, Cygwin and SSH</strong>. The following paragraphs detail the installation of each of the aforementioned technologies.</p>
<section name="Java">
<p style="text-align: justify; ">HBase depends on the <a title="Java Platform, Standard Edition, 6 Release" href="http://java.sun.com/javase/6/" target="_blank">Java Platform, Standard Edition, 6 Release</a>. So the target system has to be provided with at least the Java Runtime Environment (JRE); however if the system will also be used for development, the Jave Development Kit (JDK) is preferred. You can download the latest versions for both from <a title="Java SE Downloads" href="http://java.sun.com/javase/downloads/index.jsp" target="_blank">Sun's download page</a>. Installation is a simple GUI wizard that guides you through the process.</p>
</section>
<section name="Cygwin">
<p style="text-align: justify; ">Cygwin is probably the oddest technology in this solution stack. It provides a dynamic link library that emulates most of a *nix environment on Windows. On top of that a whole bunch of the most common *nix tools are supplied. Combined, the DLL with the tools form a very *nix-alike environment on Windows.</p>
<p style="text-align: justify; ">For installation, Cygwin provides the <a title="Cygwin Setup Utility" href="http://cygwin.com/setup.exe" target="_blank"><strong><code>setup.exe</code> utility</strong></a> that tracks the versions of all installed components on the target system and provides the mechanism for <strong>installing</strong> or <strong>updating </strong>everything from the mirror sites of Cygwin.</p>
<p style="text-align: justify; ">To support installation, the <code>setup.exe</code> utility uses 2 directories on the target system. The <strong>Root</strong> directory for Cygwin (defaults to <code>C:\cygwin)</code> which will become <code>/</code> within the eventual Cygwin installation; and the <strong>Local Package </strong>directory (e.g. <code>C:\cygsetup</code> that is the cache where <code>setup.exe</code> stores the packages before they are installed. The cache must not be the same folder as the Cygwin root.</p>
<p style="text-align: justify; ">Perform following steps to install Cygwin, which are elaboratly detailed in the <a title="Setting Up Cygwin" href="http://cygwin.com/cygwin-ug-net/setup-net.html" target="_self">2nd chapter</a> of the <a title="Cygwin User's Guide" href="http://cygwin.com/cygwin-ug-net/cygwin-ug-net.html" target="_blank">Cygwin User's Guide</a>:</p>
<ol style="text-align: justify; ">
<li>Make sure you have <code>Administrator</code> privileges on the target system.</li>
<li>Choose and create your <strong>Root</strong> and <strong>Local Package</strong> directories. A good suggestion is to use <code>C:\cygwin\root</code> and <code>C:\cygwin\setup</code> folders.</li>
<li>Download the <code>setup.exe</code> utility and save it to the <strong>Local Package</strong> directory.</li>
<li>Run the <code>setup.exe</code> utility,
<ol>
<li>Choose the <code>Install from Internet</code> option,</li>
<li>Choose your <strong>Root</strong> and <strong>Local Package</strong> folders</li>
<li>and select an appropriate mirror.</li>
<li>Don't select any additional packages yet, as we only want to install Cygwin for now.</li>
<li>Wait for download and install</li>
<li>Finish the installation</li>
</ol>
</li>
<li>Optionally, you can now also add a shortcut to your Start menu pointing to the <code>setup.exe</code> utility in the <strong>Local Package </strong>folder.</li>
<li>Add a <code>CYGWIN_HOME</code> system-wide environment variable that points to your <strong>Root </strong>directory.</li>
<li>Add <code>%CYGWIN_HOME%\bin</code> to the end of your <code>PATH</code> environment variable.</li>
<li>Reboot the system after making changes to the environment variables; otherwise the OS will not be able to find the Cygwin utilities.</li>
<li>Test your installation by running your freshly created shortcuts or the <code>Cygwin.bat</code> command in the <strong>Root</strong> folder. You should end up in a terminal window that is running a <a title="Bash Reference Manual" href="http://www.gnu.org/software/bash/manual/bashref.html" target="_blank">Bash shell</a>. Test the shell by issuing the following commands:
<ol>
<li><code>cd /</code> should take you to the <strong>Root</strong> directory in Cygwin;</li>
<li>the <code>ls</code> command should list all files and folders in the current directory.</li>
<li>Use the <code>exit</code> command to end the terminal.</li>
</ol>
</li>
<li>When needed, to <strong>uninstall</strong> Cygwin you can simply delete the <strong>Root</strong> and <strong>Local Package</strong> directories, and the <strong>shortcuts</strong> that were created during installation.</li>
</ol>
</section>
<section name="SSH">
<p style="text-align: justify; ">HBase (and Hadoop) rely on <a title="Secure Shell" href="http://nl.wikipedia.org/wiki/Secure_Shell" target="_blank"><strong>SSH</strong></a> for interprocess/-node <strong>communication</strong> and launching<strong> remote commands</strong>. SSH will be provisioned on the target system via Cygwin, which supports running Cygwin programs as <strong>Windows services</strong>!</p>
<ol style="text-align: justify; ">
<li>Rerun the <code><strong>setup.exe</strong></code><strong> utility</strong>.</li>
<li>Leave all parameters as is, skipping through the wizard using the <code>Next</code> button until the <code>Select Packages</code> panel is shown.</li>
<li>Maximize the window and click the <code>View</code> button to toggle to the list view, which is ordered alphabetically on <code>Package</code>, making it easier to find the packages we'll need.</li>
<li>Select the following packages by clicking the status word (normally <code>Skip</code>) so that each is marked for installation. Use the <code>Next </code>button to download and install the packages.
<ol>
<li>OpenSSH</li>
<li>tcp_wrappers</li>
<li>diffutils</li>
<li>zlib</li>
</ol>
</li>
<li>Wait for the install to complete and finish the installation.</li>
</ol>
</section>
<section name="HBase">
<p style="text-align: justify; ">Download the <strong>latest release </strong>of HBase from the <a title="HBase Releases" href="http://www.apache.org/dyn/closer.cgi/hbase/" target="_blank">website</a>. As the HBase distributable is just a zipped archive, installation is as simple as unpacking the archive so it ends up in its final <strong>installation</strong> directory. Notice that HBase has to be installed in Cygwin and a good directory suggestion is to use <code>/usr/local/</code> (or [<code><strong>Root</strong> directory]\usr\local</code> in Windows slang). You should end up with a <code>/usr/local/hbase-<em>&lt;version&gt;</em></code> installation in Cygwin.</p>
<p style="text-align: justify; ">This finishes the installation. We can now move on to configuration.</p>
</section>
</section>
<section name="Configuration">
<p style="text-align: justify; ">There are 3 parts left to configure: <strong>Java, SSH and HBase</strong> itself. Following paragraphs explain eacht topic in detail.</p>
<section name="Java">
<p style="text-align: justify; ">One important thing to remember in shell scripting in general (i.e. *nix and Windows) is that managing, manipulating and assembling path names that contains spaces can be very hard, due to the need to escape and quote those characters and strings. So we try to stay away from spaces in path names. *nix environments can help us out here very easily by using <strong>symbolic links</strong>.</p>
<ol style="text-align: justify; ">
<li style="text-align: justify; ">Create a link in <code>/usr/local</code> to the Java home directory by using the following command and substituting the name of your chosen Java environment:
<pre>ln -s /cygdrive/c/Program\ Files/Java/<em>&lt;jre name&gt;</em> /usr/local/<em>&lt;jre name&gt;</em></pre>
</li>
<li>Test your Java installation by changing directories to your Java folder with <code>cd /usr/local/<em>&lt;jre name&gt;</em></code> and issuing the command <code>./bin/java -version</code>. This should output the version of your chosen JRE.</li>
</ol>
</section>
<section name="SSH">
<p style="text-align: justify; ">Configuring <strong>SSH </strong>is quite elaborate, but primarily a question of launching it by default as a<strong> Windows service</strong>.</p>
<ol style="text-align: justify; ">
<li style="text-align: justify; ">On Windows Vista and above make sure you run the Cygwin shell with <strong>elevated privileges</strong>, by right-clicking on the shortcut an using <code>Run as Administrator</code>.</li>
<li style="text-align: justify; ">First of all, we have to make sure the <strong>rights on some crucial files</strong> are correct. Use the commands underneath. You can verify all rights by using the <code>LS -L</code> command on the different files. Also, notice the auto-completion feature in the shell using <code>&lt;TAB&gt;</code> is extremely handy in these situations.
<ol>
<li><code>chmod +r /etc/passwd</code> to make the passwords file readable for all</li>
<li><code>chmod u+w /etc/passwd</code> to make the passwords file writable for the owner</li>
<li><code>chmod +r /etc/group</code> to make the groups file readable for all</li>
<li><code>chmod u+w /etc/group</code> to make the groups file writable for the owner</li>
<li><code>chmod 755 /var</code> to make the var folder writable to owner and readable and executable to all</li>
</ol>
</li>
<li>Edit the <strong>/etc/hosts.allow</strong> file using your favorite editor (why not VI in the shell!) and make sure the following two lines are in there before the <code>PARANOID</code> line:
<ol>
<li><code>ALL : localhost 127.0.0.1/32 : allow</code></li>
<li><code>ALL : [::1]/128 : allow</code></li>
</ol>
</li>
<li>Next we have to <strong>configure SSH</strong> by using the script <code>ssh-host-config</code>
<ol>
<li>If this script asks to overwrite an existing <code>/etc/ssh_config</code>, answer <code>yes</code>.</li>
<li>If this script asks to overwrite an existing <code>/etc/sshd_config</code>, answer <code>yes</code>.</li>
<li>If this script asks to use privilege separation, answer <code>yes</code>.</li>
<li>If this script asks to install <code>sshd</code> as a service, answer <code>yes</code>. Make sure you started your shell as Administrator!</li>
<li>If this script asks for the CYGWIN value, just <code>&lt;enter&gt;</code> as the default is <code>ntsec</code>.</li>
<li>If this script asks to create the <code>sshd</code> account, answer <code>yes</code>.</li>
<li>If this script asks to use a different user name as service account, answer <code>no</code> as the default will suffice.</li>
<li>If this script asks to create the <code>cyg_server</code> account, answer <code>yes</code>. Enter a password for the account.</li>
</ol>
</li>
<li><strong>Start the SSH service</strong> using <code>net start sshd</code> or <code>cygrunsrv --start sshd</code>. Notice that <code>cygrunsrv</code> is the utility that makes the process run as a Windows service. Confirm that you see a message stating that <code>the CYGWIN sshd service was started successfully.</code></li>
<li>Harmonize Windows and Cygwin<strong> user accounts</strong> by using the commands:
<ol>
<li><code>mkpasswd -cl &gt; /etc/passwd</code></li>
<li><code>mkgroup --local &gt; /etc/group</code></li>
</ol>
</li>
<li><strong>Test </strong>the installation of SSH:
<ol>
<li>Open a new Cygwin terminal</li>
<li>Use the command <code>whoami</code> to verify your userID</li>
<li>Issue an <code>ssh localhost</code> to connect to the system itself
<ol>
<li>Answer <code>yes</code> when presented with the server's fingerprint</li>
<li>Issue your password when prompted</li>
<li>test a few commands in the remote session</li>
<li>The <code>exit</code> command should take you back to your first shell in Cygwin</li>
</ol>
</li>
<li><code>exit</code> should terminate the Cygwin shell.</li>
</ol>
</li>
</ol>
</section>
<section name="HBase">
If all previous configurations are working properly, we just need to tinker with the <strong>HBase config</strong> files so that everything resolves properly on Windows/Cygwin. All files and paths referenced here start from the HBase <code>[<strong>installation</strong> directory]</code> as the working directory.
<ol>
<li>HBase uses the <code>./conf/<strong>hbase-env.sh</strong></code> to configure its dependencies on the runtime environment. Copy and uncomment the following lines just underneath their originals, changing them to fit your environment. They should read something like:
<ol>
<li><code>export JAVA_HOME=/usr/local/<em>&lt;jre name&gt;</em></code></li>
<li><code>export HBASE_IDENT_STRING=$HOSTNAME</code> as this most likely does not include spaces.</li>
</ol>
</li>
<li>HBase uses the ./conf/<code><strong>hbase-default.xml</strong></code> file for configuration. Some properties do not resolve to existing directories because the JVM runs on Windows. This is the major issue to keep in mind when working with Cygwin: within the shell all paths are *nix-alike, hence relative to the root <code>/</code>. However, every parameter that is consumed by the Windows processes themselves needs to be a Windows setting, hence <code>C:\</code>-alike. Change the following properties in the configuration file, adjusting paths where necessary to conform with your own installation (a sample of the resulting entries is shown after this list):
<ol>
<li><code>hbase.rootdir</code> must read e.g. <code>file:///C:/cygwin/root/tmp/hbase/data</code></li>
<li><code>hbase.tmp.dir</code> must read <code>C:/cygwin/root/tmp/hbase/tmp</code></li>
<li><code>hbase.zookeeper.quorum</code> must read <code>127.0.0.1</code> because for some reason <code>localhost</code> doesn't seem to resolve properly on Cygwin.</li>
</ol>
</li>
<li>Make sure the configured <code>hbase.rootdir</code> and <code>hbase.tmp.dir</code> <strong>directories exist</strong> and have the proper<strong> rights</strong> set up e.g. by issuing a <code>chmod 777</code> on them.</li>
</ol>
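<p style="text-align: justify; ">For reference, a sketch of what the resulting entries look like, using the suggested <strong>Root</strong> directory (adjust the paths to your own installation):</p>
<pre>
&lt;property&gt;
  &lt;name&gt;hbase.rootdir&lt;/name&gt;
  &lt;value&gt;file:///C:/cygwin/root/tmp/hbase/data&lt;/value&gt;
&lt;/property&gt;
&lt;property&gt;
  &lt;name&gt;hbase.tmp.dir&lt;/name&gt;
  &lt;value&gt;C:/cygwin/root/tmp/hbase/tmp&lt;/value&gt;
&lt;/property&gt;
&lt;property&gt;
  &lt;name&gt;hbase.zookeeper.quorum&lt;/name&gt;
  &lt;value&gt;127.0.0.1&lt;/value&gt;
&lt;/property&gt;
</pre>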
</section>
</section>
<section name="Testing">
<p>
This should conclude the installation and configuration of HBase on Windows using Cygwin. So it's time <strong>to test it</strong>.
<ol>
<li>Start a Cygwin<strong> terminal</strong>, if you haven't already.</li>
<li>Change directory to the HBase <strong>installation</strong> using <code>cd /usr/local/hbase-<em>&lt;version&gt;</em></code>, preferably using auto-completion.</li>
<li><strong>Start HBase</strong> using the command <code>./bin/start-hbase.sh</code>
<ol>
<li>When prompted to accept the SSH fingerprint, answer <code>yes</code>.</li>
<li>When prompted, provide your password, possibly multiple times.</li>
<li>When the command completes, the HBase server should have started.</li>
<li>However, to be absolutely certain, check the logs in the <code>./logs</code> directory for any exceptions.</li>
</ol>
</li>
<li>Next we <strong>start the HBase shell</strong> using the command <code>./bin/hbase shell</code></li>
<li>We run some simple <strong>test commands</strong>
<ol>
<li>Create a simple table using command <code>create 'test', 'data'</code></li>
<li>Verify the table exists using the command <code>list</code></li>
<li>Insert data into the table using e.g.
<pre>put 'test', 'row1', 'data:1', 'value1'
put 'test', 'row2', 'data:2', 'value2'
put 'test', 'row3', 'data:3', 'value3'</pre>
</li>
<li>List all rows in the table using the command <code>scan 'test'</code>, which should list all the rows previously inserted. Notice how 3 new columns were added without changing the schema!</li>
<li>Finally we get rid of the table by issuing <code>disable 'test'</code> followed by <code>drop 'test'</code>, and verify with <code>list</code>, which should give an empty listing.</li>
</ol>
</li>
<li><strong>Leave the shell</strong> by <code>exit</code></li>
<li>To <strong>stop the HBase server</strong> issue the <code>./bin/stop-hbase.sh</code> command and wait for it to complete! Killing the process might corrupt your data on disk.</li>
<li>In case of <strong>problems</strong>,
<ol>
<li>verify the HBase logs in the <code>./logs</code> directory.</li>
<li>Try to fix the problem</li>
<li>Get help on the forums or IRC (<code>#hbase@freenode.net</code>). People are very active and keen to help out!</li>
<li>Stop, restart and retest the server.</li>
</ol>
</li>
</ol>
</p>
</section>
<section name="Conclusion">
<p>
Now that your <strong>HBase </strong>server is running, <strong>start coding</strong> and build that next killer app on this particular, but scalable, datastore!
</p>
</section>
</body>
</document>


@ -1,77 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright 2010 The Apache Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<document xmlns="http://maven.apache.org/XDOC/2.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd">
<properties>
<title>HBase Home</title>
<link rel="shortcut icon" href="/images/favicon.ico" />
</properties>
<body>
<section name="Welcome to Apache HBase!">
<p>HBase is the <a href="http://hadoop.apache.org">Hadoop</a> database. Think of it as a distributed, scalable, big data store.
</p>
<h4>When Would I Use HBase?</h4>
<p>
Use HBase when you need random, realtime read/write access to your Big Data.
This project's goal is the hosting of very large tables -- billions of rows X millions of columns -- atop clusters of commodity hardware.
HBase is an open-source, distributed, versioned, column-oriented store modeled after Google's <a href="http://research.google.com/archive/bigtable.html">Bigtable: A Distributed Storage System for Structured Data</a> by Chang et al.
Just as Bigtable leverages the distributed data storage provided by the Google File System, HBase provides Bigtable-like capabilities on top of Hadoop and HDFS.
</p>
<h4>Features</h4>
<p>
<ul>
<li>Linear and modular scalability.
</li>
<li>Strictly consistent reads and writes.
</li>
<li>Automatic and configurable sharding of tables
</li>
<li>Automatic failover support between RegionServers.
</li>
<li>Convenient base classes for backing Hadoop MapReduce jobs with HBase tables.
</li>
<li>Easy to use Java API for client access.
</li>
<li>Block cache and Bloom Filters for real-time queries.
</li>
<li>Query predicate push down via server side Filters
</li>
<li>Thrift gateway and a REST-ful Web service that supports XML, Protobuf, and binary data encoding options
</li>
<li>Extensible jruby-based (JIRB) shell
</li>
<li>Support for exporting metrics via the Hadoop metrics subsystem to files or Ganglia; or via JMX
</li>
</ul>
</p>
<h4>Where Can I Get More Information?</h4>
<p>See the <a href="http://hbase.apache.org/book/architecture.html#arch.overview">Architecture Overview</a>, the <a href="http://hbase.apache.org/book/faq.html">Apache HBase Reference Guide FAQ</a>,
and the other documentation links on the left!
</p>
</section>
<section name="News">
<p>June 15th, 2012 <a href="http://www.meetup.com/hbaseusergroup/events/59829652/">Birds-of-a-feather</a> in San Jose, day after <a href="http://hadoopsummit.org">Hadoop Summit</a></p>
<p>May 23rd, 2012 <a href="http://www.meetup.com/hackathon/events/58953522/">HackConAthon</a> in Palo Alto</p>
<p>May 22nd, 2012 <a href="http://www.hbasecon.com">HBaseCon2012</a> in San Francisco</p>
<p><small><a href="old_news.html">Old News</a></small></p>
</section>
</body>
</document>


@ -1,147 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright 2010 The Apache Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<document xmlns="http://maven.apache.org/XDOC/2.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd">
<properties>
<title>
HBase Metrics
</title>
</properties>
<body>
<section name="Introduction">
<p>
HBase emits Hadoop <a href="http://hadoop.apache.org/core/docs/current/api/org/apache/hadoop/metrics/package-summary.html">metrics</a>.
</p>
</section>
<section name="Setup">
<p>First read up on Hadoop <a href="http://hadoop.apache.org/core/docs/current/api/org/apache/hadoop/metrics/package-summary.html">metrics</a>.
If you are using ganglia, the <a href="http://wiki.apache.org/hadoop/GangliaMetrics">GangliaMetrics</a>
wiki page is a useful read.</p>
<p>To have HBase emit metrics, edit <code>$HBASE_HOME/conf/hadoop-metrics.properties</code>
and enable metric 'contexts' per plugin. As of this writing, hadoop supports
<strong>file</strong> and <strong>ganglia</strong> plugins.
Yes, the hbase metrics file is named hadoop-metrics rather than
<em>hbase-metrics</em> because, currently at least, the hadoop metrics system has the
properties filename hardcoded. Per metrics <em>context</em>,
comment out the NullContext and enable one or more plugins instead.
</p>
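<p>
For example, to ship the <em>hbase</em> context to ganglia instead of the
NullContext, the edit would look something like the following sketch. The
host and port are placeholders, and the context class depends on your
ganglia version (<code>GangliaContext31</code> is for ganglia 3.1.x; older
versions use <code>GangliaContext</code>).
</p>
<source>
# Configuration of the "hbase" context for ganglia
hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
hbase.period=10
hbase.servers=GMETADHOST_IP:8649
</source>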
<p>
If you enable the <em>hbase</em> context, on regionservers you'll see total requests since last
metric emission, count of regions and storefiles as well as a count of memstore size.
On the master, you'll see a count of the cluster's requests.
</p>
<p>
Enabling the <em>rpc</em> context is good if you are interested in seeing
metrics on each hbase rpc method invocation (counts and time taken).
</p>
<p>
The <em>jvm</em> context is
useful for long-term stats on running hbase jvms -- memory used, thread counts, etc.
As of this writing, if more than one jvm is running and emitting metrics, at least
in ganglia, the stats are aggregated rather than reported per instance.
</p>
</section>
<section name="Using with JMX">
<p>
In addition to the standard output contexts supported by the Hadoop
metrics package, you can also export HBase metrics via Java Management
Extensions (JMX). This will allow viewing HBase stats in JConsole or
any other JMX client.
</p>
<section name="Enable HBase stats collection">
<p>
To enable JMX support in HBase, first edit
<code>$HBASE_HOME/conf/hadoop-metrics.properties</code> to support
metrics refreshing. (If you're running 0.94.1 or above, or have already configured
<code>hadoop-metrics.properties</code> for another output context,
you can skip this step).
</p>
<source>
# Configuration of the "hbase" context for null
hbase.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
hbase.period=60
# Configuration of the "jvm" context for null
jvm.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
jvm.period=60
# Configuration of the "rpc" context for null
rpc.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
rpc.period=60
</source>
</section>
<section name="Setup JMX remote access">
<p>
For remote access, you will need to configure JMX remote passwords
and access profiles. Create the files:
</p>
<dl>
<dt><code>$HBASE_HOME/conf/jmxremote.passwd</code> (set permissions
to 600)</dt>
<dd>
<source>
monitorRole monitorpass
controlRole controlpass
</source>
</dd>
<dt><code>$HBASE_HOME/conf/jmxremote.access</code></dt>
<dd>
<source>
monitorRole readonly
controlRole readwrite
</source>
</dd>
</dl>
</section>
<section name="Configure JMX in HBase startup">
<p>
Finally, edit the <code>$HBASE_HOME/conf/hbase-env.sh</code>
script to add JMX support:
</p>
<dl>
<dt><code>$HBASE_HOME/conf/hbase-env.sh</code></dt>
<dd>
<p>Add the lines:</p>
<source>
HBASE_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false"
HBASE_JMX_OPTS="$HBASE_JMX_OPTS -Dcom.sun.management.jmxremote.password.file=$HBASE_HOME/conf/jmxremote.passwd"
HBASE_JMX_OPTS="$HBASE_JMX_OPTS -Dcom.sun.management.jmxremote.access.file=$HBASE_HOME/conf/jmxremote.access"
export HBASE_MASTER_OPTS="$HBASE_JMX_OPTS -Dcom.sun.management.jmxremote.port=10101"
export HBASE_REGIONSERVER_OPTS="$HBASE_JMX_OPTS -Dcom.sun.management.jmxremote.port=10102"
</source>
</dd>
</dl>
<p>
After restarting the processes you want to monitor, you should now be
able to run JConsole (included with the JDK since JDK 5.0) to view
the statistics via JMX. HBase MBeans are exported under the
<strong><code>hadoop</code></strong> domain in JMX.
</p>
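<p>
If you would rather verify the setup programmatically than through JConsole,
the following is a minimal sketch of a JMX client. The host, port and
credentials are the hypothetical values configured above; adjust them to your
own setup.
</p>
<pre>
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class HBaseJmxProbe {
  public static void main(String[] args) throws Exception {
    // Port 10101 matches the master's jmxremote.port set in hbase-env.sh above.
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://localhost:10101/jmxrmi");
    Map env = new HashMap();
    // Credentials as defined in conf/jmxremote.passwd.
    env.put(JMXConnector.CREDENTIALS, new String[] {"monitorRole", "monitorpass"});
    JMXConnector connector = JMXConnectorFactory.connect(url, env);
    try {
      MBeanServerConnection mbsc = connector.getMBeanServerConnection();
      // HBase MBeans are exported under the "hadoop" JMX domain.
      Iterator it = mbsc.queryNames(new ObjectName("hadoop:*"), null).iterator();
      while (it.hasNext()) {
        System.out.println(it.next());
      }
    } finally {
      connector.close();
    }
  }
}
</pre>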
</section>
<section name="Understanding HBase Metrics">
<p>
For more information on understanding HBase metrics, see the <a href="book.html#hbase_metrics">metrics section</a> in the HBase Reference Guide.
</p>
</section>
</section>
</body>
</document>


@ -1,66 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright 2010 The Apache Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!DOCTYPE document PUBLIC "-//APACHE//DTD Documentation V2.0//EN"
"http://forrest.apache.org/dtd/document-v20.dtd">
<document xmlns="http://maven.apache.org/XDOC/2.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd">
<properties>
<title>
Old News
</title>
</properties>
<body>
<section name="Old News">
<p>March 27th, 2012 <a href="http://www.meetup.com/hbaseusergroup/events/56021562/">Meetup @ StumbleUpon</a> in San Francisco</p>
<p>January 19th, 2012 <a href="http://www.meetup.com/hbaseusergroup/events/46702842/">Meetup @ EBay</a></p>
<p>January 23rd, 2012 HBase 0.92.0 released. <a href="http://www.apache.org/dyn/closer.cgi/hbase/">Download it!</a></p>
<p>December 23rd, 2011 HBase 0.90.5 released. <a href="http://www.apache.org/dyn/closer.cgi/hbase/">Download it!</a></p>
<p>November 29th, 2011 <a href="http://www.meetup.com/hackathon/events/41025972/">Developer Pow-Wow in SF</a> at Salesforce HQ</p>
<p>November 7th, 2011 <a href="http://www.meetup.com/hbaseusergroup/events/35682812/">HBase Meetup in NYC (6PM)</a> at the AppNexus office</p>
<p>August 22nd, 2011 <a href="http://www.meetup.com/hbaseusergroup/events/28518471/">HBase Hackathon (11AM) and Meetup (6PM)</a> at FB in PA</p>
<p>June 30th, 2011 <a href="http://www.meetup.com/hbaseusergroup/events/20572251/">HBase Contributor Day</a>, the day after the <a href="http://developer.yahoo.com/events/hadoopsummit2011/">Hadoop Summit</a> hosted by Y!</p>
<p>June 8th, 2011 <a href="http://berlinbuzzwords.de/wiki/hbase-workshop-and-hackathon">HBase Hackathon</a> in Berlin to coincide with <a href="http://berlinbuzzwords.de/">Berlin Buzzwords</a></p>
<p>May 19th, 2011 HBase 0.90.3 released. <a href="http://www.apache.org/dyn/closer.cgi/hbase/">Download it!</a></p>
<p>April 12th, 2011 HBase 0.90.2 released. <a href="http://www.apache.org/dyn/closer.cgi/hbase/">Download it!</a></p>
<p>March 21st, <a href="http://www.meetup.com/hackathon/events/16770852/">HBase 0.92 Hackathon at StumbleUpon, SF</a></p>
<p>February 22nd, <a href="http://www.meetup.com/hbaseusergroup/events/16492913/">HUG12: February HBase User Group at StumbleUpon SF</a></p>
<p>December 13th, <a href="http://www.meetup.com/hackathon/calendar/15597555/">HBase Hackathon: Coprocessor Edition</a></p>
<p>November 19th, <a href="http://huguk.org/">Hadoop HUG in London</a> is all about HBase</p>
<p>November 15-19th, <a href="http://www.devoxx.com/display/Devoxx2K10/Home">Devoxx</a> features HBase Training and multiple HBase presentations</p>
<p>October 12th, HBase-related presentations by core contributors and users at <a href="http://www.cloudera.com/company/press-center/hadoop-world-nyc/">Hadoop World 2010</a></p>
<p>October 11th, <a href="http://www.meetup.com/hbaseusergroup/calendar/14606174/">HUG-NYC: HBase User Group NYC Edition</a> (Night before Hadoop World)</p>
<p>June 30th, <a href="http://www.meetup.com/hbaseusergroup/calendar/13562846/">HBase Contributor Workshop</a> (Day after Hadoop Summit)</p>
<p>May 10th, 2010: HBase graduates from Hadoop sub-project to Apache Top Level Project </p>
<p>Sign up for the <a href="http://www.meetup.com/hbaseusergroup/calendar/12689490/">HBase User Group Meeting, HUG10</a> hosted by Trend Micro, April 19th, 2010</p>
<p><a href="http://www.meetup.com/hbaseusergroup/calendar/12689351/">HBase User Group Meeting, HUG9</a> hosted by Mozilla, March 10th, 2010</p>
<p>Sign up for the <a href="http://www.meetup.com/hbaseusergroup/calendar/12241393/">HBase User Group Meeting, HUG8</a>, January 27th, 2010 at StumbleUpon in SF</p>
<p>September 8th, 2009: HBase 0.20.0 is faster, stronger, slimmer, and sweeter tasting than any previous HBase release. Get it off the <a href="http://www.apache.org/dyn/closer.cgi/hbase/">Releases</a> page.</p>
<p><a href="http://dev.us.apachecon.com/c/acus2009/">ApacheCon</a> in Oakland: November 2-6th, 2009:
The Apache Foundation will be celebrating its 10th anniversary in beautiful Oakland by the Bay. Lots of good talks and meetups including an HBase presentation by a couple of the lads.</p>
<p>HBase at Hadoop World in NYC: October 2nd, 2009: A few of us will be talking on Practical HBase out east at <a href="http://www.cloudera.com/hadoop-world-nyc">Hadoop World: NYC</a>.</p>
<p>HUG7 and HBase Hackathon: August 7th-9th, 2009 at StumbleUpon in SF: Sign up for the <a href="http://www.meetup.com/hbaseusergroup/calendar/10950511/">HBase User Group Meeting, HUG7</a> or for the <a href="http://www.meetup.com/hackathon/calendar/10951718/">Hackathon</a> or for both (all are welcome!).</p>
<p>June, 2009 -- HBase at HadoopSummit2009 and at NOSQL: See the <a href="http://wiki.apache.org/hadoop/HBase/HBasePresentations">presentations</a></p>
<p>March 3rd, 2009 -- HUG6: <a href="http://www.meetup.com/hbaseusergroup/calendar/9764004/">HBase User Group 6</a></p>
<p>January 30th, 2009 -- LA Hbackathon:<a href="http://www.meetup.com/hbasela/calendar/9450876/">HBase January Hackathon Los Angeles</a> at <a href="http://streamy.com" >Streamy</a> in Manhattan Beach</p>
</section>
</body>
</document>


@ -1,39 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright 2010 The Apache Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!DOCTYPE document PUBLIC "-//APACHE//DTD Documentation V2.0//EN"
"http://forrest.apache.org/dtd/document-v20.dtd">
<document xmlns="http://maven.apache.org/XDOC/2.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd">
<properties>
<title>
Running HBase in pseudo-distributed mode
</title>
</properties>
<body>
<p>This page has been retired. The contents have been moved to the
<a href="http://hbase.apache.org/book.html#distributed">Distributed Operation: Pseudo- and Fully-distributed modes</a> section
in the Reference Guide.
</p>
</body>
</document>


@ -1,401 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright 2010 The Apache Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!DOCTYPE document PUBLIC "-//APACHE//DTD Documentation V2.0//EN"
"http://forrest.apache.org/dtd/document-v20.dtd">
<document xmlns="http://maven.apache.org/XDOC/2.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd">
<properties>
<title>
HBase Replication
</title>
</properties>
<body>
<section name="Overview">
<p>
HBase replication is a way to copy data between HBase deployments. It
can serve as a disaster recovery solution and can contribute to provide
higher availability at the HBase layer. It can also serve more practically;
for example, as a way to easily copy edits from a web-facing cluster to a "MapReduce"
cluster which will process old and new data and ship back the results
automatically.
</p>
<p>
The basic architecture pattern used for HBase replication is (HBase cluster) master-push;
it is much easier to keep track of what's currently being replicated since
each region server has its own write-ahead-log (aka WAL or HLog), just like
other well known solutions like MySQL master/slave replication where
there's only one binlog to keep track of. One master cluster can
replicate to any number of slave clusters, and each region server will
participate to replicate their own stream of edits. For more information
on the different properties of master/slave replication and other types
of replication, please consult <a href="http://highscalability.com/blog/2009/8/24/how-google-serves-data-from-multiple-datacenters.html">
How Google Serves Data From Multiple Datacenters</a>.
</p>
<p>
The replication is done asynchronously, meaning that the clusters can
be geographically distant, the links between them can be offline for
some time, and rows inserted on the master cluster won't be
available at the same time on the slave clusters (eventual consistency).
</p>
<p>
The replication format used in this design is conceptually the same as
<a href="http://dev.mysql.com/doc/refman/5.1/en/replication-formats.html">
MySQL's statement-based replication</a>. Instead of SQL statements, whole
WALEdits (consisting of multiple cell inserts coming from the clients'
Put and Delete) are replicated in order to maintain atomicity.
</p>
<p>
The HLogs from each region server are the basis of HBase replication,
and must be kept in HDFS as long as they are needed to replicate data
to any slave cluster. Each RS reads from the oldest log it needs to
replicate and keeps the current position inside ZooKeeper to simplify
failure recovery. That position can be different for every slave
cluster; the same is true for the queue of HLogs to process.
</p>
<p>
The clusters participating in replication can be of asymmetric sizes
and the master cluster will do its “best effort” to balance the stream
of replication on the slave clusters by relying on randomization.
</p>
<p>
As of version 0.92 HBase supports master/master and cyclic replication as
well as replication to multiple slaves.
</p>
<img src="images/replication_overview.png"/>
</section>
<section name="Enabling replication">
<p>
The guide on enabling and using cluster replication is contained
in the API documentation shipped with your HBase distribution.
</p>
<p>
The most up-to-date documentation is
<a href="apidocs/org/apache/hadoop/hbase/replication/package-summary.html#requirements">
available at this address</a>.
</p>
</section>
<section name="Life of a log edit">
<p>
The following sections describe the life of a single edit going from a
client that communicates with a master cluster all the way to a single
slave cluster.
</p>
<section name="Normal processing">
<p>
The client uses an HBase API that sends a Put, Delete or ICV to a region
server. The key values are transformed into a WALEdit by the region
server, and the edit is inspected by the replication code that, for each family
that is scoped for replication, adds the scope to the edit. The edit
is appended to the current WAL and is then applied to the MemStore.
</p>
<p>
In a separate thread, the edit is read from the log (as part of a batch)
and only the KVs that are replicable are kept (that is, they are part
of a family scoped GLOBAL in the family's schema, are non-catalog so not
.META. or -ROOT-, and did not originate in the target slave cluster, in
the case of cyclic replication).
</p>
<p>
The edit is then tagged with the master's cluster UUID.
When the buffer is filled, or the reader hits the end of the file,
the buffer is sent to a random region server on the slave cluster.
</p>
<p>
Synchronously, the region server that receives the edits reads them
sequentially and separates each of them into buffers, one per table.
Once all edits are read, each buffer is flushed using the normal HBase
client (HTables managed by an HTablePool). This is done in order to
leverage parallel insertion (MultiPut).
The master's cluster UUID is retained in the edits applied at the
slave cluster in order to allow cyclic replication.
</p>
<p>
Back in the master cluster's region server, the offset for the current
WAL that's being replicated is registered in ZooKeeper.
</p>
</section>
<section name="Non-responding slave clusters">
<p>
The edit is inserted in the same way.
</p>
<p>
In the separate thread, the region server reads, filters and buffers
the log edits the same way as during normal processing. The slave
region server that's contacted doesn't answer the RPC, so the master
region server will sleep and retry up to a configured number of times.
If the slave RS still isn't available, the master cluster RS will select a
new subset of RS to replicate to and will retry sending the buffer of
edits.
</p>
<p>
In the meantime, the WALs will be rolled and stored in a queue in
ZooKeeper. Logs that are archived by their region server (archiving is
basically moving a log from the region server's logs directory to a
central logs archive directory) will update their paths in the in-memory
queue of the replicating thread.
</p>
<p>
When the slave cluster is finally available, the buffer will be applied
the same way as during normal processing. The master cluster RS will then
replicate the backlog of logs.
</p>
</section>
</section>
<section name="Internals">
<p>
This section describes in depth how each of replication's internal
features operate.
</p>
<section name="Choosing region servers to replicate to">
<p>
When a master cluster RS initiates a replication source to a slave cluster,
it first connects to the slave's ZooKeeper ensemble using the provided
cluster key (that key is composed of the value of hbase.zookeeper.quorum,
zookeeper.znode.parent and hbase.zookeeper.property.clientPort). It
then scans the "rs" directory to discover all the available sinks
(region servers that are accepting incoming streams of edits to replicate)
and will randomly choose a subset of them using a configured
ratio (which has a default value of 10%). For example, if a slave
cluster has 150 machines, 15 will be chosen as potential recipients for
edits that this master cluster RS will be sending. Since this is done by all
master cluster RSs, the probability that all slave RSs are used is very high,
and this method works for clusters of any size. For example, a master cluster
of 10 machines replicating to a slave cluster of 5 machines with a ratio
of 10% means that the master cluster RSs will choose one machine each
at random, thus the chance of overlapping and full usage of the slave
cluster is higher.
</p>
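<p>
In code terms the selection is just a shuffled, ratio-sized subset. The
sketch below is purely illustrative; the names do not correspond to the
actual replication classes.
</p>
<pre>
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SinkChooser {
  public static List&lt;String&gt; chooseSinks(List&lt;String&gt; slaveRegionServers,
      float ratio) {
    List&lt;String&gt; shuffled = new ArrayList&lt;String&gt;(slaveRegionServers);
    Collections.shuffle(shuffled);
    // e.g. 150 slave region servers * 0.1 = 15 potential sinks
    int count = (int) Math.ceil(shuffled.size() * ratio);
    return shuffled.subList(0, count);
  }
}
</pre>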
</section>
<section name="Keeping track of logs">
<p>
Every master cluster RS has its own znode in the replication znodes hierarchy.
It contains one znode per peer cluster (if 5 slave clusters, 5 znodes
are created), and each of these contains a queue
of HLogs to process. Each of these queues will track the HLogs created
by that RS, but they can differ in size. For example, if one slave
cluster becomes unavailable for some time then the HLogs should not be deleted,
thus they need to stay in the queue (while the others are processed).
See the section named "Region server failover" for an example.
</p>
<p>
When a source is instantiated, it contains the current HLog that the
region server is writing to. During log rolling, the new file is added
to the queue of each slave cluster's znode just before it's made available.
This ensures that all the sources are aware that a new log exists
before HLog is able to append edits into it, but this operation is
now more expensive.
The queue items are discarded when the replication thread cannot read
more entries from a file (because it reached the end of the last block)
and there are other files in the queue.
This means that if a source is up-to-date and replicates from the log
that the region server writes to, reading up to the "end" of the
current file won't delete the item in the queue.
</p>
<p>
When a log is archived (because it's not used anymore or because there
are too many of them per hbase.regionserver.maxlogs, typically because the insertion
rate is faster than region flushing), it will notify the source threads that the path
for that log changed. If a particular source was already done with
it, it will just ignore the message. If it's in the queue, the path
will be updated in memory. If the log is currently being replicated,
the change will be done atomically so that the reader doesn't try to
open the file when it's already moved. Also, moving a file is a NameNode
operation so, if the reader is currently reading the log, it won't
generate any exception.
</p>
</section>
<section name="Reading, filtering and sending edits">
<p>
By default, a source will try to read from a log file and ship log
entries as fast as possible to a sink. This is first limited by the
filtering of log entries; only KeyValues that are scoped GLOBAL and
that don't belong to catalog tables will be retained. A second limit
is imposed on the total size of the list of edits to replicate per slave,
which by default is 64MB. This means that a master cluster RS with 3 slaves
will use at most 192MB to store data to replicate. This doesn't account
for filtered data that hasn't yet been garbage collected.
</p>
<p>
Once the maximum size of edits has been buffered or the reader hits the end
of the log file, the source thread will stop reading and will choose
at random a sink to replicate to (from the list that was generated by
keeping only a subset of slave RSs). It will directly issue an RPC to
the chosen machine and will wait for the method to return. If it's
successful, the source will determine if the current file is emptied
or if it should continue to read from it. If the former, it will delete
the znode in the queue. If the latter, it will register the new offset
in the log's znode. If the RPC threw an exception, the source will retry
up to 10 times before trying a different sink.
</p>
</section>
<section name="Cleaning logs">
<p>
If replication isn't enabled, the master's logs cleaning thread will
delete old logs using a configured TTL. This doesn't work well with
replication since archived logs past their TTL may still be in a
queue. Thus, the default behavior is augmented so that if a log is
past its TTL, the cleaning thread will look up every queue until it
finds the log (while caching the ones it finds). If it's not found,
the log will be deleted. The next time it has to look for a log,
it will first use its cache.
</p>
</section>
<section name="Region server failover">
<p>
As long as region servers don't fail, keeping track of the logs in ZK
doesn't add any value. Unfortunately, they do fail, and since ZooKeeper
is highly available, we can count on it and its semantics to help us
manage the transfer of the queues.
</p>
<p>
All the master cluster RSs keep a watcher on every other one of them to be
notified when one dies (just like the master does). When it happens,
they all race to create a znode called "lock" inside the dead RS' znode
that contains its queues. The one that creates it successfully will
proceed by transferring all the queues to its own znode (one by one
since ZK doesn't support the rename operation) and will delete all the
old ones when it's done. The recovered queues' znodes will be named
with the id of the slave cluster appended with the name of the dead
server.
</p>
<p>
Once that is done, the master cluster RS will create one new source thread per
copied queue, and each of them will follow the read/filter/ship pattern.
The main difference is that those queues will never have new data since
they don't belong to their new region server, which means that when
the reader hits the end of the last log, the queue's znode will be
deleted and the master cluster RS will close that replication source.
</p>
<p>
For example, consider a master cluster with 3 region servers that's
replicating to a single slave with id '2'. The following hierarchy
represents what the znodes layout could be at some point in time. We
can see the RSs' znodes all contain a "peers" znode that contains a
single queue. The znode names in the queues represent the actual file
names on HDFS in the form "address,port.timestamp".
</p>
<pre>
/hbase/replication/rs/
1.1.1.1,60020,123456780/
2/
1.1.1.1,60020.1234 (Contains a position)
1.1.1.1,60020.1265
1.1.1.2,60020,123456790/
2/
1.1.1.2,60020.1214 (Contains a position)
1.1.1.2,60020.1248
1.1.1.2,60020.1312
1.1.1.3,60020,123456630/
2/
1.1.1.3,60020.1280 (Contains a position)
</pre>
<p>
Now let's say that 1.1.1.2 loses its ZK session. The survivors will race
to create a lock, and for some reason 1.1.1.3 wins. It will then start
transferring all the queues to its local peers znode by appending the
name of the dead server. Right before 1.1.1.3 is able to clean up the
old znodes, the layout will look like the following:
</p>
<pre>
/hbase/replication/rs/
1.1.1.1,60020,123456780/
2/
1.1.1.1,60020.1234 (Contains a position)
1.1.1.1,60020.1265
1.1.1.2,60020,123456790/
lock
2/
1.1.1.2,60020.1214 (Contains a position)
1.1.1.2,60020.1248
1.1.1.2,60020.1312
1.1.1.3,60020,123456630/
2/
1.1.1.3,60020.1280 (Contains a position)
2-1.1.1.2,60020,123456790/
1.1.1.2,60020.1214 (Contains a position)
1.1.1.2,60020.1248
1.1.1.2,60020.1312
</pre>
<p>
Some time later, but before 1.1.1.3 is able to finish replicating the
last HLog from 1.1.1.2, let's say that it dies too (also some new logs
were created in the normal queues). The last RS will then try to lock
1.1.1.3's znode and will begin transferring all the queues. The new
layout will be:
</p>
<pre>
/hbase/replication/rs/
1.1.1.1,60020,123456780/
2/
1.1.1.1,60020.1378 (Contains a position)
2-1.1.1.3,60020,123456630/
1.1.1.3,60020.1325 (Contains a position)
1.1.1.3,60020.1401
2-1.1.1.2,60020,123456790-1.1.1.3,60020,123456630/
1.1.1.2,60020.1312 (Contains a position)
1.1.1.3,60020,123456630/
lock
2/
1.1.1.3,60020.1325 (Contains a position)
1.1.1.3,60020.1401
2-1.1.1.2,60020,123456790/
1.1.1.2,60020.1312 (Contains a position)
</pre>
</section>
</section>
<section name="FAQ">
<section name="GLOBAL means replicate? Any provision to replicate only to cluster X and not to cluster Y? or is that for later?">
<p>
Yes, this is for much later.
</p>
</section>
<section name="You need a bulk edit shipper? Something that allows you transfer 64MB of edits in one go?">
<p>
You can use the HBase-provided utility called CopyTable from the package
org.apache.hadoop.hbase.mapreduce in order to have a distcp-like tool to
bulk copy data.
</p>
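<p>
An invocation looks something like the following (a sketch based on that
era's CopyTable usage; run the class without arguments to see the exact
options your version supports):
</p>
<pre>
$ bin/hbase org.apache.hadoop.hbase.mapreduce.CopyTable \
    --peer.adr=server1,server2,server3:2181:/hbase TestTable
</pre>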
</section>
<section name="Is it a mistake that WALEdit doesn't carry Put and Delete objects, that we have to reinstantiate not only when replicating but when replaying edits also?">
<p>
Yes, this behavior would help a lot but it's not currently available
in HBase (BatchUpdate had that, but it was lost in the new API).
</p>
</section>
</section>
<section name="Known bugs/missing features">
<p>
Here's a list of all the jiras that relate to major issues or missing
features in the replication implementation.
</p>
<ol>
<li>
HBASE-2611, basically if a region server dies while recovering the
queues of another dead RS, we will miss the data from the queues
that weren't copied.
</li>
</ol>
</section>
</body>
</document>


@ -1,35 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<document xmlns="http://maven.apache.org/XDOC/2.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd">
<properties>
<title>Installing HBase on Windows using Cygwin</title>
</properties>
<body>
<section name="Sponsors">
<p>The below companies have been gracious enough to provide their commercial tool offerings free of charge to the Apache HBase project.
<ul>
<li>The crew at <a href="http://www.ej-technologies.com/">ej-technologies</a> have
let us use <a href="http://www.ej-technologies.com/products/jprofiler/overview.html">JProfiler</a> for years now.</li>
<li>The lads at <a href="http://headwaysoftware.com/">headway software</a> have
given us a license for <a href="http://headwaysoftware.com/products/?code=Restructure101">Restructure101</a>
so we can untangle our interdependency mess.</li>
</ul>
</p>
</section>
</body>
</document>