HBASE-639 Add HBaseAdmin.getTableDescriptor function

HBASE-632   HTable.getMetadata is very inefficient
HBASE-654   API HTable.getMetadata().addFamily shouldn't be exposed to user


git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@662975 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Jim Kellerman 2008-06-04 04:54:47 +00:00
parent fc7351288a
commit f7dc508514
10 changed files with 242 additions and 23 deletions

View File

@ -38,6 +38,7 @@ Hbase Change Log
HBASE-663 Incorrect sequence number for cache flush
HBASE-655 Need programmatic way to add column family: need programmatic way
to enable/disable table
HBASE-654 API HTable.getMetadata().addFamily shouldn't be exposed to user
IMPROVEMENTS
HBASE-559 MR example job to count table rows
@ -71,6 +72,10 @@ Hbase Change Log
(Clint Morgan via Stack)
HBASE-579 Add hadoop 0.17.x
HBASE-660 [Migration] addColumn/deleteColumn functionality in MetaUtils
HBASE-632 HTable.getMetadata is very inefficient
NEW FEATURES
HBASE-639 Add HBaseAdmin.getTableDescriptor function
Release 0.1.2 - 05/13/2008

View File

@ -31,6 +31,7 @@ public interface HConstants {
static final Long ZERO_L = Long.valueOf(0L);
static final String NINES = "99999999999999";
static final String ZEROES = "00000000000000";
// For migration

View File

@ -182,6 +182,7 @@ public class HTableDescriptor implements WritableComparable {
* descriptors.
* @see #getNameAsString()
*/
@Override
public String toString() {
return "name: " + Bytes.toString(this.name) + ", families: " +
this.families.values();

View File

@ -60,6 +60,14 @@ public interface HConnection {
*/
public HTableDescriptor[] listTables() throws IOException;
/**
* @param tableName
* @return table metadata
* @throws IOException
*/
public HTableDescriptor getHTableDescriptor(byte[] tableName)
throws IOException;
/**
* Find the location of the region of <i>tableName</i> that <i>row</i>
* lives in.

View File

@ -202,9 +202,8 @@ public class HConnectionManager implements HConstants {
if (this.master == null) {
if (masterLocation == null) {
throw new MasterNotRunningException();
} else {
throw new MasterNotRunningException(masterLocation.toString());
}
throw new MasterNotRunningException(masterLocation.toString());
}
return this.master;
}
@ -267,8 +266,11 @@ public class HConnectionManager implements HConstants {
MetaScannerVisitor visitor = new MetaScannerVisitor() {
public boolean processRow(RowResult rowResult,
HRegionLocation metaLocation, HRegionInfo info) throws IOException {
/** {@inheritDoc} */
public boolean processRow(
@SuppressWarnings("unused") RowResult rowResult,
@SuppressWarnings("unused") HRegionLocation metaLocation,
HRegionInfo info) {
// Only examine the rows where the startKey is zero length
if (info.getStartKey().length == 0) {
@ -283,6 +285,40 @@ public class HConnectionManager implements HConstants {
return uniqueTables.toArray(new HTableDescriptor[uniqueTables.size()]);
}
  /**
   * Fetches the descriptor for <code>tableName</code> by scanning the
   * catalog table that holds its regions, instead of listing every table
   * (the inefficiency flagged in HBASE-632).
   *
   * @param tableName name of the table to look up
   * @return a read-only {@link UnmodifyableHTableDescriptor} for the table,
   * or <code>null</code> if no region of the table is found in the catalog
   * @throws IOException if the catalog scan fails
   */
  public HTableDescriptor getHTableDescriptor(byte[] tableName)
  throws IOException {
    // The root table's descriptor is a compile-time constant; no scan needed.
    if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) {
      return new UnmodifyableHTableDescriptor(HTableDescriptor.ROOT_TABLEDESC);
    }
    // Regions of .META. are recorded in -ROOT-; all other tables' regions
    // are recorded in .META. — pick the catalog table accordingly.
    HTable meta = new HTable(conf,
        Bytes.equals(tableName, HConstants.META_TABLE_NAME) ?
        HConstants.ROOT_TABLE_NAME : HConstants.META_TABLE_NAME);
    // Start scanning at the earliest possible region name for this table
    // (empty start key, all-zero id component).
    Scanner s = meta.getScanner(HConstants.COL_REGIONINFO_ARRAY,
        HRegionInfo.createRegionName(tableName, null, HConstants.ZEROES));
    try {
      RowResult r = null;
      while ((r = s.next()) != null) {
        Cell c = r.get(HConstants.COL_REGIONINFO);
        if (c != null) {
          // Rows lacking a parseable HRegionInfo are skipped silently.
          HRegionInfo info = Writables.getHRegionInfoOrNull(c.getValue());
          if (info != null) {
            if (Bytes.equals(info.getTableDesc().getName(), tableName)) {
              // Wrap so callers cannot mutate catalog metadata (HBASE-654).
              return new UnmodifyableHTableDescriptor(info.getTableDesc());
            }
            // NOTE(review): catalog rows appear to sort by table name, so a
            // non-matching row presumably means the table is absent; an early
            // break here would avoid scanning the rest of the catalog —
            // confirm the ordering guarantee before changing.
          }
        }
      }
      return null;
    } finally {
      // Always release the scanner, even on exception mid-scan.
      s.close();
    }
  }
/** {@inheritDoc} */
public HRegionLocation locateRegion(final byte [] tableName,
final byte [] row)

View File

@ -270,7 +270,9 @@ public class HTable {
// The root region is always online
return false;
}
HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
HTable meta = new HTable(conf,
Bytes.equals(tableName, HConstants.META_TABLE_NAME) ?
HConstants.ROOT_TABLE_NAME : HConstants.META_TABLE_NAME);
Scanner s = meta.getScanner(HConstants.COL_REGIONINFO_ARRAY,
HRegionInfo.createRegionName(tableName, null, HConstants.NINES));
try {
@ -336,20 +338,12 @@ public class HTable {
}
/**
* TODO: Make the return read-only.
* @return table metadata
* @throws IOException
*/
@Deprecated
public HTableDescriptor getMetadata() throws IOException {
HTableDescriptor [] metas = this.connection.listTables();
HTableDescriptor result = null;
for (int i = 0; i < metas.length; i++) {
if (Bytes.equals(metas[i].getName(), this.tableName)) {
result = metas[i];
break;
}
}
return result;
return this.connection.getHTableDescriptor(this.tableName);
}
/**
@ -388,16 +382,15 @@ public class HTable {
* @return A map of HRegionInfo with it's server address
* @throws IOException
*/
@SuppressWarnings("null")
public Map<HRegionInfo, HServerAddress> getRegionsInfo() throws IOException {
final HashMap<HRegionInfo, HServerAddress> regionMap =
new HashMap<HRegionInfo, HServerAddress>();
MetaScannerVisitor visitor = new MetaScannerVisitor() {
@SuppressWarnings("unused")
public boolean processRow(@SuppressWarnings("unused") RowResult rowResult,
HRegionLocation metaLocation, HRegionInfo info)
throws IOException {
HRegionLocation metaLocation, HRegionInfo hri) {
HRegionInfo info = new UnmodifyableHRegionInfo(hri);
if (!(Bytes.equals(info.getTableDesc().getName(), getTableName()))) {
return false;
}

View File

@ -89,6 +89,7 @@ class MetaScanner implements HConstants {
*
* @param rowResult
* @param metaLocation
* @param info
* @return A boolean to know if it should continue to loop in the region
* @throws IOException
*/

View File

@ -0,0 +1,87 @@
/**
* Copyright 2008 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
/**
 * Read-only view of an {@link HRegionInfo}: every constructor wraps the
 * table descriptor in an {@link UnmodifyableHTableDescriptor}, and both
 * mutators throw {@link UnsupportedOperationException}.  Handed out by the
 * client so callers cannot alter catalog metadata (HBASE-654).
 */
class UnmodifyableHRegionInfo extends HRegionInfo {
  /** Default constructor - creates an empty, read-only object. */
  UnmodifyableHRegionInfo() {
    super(new UnmodifyableHTableDescriptor(), null, null);
  }

  /**
   * Construct a read-only HRegionInfo with explicit parameters; the region
   * is marked as not split.
   *
   * @param tableDesc the table descriptor (wrapped read-only)
   * @param startKey first key in region
   * @param endKey end of key range
   * @throws IllegalArgumentException if the superclass rejects the arguments
   */
  UnmodifyableHRegionInfo(final HTableDescriptor tableDesc,
      final byte [] startKey, final byte [] endKey)
  throws IllegalArgumentException {
    super(new UnmodifyableHTableDescriptor(tableDesc), startKey, endKey, false);
  }

  /**
   * Construct a read-only HRegionInfo with explicit parameters.
   *
   * @param tableDesc the table descriptor (wrapped read-only)
   * @param startKey first key in region
   * @param endKey end of key range
   * @param split true if this region has split and we have daughter regions
   * that may or may not hold references to this region
   * @throws IllegalArgumentException if the superclass rejects the arguments
   */
  UnmodifyableHRegionInfo(HTableDescriptor tableDesc,
      final byte [] startKey, final byte [] endKey, final boolean split)
  throws IllegalArgumentException {
    super(new UnmodifyableHTableDescriptor(tableDesc), startKey, endKey, split);
  }

  /**
   * Creates a read-only copy of an existing HRegionInfo.
   *
   * @param info the region info to copy
   */
  UnmodifyableHRegionInfo(HRegionInfo info) {
    super(new UnmodifyableHTableDescriptor(info.getTableDesc()),
        info.getStartKey(), info.getEndKey(), info.isSplit());
  }

  /**
   * Disabled mutator: this object is read-only.
   *
   * @param split ignored
   * @throws UnsupportedOperationException always
   */
  @Override
  public void setSplit(boolean split) {
    throw new UnsupportedOperationException("HRegionInfo is read-only");
  }

  /**
   * Disabled mutator: this object is read-only.
   *
   * @param offLine ignored
   * @throws UnsupportedOperationException always
   */
  @Override
  public void setOffline(boolean offLine) {
    throw new UnsupportedOperationException("HRegionInfo is read-only");
  }
}

View File

@ -0,0 +1,89 @@
/**
* Copyright 2008 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;
/**
 * Read-only view of an {@link HTableDescriptor}: {@link #addFamily} and
 * {@link #removeFamily} throw {@link UnsupportedOperationException}.
 * Returned to clients so they cannot mutate table metadata (HBASE-654).
 */
class UnmodifyableHTableDescriptor extends HTableDescriptor {
  /**
   * Constructs an empty object.
   * For deserializing an HTableDescriptor instance only.
   */
  UnmodifyableHTableDescriptor() {
    super();
  }

  /**
   * Constructor.
   * @param name Table name.
   * @throws IllegalArgumentException if passed a table name
   * that is made of other than 'word' characters, underscore or period: i.e.
   * <code>[a-zA-Z_0-9.]</code>
   * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   */
  UnmodifyableHTableDescriptor(final String name) {
    this(Bytes.toBytes(name));
  }

  /**
   * Constructor.
   * @param name Table name.
   * @throws IllegalArgumentException if passed a table name
   * that is made of other than 'word' characters, underscore or period: i.e.
   * <code>[a-zA-Z_0-9.]</code>
   * @see <a href="HADOOP-1581">HADOOP-1581 HBASE: Un-openable tablename bug</a>
   */
  UnmodifyableHTableDescriptor(final byte [] name) {
    super(name);
  }

  /**
   * Create a read-only copy of an HTableDescriptor.
   * @param desc the descriptor to copy
   */
  UnmodifyableHTableDescriptor(final HTableDescriptor desc) {
    super(desc.getName());
    // Copy families via super.addFamily: the overridden addFamily below
    // throws, so the copy must bypass it.
    for (HColumnDescriptor c: desc.getFamilies()) {
      super.addFamily(c);
    }
  }

  /**
   * Does NOT add a column family. This object is immutable.
   * @param family HColumnDescriptor of family to add
   * @throws UnsupportedOperationException always
   */
  @Override
  public void addFamily(final HColumnDescriptor family) {
    throw new UnsupportedOperationException("HTableDescriptor is read-only");
  }

  /**
   * Does NOT remove a column family. This object is immutable.
   * @param column family name (ignored)
   * @return never returns normally
   * @throws UnsupportedOperationException always
   */
  @Override
  public HColumnDescriptor removeFamily(final byte [] column) {
    throw new UnsupportedOperationException("HTableDescriptor is read-only");
  }
}

View File

@ -22,8 +22,6 @@ package org.apache.hadoop.hbase.client;
import java.io.IOException;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseClusterTestCase;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
@ -39,7 +37,6 @@ import org.apache.hadoop.io.Text;
* Tests HTable
*/
public class TestHTable extends HBaseClusterTestCase implements HConstants {
private static final Log LOG = LogFactory.getLog(TestHTable.class);
private static final HColumnDescriptor column =
new HColumnDescriptor(COLUMN_FAMILY);
@ -84,7 +81,8 @@ public class TestHTable extends HBaseClusterTestCase implements HConstants {
HTable a = new HTable(conf, tableAname);
// Assert the metadata is good.
HTableDescriptor meta = a.getMetadata();
HTableDescriptor meta =
a.getConnection().getHTableDescriptor(tableAdesc.getName());
assertTrue(meta.equals(tableAdesc));
BatchUpdate batchUpdate = new BatchUpdate(row);