HADOOP-1391. Part1: includes create/delete table; enable/disable table; add/remove column.

Patch has been tested locally and a new test has been added for administrative functions. Still to do: merge regions.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@542592 13f79535-47bb-0310-9956-ffa450edef68

parent e4194e10e5
commit f35462842b
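For orientation before the diff itself: a minimal sketch of how a client might drive the administrative API this patch introduces. The method names are taken from the HClient and HMasterInterface changes below; the Configuration setup and the ordering of disable/enable around schema changes are assumptions, not something this commit states.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.hbase.*;

    public class AdminSketch {
      public static void main(String[] args) throws Exception {
        // Assumes HClient takes a Hadoop Configuration pointing at a running master.
        HClient client = new HClient(new Configuration());

        // Create a table with one column family (family names end in ':').
        HTableDescriptor desc = new HTableDescriptor("testtable");
        desc.addFamily(new HColumnDescriptor("contents:"));
        client.createTable(desc);

        Text table = new Text("testtable");

        // Schema changes; taking the table offline first is an assumption here.
        client.disableTable(table);
        client.addColumn(table, new HColumnDescriptor("anchors:"));
        client.deleteColumn(table, new Text("contents:"));
        client.enableTable(table);

        // Remove the table entirely.
        client.deleteTable(table);
      }
    }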
@@ -62,7 +62,7 @@ public abstract class HAbstractScanner implements HInternalScannerInterface {
       try {
         int colpos = column.indexOf(":") + 1;
         if(colpos == 0) {
-          throw new IllegalArgumentException("Column name has no family indicator.");
+          throw new InvalidColumnNameException("Column name has no family indicator.");
         }

         String columnkey = column.substring(colpos);
@@ -15,11 +15,13 @@
  */
 package org.apache.hadoop.hbase;

+import java.lang.Class;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.NoSuchElementException;
 import java.util.Random;
+import java.util.SortedMap;
 import java.util.TreeMap;
 import java.util.TreeSet;

@@ -30,6 +32,7 @@ import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RemoteException;

 /**
  * HClient manages a connection to a single HRegionServer.
@@ -41,6 +44,10 @@ public class HClient implements HConstants {
     COLUMN_FAMILY
   };

+  private static final Text[] REGIONINFO = {
+    COL_REGIONINFO
+  };
+
   private static final Text EMPTY_START_ROW = new Text();

   private long clientTimeout;
@@ -61,11 +68,11 @@ public class HClient implements HConstants {

   // Map tableName -> (Map startRow -> (HRegionInfo, HServerAddress)
-  private TreeMap<Text, TreeMap<Text, TableInfo>> tablesToServers;
+  private TreeMap<Text, SortedMap<Text, TableInfo>> tablesToServers;

   // For the "current" table: Map startRow -> (HRegionInfo, HServerAddress)
-  private TreeMap<Text, TableInfo> tableServers;
+  private SortedMap<Text, TableInfo> tableServers;

   // Known region HServerAddress.toString() -> HRegionInterface

@@ -87,21 +94,44 @@ public class HClient implements HConstants {
     this.numRetries = conf.getInt("hbase.client.retries.number", 2);

     this.master = null;
-    this.tablesToServers = new TreeMap<Text, TreeMap<Text, TableInfo>>();
+    this.tablesToServers = new TreeMap<Text, SortedMap<Text, TableInfo>>();
     this.tableServers = null;
     this.servers = new TreeMap<String, HRegionInterface>();

     // For row mutation operations

     this.currentRegion = null;
     this.currentServer = null;
     this.rand = new Random();
   }

-  /**
-   * Find the address of the master and connect to it
-   */
-  private void checkMaster() {
+  private void handleRemoteException(RemoteException e) throws IOException {
+    String msg = e.getMessage();
+    if(e.getClassName().equals("org.apache.hadoop.hbase.InvalidColumnNameException")) {
+      throw new InvalidColumnNameException(msg);
+
+    } else if(e.getClassName().equals("org.apache.hadoop.hbase.LockException")) {
+      throw new LockException(msg);
+
+    } else if(e.getClassName().equals("org.apache.hadoop.hbase.MasterNotRunningException")) {
+      throw new MasterNotRunningException(msg);
+
+    } else if(e.getClassName().equals("org.apache.hadoop.hbase.NoServerForRegionException")) {
+      throw new NoServerForRegionException(msg);
+
+    } else if(e.getClassName().equals("org.apache.hadoop.hbase.NotServingRegionException")) {
+      throw new NotServingRegionException(msg);
+
+    } else if(e.getClassName().equals("org.apache.hadoop.hbase.TableNotDisabledException")) {
+      throw new TableNotDisabledException(msg);
+
+    } else {
+      throw e;
+    }
+  }
+
+  /* Find the address of the master and connect to it */
+  private void checkMaster() throws IOException {
     if (this.master != null) {
       return;
     }
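Hadoop RPC delivers server-side failures to the client wrapped in a RemoteException; the handleRemoteException helper above re-creates the typed HBase exception named by getClassName(). A caller can therefore catch the specific type whether it arose locally or on the far side of the RPC — a sketch, with a hypothetical table name:

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.hbase.*;

    public class TypedExceptionSketch {
      static void open(HClient client) throws Exception {
        try {
          client.openTable(new Text("testtable"));   // hypothetical table
        } catch (NoServerForRegionException e) {
          // Typed exception, re-created locally from the RemoteException class name.
          System.err.println("no server for region: " + e.getMessage());
        }
      }
    }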
@@ -136,103 +166,406 @@ public class HClient implements HConstants {
       }
     }
     if(this.master == null) {
-      throw new IllegalStateException("Master is not running");
+      throw new MasterNotRunningException();
     }
   }

+  //////////////////////////////////////////////////////////////////////////////
+  // Administrative methods
+  //////////////////////////////////////////////////////////////////////////////
+
+  /**
+   * Creates a new table
+   *
+   * @param desc - table descriptor for table
+   *
+   * @throws IllegalArgumentException - if the table name is reserved
+   * @throws MasterNotRunningException - if master is not running
+   * @throws NoServerForRegionException - if root region is not being served
+   * @throws IOException
+   */
   public synchronized void createTable(HTableDescriptor desc) throws IOException {
-    if(desc.getName().equals(ROOT_TABLE_NAME)
-        || desc.getName().equals(META_TABLE_NAME)) {
-      throw new IllegalArgumentException(desc.getName().toString()
-          + " is a reserved table name");
-    }
+    checkReservedTableName(desc.getName());
     checkMaster();
-    locateRootRegion();
-    this.master.createTable(desc);
+    try {
+      this.master.createTable(desc);
+
+    } catch(RemoteException e) {
+      handleRemoteException(e);
+    }
+
+    // Save the current table
+
+    SortedMap<Text, TableInfo> oldServers = this.tableServers;
+
+    try {
+      // Wait for new table to come on-line
+
+      findServersForTable(desc.getName());
+
+    } finally {
+      if(oldServers != null && oldServers.size() != 0) {
+        // Restore old current table if there was one
+
+        this.tableServers = oldServers;
+      }
+    }
   }

   public synchronized void deleteTable(Text tableName) throws IOException {
+    checkReservedTableName(tableName);
     checkMaster();
-    locateRootRegion();
-    this.master.deleteTable(tableName);
+    TableInfo firstMetaServer = getFirstMetaServerForTable(tableName);
+
+    try {
+      this.master.deleteTable(tableName);
+
+    } catch(RemoteException e) {
+      handleRemoteException(e);
+    }
+
+    // Wait until first region is deleted
+
+    HRegionInterface server = getHRegionConnection(firstMetaServer.serverAddress);
+
+    DataInputBuffer inbuf = new DataInputBuffer();
+    HStoreKey key = new HStoreKey();
+    HRegionInfo info = new HRegionInfo();
+    for(int tries = 0; tries < numRetries; tries++) {
+      long scannerId = -1L;
+      try {
+        scannerId = server.openScanner(firstMetaServer.regionInfo.regionName,
+            REGIONINFO, tableName);
+        LabelledData[] values = server.next(scannerId, key);
+        if(values == null || values.length == 0) {
+          break;
+        }
+        boolean found = false;
+        for(int j = 0; j < values.length; j++) {
+          if(values[j].getLabel().equals(COL_REGIONINFO)) {
+            byte[] bytes = new byte[values[j].getData().getSize()];
+            System.arraycopy(values[j].getData().get(), 0, bytes, 0, bytes.length);
+            inbuf.reset(bytes, bytes.length);
+            info.readFields(inbuf);
+            if(info.tableDesc.getName().equals(tableName)) {
+              found = true;
+            }
+          }
+        }
+        if(!found) {
+          break;
+        }
+
+      } finally {
+        if(scannerId != -1L) {
+          try {
+            server.close(scannerId);
+
+          } catch(Exception e) {
+            LOG.warn(e);
+          }
+        }
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Sleep. Waiting for first region to be deleted from " + tableName);
+      }
+      try {
+        Thread.sleep(clientTimeout);
+
+      } catch(InterruptedException e) {
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Wake. Waiting for first region to be deleted from " + tableName);
+      }
+    }
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("table deleted " + tableName);
+    }
+  }
+
+  public synchronized void addColumn(Text tableName, HColumnDescriptor column) throws IOException {
+    checkReservedTableName(tableName);
+    checkMaster();
+    try {
+      this.master.addColumn(tableName, column);
+
+    } catch(RemoteException e) {
+      handleRemoteException(e);
+    }
+  }
+
+  public synchronized void deleteColumn(Text tableName, Text columnName) throws IOException {
+    checkReservedTableName(tableName);
+    checkMaster();
+    try {
+      this.master.deleteColumn(tableName, columnName);
+
+    } catch(RemoteException e) {
+      handleRemoteException(e);
+    }
+  }
+
+  public synchronized void mergeRegions(Text regionName1, Text regionName2) throws IOException {
+
+  }
+
+  public synchronized void enableTable(Text tableName) throws IOException {
+    checkReservedTableName(tableName);
+    checkMaster();
+    TableInfo firstMetaServer = getFirstMetaServerForTable(tableName);
+
+    try {
+      this.master.enableTable(tableName);
+
+    } catch(RemoteException e) {
+      handleRemoteException(e);
+    }
+
+    // Wait until first region is enabled
+
+    HRegionInterface server = getHRegionConnection(firstMetaServer.serverAddress);
+
+    DataInputBuffer inbuf = new DataInputBuffer();
+    HStoreKey key = new HStoreKey();
+    HRegionInfo info = new HRegionInfo();
+    for(int tries = 0; tries < numRetries; tries++) {
+      int valuesfound = 0;
+      long scannerId = -1L;
+      try {
+        scannerId = server.openScanner(firstMetaServer.regionInfo.regionName,
+            REGIONINFO, tableName);
+        LabelledData[] values = server.next(scannerId, key);
+        if(values == null || values.length == 0) {
+          if(valuesfound == 0) {
+            throw new NoSuchElementException("table " + tableName + " not found");
+          }
+        }
+        valuesfound += 1;
+        boolean isenabled = false;
+        for(int j = 0; j < values.length; j++) {
+          if(values[j].getLabel().equals(COL_REGIONINFO)) {
+            byte[] bytes = new byte[values[j].getData().getSize()];
+            System.arraycopy(values[j].getData().get(), 0, bytes, 0, bytes.length);
+            inbuf.reset(bytes, bytes.length);
+            info.readFields(inbuf);
+            isenabled = !info.offLine;
+          }
+        }
+        if(isenabled) {
+          break;
+        }
+
+      } finally {
+        if(scannerId != -1L) {
+          try {
+            server.close(scannerId);
+
+          } catch(Exception e) {
+            LOG.warn(e);
+          }
+        }
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Sleep. Waiting for first region to be enabled from " + tableName);
+      }
+      try {
+        Thread.sleep(clientTimeout);
+
+      } catch(InterruptedException e) {
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Wake. Waiting for first region to be enabled from " + tableName);
+      }
+    }
+  }
+
+  public synchronized void disableTable(Text tableName) throws IOException {
+    checkReservedTableName(tableName);
+    checkMaster();
+    TableInfo firstMetaServer = getFirstMetaServerForTable(tableName);
+
+    try {
+      this.master.disableTable(tableName);
+
+    } catch(RemoteException e) {
+      handleRemoteException(e);
+    }
+
+    // Wait until first region is disabled
+
+    HRegionInterface server = getHRegionConnection(firstMetaServer.serverAddress);
+
+    DataInputBuffer inbuf = new DataInputBuffer();
+    HStoreKey key = new HStoreKey();
+    HRegionInfo info = new HRegionInfo();
+    for(int tries = 0; tries < numRetries; tries++) {
+      int valuesfound = 0;
+      long scannerId = -1L;
+      try {
+        scannerId = server.openScanner(firstMetaServer.regionInfo.regionName,
+            REGIONINFO, tableName);
+        LabelledData[] values = server.next(scannerId, key);
+        if(values == null || values.length == 0) {
+          if(valuesfound == 0) {
+            throw new NoSuchElementException("table " + tableName + " not found");
+          }
+        }
+        valuesfound += 1;
+        boolean disabled = false;
+        for(int j = 0; j < values.length; j++) {
+          if(values[j].getLabel().equals(COL_REGIONINFO)) {
+            byte[] bytes = new byte[values[j].getData().getSize()];
+            System.arraycopy(values[j].getData().get(), 0, bytes, 0, bytes.length);
+            inbuf.reset(bytes, bytes.length);
+            info.readFields(inbuf);
+            disabled = info.offLine;
+          }
+        }
+        if(disabled) {
+          break;
+        }
+
+      } finally {
+        if(scannerId != -1L) {
+          try {
+            server.close(scannerId);
+
+          } catch(Exception e) {
+            LOG.warn(e);
+          }
+        }
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Sleep. Waiting for first region to be disabled from " + tableName);
+      }
+      try {
+        Thread.sleep(clientTimeout);
+
+      } catch(InterruptedException e) {
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Wake. Waiting for first region to be disabled from " + tableName);
+      }
+    }
   }

   public synchronized void shutdown() throws IOException {
     checkMaster();
     this.master.shutdown();
   }

+  /*
+   * Verifies that the specified table name is not a reserved name
+   * @param tableName - the table name to be checked
+   * @throws IllegalArgumentException - if the table name is reserved
+   */
+  private void checkReservedTableName(Text tableName) {
+    if(tableName.equals(ROOT_TABLE_NAME)
+        || tableName.equals(META_TABLE_NAME)) {
+
+      throw new IllegalArgumentException(tableName + " is a reserved table name");
+    }
+  }
+
+  private TableInfo getFirstMetaServerForTable(Text tableName) throws IOException {
+    SortedMap<Text, TableInfo> metaservers = findMetaServersForTable(tableName);
+    return metaservers.get(metaservers.firstKey());
+  }
+
+  //////////////////////////////////////////////////////////////////////////////
+  // Client API
+  //////////////////////////////////////////////////////////////////////////////
+
+  /**
+   * Loads information so that a table can be manipulated.
+   *
+   * @param tableName - the table to be located
+   * @throws IOException - if the table can not be located after retrying
+   */
   public synchronized void openTable(Text tableName) throws IOException {
     if(tableName == null || tableName.getLength() == 0) {
       throw new IllegalArgumentException("table name cannot be null or zero length");
     }
     this.tableServers = tablesToServers.get(tableName);
-    if(this.tableServers == null ) { // We don't know where the table is
-      findTableInMeta(tableName); // Load the information from meta
+    if(this.tableServers == null ) {
+      // We don't know where the table is.
+      // Load the information from meta.
+      this.tableServers = findServersForTable(tableName);
     }
   }

-  private void findTableInMeta(Text tableName) throws IOException {
-    TreeMap<Text, TableInfo> metaServers =
-      this.tablesToServers.get(META_TABLE_NAME);
-    if (metaServers == null) { // Don't know where the meta is
-      loadMetaFromRoot(tableName);
-      if (tableName.equals(META_TABLE_NAME) || tableName.equals(ROOT_TABLE_NAME)) {
-        // All we really wanted was the meta or root table
-        return;
-      }
-      metaServers = this.tablesToServers.get(META_TABLE_NAME);
-    }
-    this.tableServers = new TreeMap<Text, TableInfo>();
-    for(int tries = 0;
-        this.tableServers.size() == 0 && tries < this.numRetries;
-        tries++) {
-      Text firstMetaRegion = (metaServers.containsKey(tableName))?
-          tableName: metaServers.headMap(tableName).lastKey();
-      for(TableInfo t: metaServers.tailMap(firstMetaRegion).values()) {
-        scanOneMetaRegion(t, tableName);
-      }
-      if (this.tableServers.size() == 0) {
-        // Table not assigned. Sleep and try again
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Sleeping. Table " + tableName
-              + " not currently being served.");
-        }
-        try {
-          Thread.sleep(this.clientTimeout);
-        } catch(InterruptedException e) {
-        }
-        if(LOG.isDebugEnabled()) {
-          LOG.debug("Wake. Retry finding table " + tableName);
-        }
-      }
-    }
-    if (this.tableServers.size() == 0) {
-      throw new IOException("failed to scan " + META_TABLE_NAME + " after "
-          + this.numRetries + " retries");
-    }
-    this.tablesToServers.put(tableName, this.tableServers);
+  /*
+   * Locates a table by searching the META region
+   *
+   * @param tableName - name of table to find servers for
+   * @return - map of first row to table info for all regions in the table
+   * @throws IOException
+   */
+  private SortedMap<Text, TableInfo> findServersForTable(Text tableName)
+      throws IOException {
+
+    SortedMap<Text, TableInfo> servers = null;
+    if(tableName.equals(ROOT_TABLE_NAME)) {
+      servers = locateRootRegion();
+
+    } else if(tableName.equals(META_TABLE_NAME)) {
+      servers = loadMetaFromRoot();
+
+    } else {
+      servers = new TreeMap<Text, TableInfo>();
+      for(TableInfo t: findMetaServersForTable(tableName).values()) {
+        servers.putAll(scanOneMetaRegion(t, tableName));
+      }
+      this.tablesToServers.put(tableName, servers);
+    }
+    return servers;
+  }
+
+  /*
+   * Finds the meta servers that contain information about the specified table
+   * @param tableName - the name of the table to get information about
+   * @return - returns a SortedMap of the meta servers
+   * @throws IOException
+   */
+  private SortedMap<Text, TableInfo> findMetaServersForTable(Text tableName)
+      throws IOException {
+
+    SortedMap<Text, TableInfo> metaServers =
+      this.tablesToServers.get(META_TABLE_NAME);
+
+    if(metaServers == null) {                 // Don't know where the meta is
+      metaServers = loadMetaFromRoot();
+    }
+    Text firstMetaRegion = (metaServers.containsKey(tableName)) ?
+        tableName : metaServers.headMap(tableName).lastKey();
+
+    return metaServers.tailMap(firstMetaRegion);
   }

   /*
    * Load the meta table from the root table.
+   *
+   * @return map of first row to TableInfo for all meta regions
+   * @throws IOException
    */
-  private void loadMetaFromRoot(Text tableName) throws IOException {
-    locateRootRegion();
-    if(tableName.equals(ROOT_TABLE_NAME)) { // All we really wanted was the root
-      return;
+  private TreeMap<Text, TableInfo> loadMetaFromRoot() throws IOException {
+    SortedMap<Text, TableInfo> rootRegion =
+      this.tablesToServers.get(ROOT_TABLE_NAME);
+
+    if(rootRegion == null) {
+      rootRegion = locateRootRegion();
     }
-    scanRoot();
+    return scanRoot(rootRegion.get(rootRegion.firstKey()));
   }

   /*
-   * Repeatedly try to find the root region by asking the HMaster for where it
-   * could be.
+   * Repeatedly try to find the root region by asking the master for where it is
+   *
+   * @return TreeMap<Text, TableInfo> for root region if found
+   * @throws NoServerForRegionException - if the root region can not be located after retrying
+   * @throws IOException
    */
-  private void locateRootRegion() throws IOException {
+  private TreeMap<Text, TableInfo> locateRootRegion() throws IOException {
     checkMaster();

     HServerAddress rootRegionLocation = null;
@@ -256,18 +589,14 @@ public class HClient implements HConstants {
       }
     }
     if(rootRegionLocation == null) {
-      throw new IOException("Timed out trying to locate root region");
+      throw new NoServerForRegionException(
+          "Timed out trying to locate root region");
     }

     HRegionInterface rootRegion = getHRegionConnection(rootRegionLocation);

     try {
       rootRegion.getRegionInfo(HGlobals.rootRegionInfo.regionName);
-      this.tableServers = new TreeMap<Text, TableInfo>();
-      this.tableServers.put(EMPTY_START_ROW,
-          new TableInfo(HGlobals.rootRegionInfo, rootRegionLocation));
-
-      this.tablesToServers.put(ROOT_TABLE_NAME, this.tableServers);
       break;

     } catch(NotServingRegionException e) {
@@ -293,120 +622,149 @@ public class HClient implements HConstants {
     }

     if (rootRegionLocation == null) {
-      throw new IOException("unable to locate root region server");
+      throw new NoServerForRegionException(
+          "unable to locate root region server");
     }
+
+    TreeMap<Text, TableInfo> rootServer = new TreeMap<Text, TableInfo>();
+    rootServer.put(EMPTY_START_ROW,
+        new TableInfo(HGlobals.rootRegionInfo, rootRegionLocation));
+
+    this.tablesToServers.put(ROOT_TABLE_NAME, rootServer);
+    return rootServer;
   }

   /*
    * Scans the root region to find all the meta regions
+   * @return - TreeMap of meta region servers
+   * @throws IOException
    */
-  private void scanRoot() throws IOException {
-    this.tableServers = new TreeMap<Text, TableInfo>();
-    TableInfo t = this.tablesToServers.get(ROOT_TABLE_NAME).get(EMPTY_START_ROW);
-    for(int tries = 0;
-        scanOneMetaRegion(t, META_TABLE_NAME) == 0 && tries < this.numRetries;
-        tries++) {
-
-      // The table is not yet being served. Sleep and retry.
-
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Sleeping. Table " + META_TABLE_NAME
-            + " not currently being served.");
-      }
-      try {
-        Thread.sleep(this.clientTimeout);
-
-      } catch(InterruptedException e) {
-      }
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Wake. Retry finding table " + META_TABLE_NAME);
-      }
-    }
-    if(this.tableServers.size() == 0) {
-      throw new IOException("failed to scan " + ROOT_TABLE_NAME + " after "
-          + this.numRetries + " retries");
-    }
-    this.tablesToServers.put(META_TABLE_NAME, this.tableServers);
+  private TreeMap<Text, TableInfo> scanRoot(TableInfo rootRegion)
+      throws IOException {
+    TreeMap<Text, TableInfo> metaservers =
+      scanOneMetaRegion(rootRegion, META_TABLE_NAME);
+    this.tablesToServers.put(META_TABLE_NAME, metaservers);
+    return metaservers;
   }

   /*
    * Scans a single meta region
-   * @param t the table we're going to scan
+   * @param t the meta region we're going to scan
    * @param tableName the name of the table we're looking for
-   * @return returns the number of servers that are serving the table
+   * @return returns a map of startingRow to TableInfo
+   * @throws NoSuchElementException - if table does not exist
+   * @throws IllegalStateException - if table is offline
+   * @throws NoServerForRegionException - if table can not be found after retrying
+   * @throws IOException
    */
-  private int scanOneMetaRegion(TableInfo t, Text tableName)
+  private TreeMap<Text, TableInfo> scanOneMetaRegion(TableInfo t, Text tableName)
       throws IOException {

     HRegionInterface server = getHRegionConnection(t.serverAddress);
-    int servers = 0;
-    long scannerId = -1L;
-    try {
-      scannerId =
-        server.openScanner(t.regionInfo.regionName, META_COLUMNS, tableName);
-
-      DataInputBuffer inbuf = new DataInputBuffer();
-      while(true) {
-        HRegionInfo regionInfo = null;
-        String serverAddress = null;
-        HStoreKey key = new HStoreKey();
-        LabelledData[] values = server.next(scannerId, key);
-        if(values.length == 0) {
-          if(servers == 0) {
-            // If we didn't find any servers then the table does not exist
-            throw new NoSuchElementException("table '" + tableName
-                + "' does not exist");
-          }
-
-          // We found at least one server for the table and now we're done.
-          break;
-        }
-
-        byte[] bytes = null;
-        TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
-        for(int i = 0; i < values.length; i++) {
-          bytes = new byte[values[i].getData().getSize()];
-          System.arraycopy(values[i].getData().get(), 0, bytes, 0, bytes.length);
-          results.put(values[i].getLabel(), bytes);
-        }
-        regionInfo = new HRegionInfo();
-        bytes = results.get(COL_REGIONINFO);
-        inbuf.reset(bytes, bytes.length);
-        regionInfo.readFields(inbuf);
-
-        if(!regionInfo.tableDesc.getName().equals(tableName)) {
-          // We're done
-          break;
-        }
-
-        bytes = results.get(COL_SERVER);
-        if(bytes == null || bytes.length == 0) {
-          // We need to rescan because the table we want is unassigned.
-
-          if(LOG.isDebugEnabled()) {
-            LOG.debug("no server address for " + regionInfo.toString());
-          }
-          servers = 0;
-          this.tableServers.clear();
-          break;
-        }
-        servers += 1;
-        serverAddress = new String(bytes, UTF8_ENCODING);
-
-        this.tableServers.put(regionInfo.startKey,
-            new TableInfo(regionInfo, new HServerAddress(serverAddress)));
-      }
-      return servers;
-
-    } finally {
-      if(scannerId != -1L) {
-        server.close(scannerId);
-      }
-    }
-  }
+    TreeMap<Text, TableInfo> servers = new TreeMap<Text, TableInfo>();
+    for(int tries = 0; servers.size() == 0 && tries < this.numRetries;
+        tries++) {
+
+      long scannerId = -1L;
+      try {
+        scannerId =
+          server.openScanner(t.regionInfo.regionName, META_COLUMNS, tableName);
+
+        DataInputBuffer inbuf = new DataInputBuffer();
+        while(true) {
+          HRegionInfo regionInfo = null;
+          String serverAddress = null;
+          HStoreKey key = new HStoreKey();
+          LabelledData[] values = server.next(scannerId, key);
+          if(values.length == 0) {
+            if(servers.size() == 0) {
+              // If we didn't find any servers then the table does not exist
+              throw new NoSuchElementException("table '" + tableName
+                  + "' does not exist");
+            }
+
+            // We found at least one server for the table and now we're done.
+            break;
+          }
+
+          byte[] bytes = null;
+          TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
+          for(int i = 0; i < values.length; i++) {
+            bytes = new byte[values[i].getData().getSize()];
+            System.arraycopy(values[i].getData().get(), 0, bytes, 0, bytes.length);
+            results.put(values[i].getLabel(), bytes);
+          }
+          regionInfo = new HRegionInfo();
+          bytes = results.get(COL_REGIONINFO);
+          inbuf.reset(bytes, bytes.length);
+          regionInfo.readFields(inbuf);
+
+          if(!regionInfo.tableDesc.getName().equals(tableName)) {
+            // We're done
+            break;
+          }
+
+          if(regionInfo.offLine) {
+            throw new IllegalStateException("table offline: " + tableName);
+          }
+
+          bytes = results.get(COL_SERVER);
+          if(bytes == null || bytes.length == 0) {
+            // We need to rescan because the table we want is unassigned.
+
+            if(LOG.isDebugEnabled()) {
+              LOG.debug("no server address for " + regionInfo.toString());
+            }
+            servers.clear();
+            break;
+          }
+          serverAddress = new String(bytes, UTF8_ENCODING);
+
+          servers.put(regionInfo.startKey,
+              new TableInfo(regionInfo, new HServerAddress(serverAddress)));
+        }
+      } finally {
+        if(scannerId != -1L) {
+          try {
+            server.close(scannerId);
+
+          } catch(Exception e) {
+            LOG.warn(e);
+          }
+        }
+      }
+
+      if(servers.size() == 0 && tries == this.numRetries - 1) {
+        throw new NoServerForRegionException("failed to find server for "
+            + tableName + " after " + this.numRetries + " retries");
+      }
+
+      // The table is not yet being served. Sleep and retry.
+
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Sleeping. Table " + tableName
+            + " not currently being served.");
+      }
+      try {
+        Thread.sleep(this.clientTimeout);
+
+      } catch(InterruptedException e) {
+      }
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Wake. Retry finding table " + tableName);
+      }
+    }
+    return servers;
+  }
+
+  /*
+   * Establishes a connection to the region server at the specified address
+   * @param regionServer - the server to connect to
+   * @throws IOException
+   */
   synchronized HRegionInterface getHRegionConnection(HServerAddress regionServer)
       throws IOException {

@@ -436,13 +794,12 @@ public class HClient implements HConstants {
       throws IOException {
     TreeSet<HTableDescriptor> uniqueTables = new TreeSet<HTableDescriptor>();

-    TreeMap<Text, TableInfo> metaTables =
+    SortedMap<Text, TableInfo> metaTables =
       this.tablesToServers.get(META_TABLE_NAME);

     if(metaTables == null) {
       // Meta is not loaded yet so go do that
-      loadMetaFromRoot(META_TABLE_NAME);
-      metaTables = tablesToServers.get(META_TABLE_NAME);
+      metaTables = loadMetaFromRoot();
     }

     for (TableInfo t: metaTables.values()) {

@@ -512,10 +869,11 @@ public class HClient implements HConstants {

     // Reload information for the whole table

-    findTableInMeta(info.regionInfo.tableDesc.getName());
+    this.tableServers = findServersForTable(info.regionInfo.tableDesc.getName());

     if(this.tableServers.get(info.regionInfo.startKey) == null ) {
-      throw new IOException("region " + info.regionInfo.regionName + " does not exist");
+      throw new IOException("region " + info.regionInfo.regionName
+          + " does not exist");
     }
   }

@@ -879,8 +1237,7 @@ public class HClient implements HConstants {
     System.err.println(" address is read from configuration.");
     System.err.println("Commands:");
     System.err.println(" shutdown     Shutdown the HBase cluster.");
-    System.err.println(" createTable  Takes table name, column families, " +
-      "and maximum versions.");
+    System.err.println(" createTable  Takes table name, column families,... ");
     System.err.println(" deleteTable  Takes a table name.");
     System.err.println(" iistTables   List all tables.");
     System.err.println("Example Usage:");

@@ -928,16 +1285,14 @@ public class HClient implements HConstants {
     }

     if (cmd.equals("createTable")) {
-      if (i + 3 > args.length) {
+      if (i + 2 > args.length) {
         throw new IllegalArgumentException("Must supply a table name " +
-          ", at least one column family and maximum number of versions");
+          "and at least one column family");
       }
-      int maxVersions = (Integer.parseInt(args[args.length - 1]));
-      HTableDescriptor desc =
-        new HTableDescriptor(args[i + 1], maxVersions);
+      HTableDescriptor desc = new HTableDescriptor(args[i + 1]);
       boolean addedFamily = false;
       for (int ii = i + 2; ii < (args.length - 1); ii++) {
-        desc.addFamily(new Text(args[ii]));
+        desc.addFamily(new HColumnDescriptor(args[ii]));
         addedFamily = true;
       }
       if (!addedFamily) {
@@ -0,0 +1,290 @@
+/**
+ * Copyright 2006 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.WritableComparable;
+
+/**
+ * A HColumnDescriptor contains information about a column family such as the
+ * number of versions, compression settings, etc.
+ */
+public class HColumnDescriptor implements WritableComparable {
+  
+  // For future backward compatibility
+  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte)1;
+  
+  // Legal family names can only contain 'word characters' and end in a colon.
+  private static final Pattern LEGAL_FAMILY_NAME = Pattern.compile("\\w+:");
+  
+  /**
+   * The type of compression.
+   * @see org.apache.hadoop.io.SequenceFile.Writer
+   */
+  public static enum CompressionType {
+    /** Do not compress records. */
+    NONE,
+    /** Compress values only, each separately. */
+    RECORD,
+    /** Compress sequences of records together in blocks. */
+    BLOCK
+  }
+  
+  // Internal values for compression type used for serialization
+  private static final byte COMPRESSION_NONE = (byte)0;
+  private static final byte COMPRESSION_RECORD = (byte)1;
+  private static final byte COMPRESSION_BLOCK = (byte)2;
+  
+  private static final int DEFAULT_N_VERSIONS = 3;
+  
+  Text name;                  // Column family name
+  int maxVersions;            // Number of versions to keep
+  byte compressionType;       // Compression setting if any
+  boolean inMemory;           // Serve reads from in-memory cache
+  int maxValueLength;         // Maximum value size
+  boolean bloomFilterEnabled; // True if column has a bloom filter
+  byte versionNumber;         // Version number of this class
+  
+  /**
+   * Default constructor. Must be present for Writable.
+   */
+  public HColumnDescriptor() {
+    this.name = new Text();
+    this.maxVersions = DEFAULT_N_VERSIONS;
+    this.compressionType = COMPRESSION_NONE;
+    this.inMemory = false;
+    this.maxValueLength = Integer.MAX_VALUE;
+    this.bloomFilterEnabled = false;
+    this.versionNumber = COLUMN_DESCRIPTOR_VERSION;
+  }
+  
+  public HColumnDescriptor(String columnName) {
+    this();
+    this.name.set(columnName);
+  }
+  
+  /**
+   * Constructor - specify all parameters.
+   * @param name - Column family name
+   * @param maxVersions - Maximum number of versions to keep
+   * @param compression - Compression type
+   * @param inMemory - If true, column data should be kept in a
+   * HRegionServer's cache
+   * @param maxValueLength - Restrict values to <= this value
+   * @param bloomFilter - Enable a bloom filter for this column
+   *
+   * @throws IllegalArgumentException if passed a family name that is made of
+   * other than 'word' characters: i.e. <code>[a-zA-Z_0-9]</code> and does not
+   * end in a <code>:</code>
+   * @throws IllegalArgumentException if the number of versions is <= 0
+   */
+  public HColumnDescriptor(Text name, int maxVersions, CompressionType compression,
+      boolean inMemory, int maxValueLength, boolean bloomFilter) {
+    String familyStr = name.toString();
+    Matcher m = LEGAL_FAMILY_NAME.matcher(familyStr);
+    if(m == null || !m.matches()) {
+      throw new IllegalArgumentException(
+          "Family names can only contain 'word characters' and must end with a ':'");
+    }
+    this.name = name;
+    
+    if(maxVersions <= 0) {
+      // TODO: Allow maxVersion of 0 to be the way you say "Keep all versions".
+      // Until there is support, consider 0 or < 0 -- a configuration error.
+      throw new IllegalArgumentException("Maximum versions must be positive");
+    }
+    this.maxVersions = maxVersions;
+    
+    if(compression == CompressionType.NONE) {
+      this.compressionType = COMPRESSION_NONE;
+      
+    } else if(compression == CompressionType.BLOCK) {
+      this.compressionType = COMPRESSION_BLOCK;
+      
+    } else if(compression == CompressionType.RECORD) {
+      this.compressionType = COMPRESSION_RECORD;
+      
+    } else {
+      assert(false);
+    }
+    this.inMemory = inMemory;
+    this.maxValueLength = maxValueLength;
+    this.bloomFilterEnabled = bloomFilter;
+    this.versionNumber = COLUMN_DESCRIPTOR_VERSION;
+  }
+  
+  /**
+   * @return - name of column family
+   */
+  public Text getName() {
+    return name;
+  }
+  
+  /**
+   * @return - compression type being used for the column family
+   */
+  public CompressionType getCompression() {
+    CompressionType value = null;
+    
+    if(this.compressionType == COMPRESSION_NONE) {
+      value = CompressionType.NONE;
+      
+    } else if(this.compressionType == COMPRESSION_BLOCK) {
+      value = CompressionType.BLOCK;
+      
+    } else if(this.compressionType == COMPRESSION_RECORD) {
+      value = CompressionType.RECORD;
+      
+    } else {
+      assert(false);
+    }
+    return value;
+  }
+  
+  /**
+   * @return - maximum number of versions
+   */
+  public int getMaxVersions() {
+    return this.maxVersions;
+  }
+  
+  @Override
+  public String toString() {
+    String compression = "none";
+    switch(compressionType) {
+    case COMPRESSION_NONE:
+      break;
+    case COMPRESSION_RECORD:
+      compression = "record";
+      break;
+    case COMPRESSION_BLOCK:
+      compression = "block";
+      break;
+    default:
+      assert(false);
+    }
+    
+    return "(" + name + ", max versions: " + maxVersions + ", compression: "
+        + compression + ", in memory: " + inMemory + ", max value length: "
+        + maxValueLength + ", bloom filter:" + bloomFilterEnabled + ")";
+  }
+  
+  @Override
+  public boolean equals(Object obj) {
+    return compareTo(obj) == 0;
+  }
+  
+  @Override
+  public int hashCode() {
+    int result = this.name.hashCode();
+    result ^= Integer.valueOf(this.maxVersions).hashCode();
+    result ^= Byte.valueOf(this.compressionType).hashCode();
+    result ^= Boolean.valueOf(this.inMemory).hashCode();
+    result ^= Integer.valueOf(this.maxValueLength).hashCode();
+    result ^= Boolean.valueOf(this.bloomFilterEnabled).hashCode();
+    result ^= Byte.valueOf(this.versionNumber).hashCode();
+    return result;
+  }
+  
+  //////////////////////////////////////////////////////////////////////////////
+  // Writable
+  //////////////////////////////////////////////////////////////////////////////
+  
+  public void readFields(DataInput in) throws IOException {
+    this.versionNumber = in.readByte();
+    this.name.readFields(in);
+    this.maxVersions = in.readInt();
+    this.compressionType = in.readByte();
+    this.inMemory = in.readBoolean();
+    this.maxValueLength = in.readInt();
+    this.bloomFilterEnabled = in.readBoolean();
+  }
+  
+  public void write(DataOutput out) throws IOException {
+    out.writeByte(this.versionNumber);
+    this.name.write(out);
+    out.writeInt(this.maxVersions);
+    out.writeByte(this.compressionType);
+    out.writeBoolean(this.inMemory);
+    out.writeInt(this.maxValueLength);
+    out.writeBoolean(this.bloomFilterEnabled);
+  }
+  
+  //////////////////////////////////////////////////////////////////////////////
+  // Comparable
+  //////////////////////////////////////////////////////////////////////////////
+  
+  public int compareTo(Object o) {
+    // NOTE: we don't do anything with the version number yet.
+    // Version numbers will come into play when we introduce an incompatible
+    // change in the future such as the addition of access control lists.
+    
+    HColumnDescriptor other = (HColumnDescriptor)o;
+    
+    int result = this.name.compareTo(other.getName());
+    
+    if(result == 0) {
+      result = Integer.valueOf(this.maxVersions).compareTo(
+          Integer.valueOf(other.maxVersions));
+    }
+    
+    if(result == 0) {
+      result = Integer.valueOf(this.compressionType).compareTo(
+          Integer.valueOf(other.compressionType));
+    }
+    
+    if(result == 0) {
+      if(this.inMemory == other.inMemory) {
+        result = 0;
+        
+      } else if(this.inMemory) {
+        result = -1;
+        
+      } else {
+        result = 1;
+      }
+    }
+    
+    if(result == 0) {
+      result = other.maxValueLength - this.maxValueLength;
+    }
+    
+    if(result == 0) {
+      if(this.bloomFilterEnabled == other.bloomFilterEnabled) {
+        result = 0;
+        
+      } else if(this.bloomFilterEnabled) {
+        result = -1;
+        
+      } else {
+        result = 1;
+      }
+    }
+    
+    return result;
+  }
+}
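The new HColumnDescriptor above carries per-family schema. A sketch of constructing one with the full constructor; the values shown are illustrative, and the ones chosen here happen to match the defaults of the no-argument constructor except for the name:

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.hbase.*;

    public class FamilySketch {
      public static HColumnDescriptor contentsFamily() {
        return new HColumnDescriptor(
            new Text("contents:"),                  // name: word characters ending in ':'
            3,                                      // maxVersions (3 is also the default)
            HColumnDescriptor.CompressionType.NONE, // compression
            false,                                  // inMemory
            Integer.MAX_VALUE,                      // maxValueLength
            false);                                 // bloomFilter
      }
    }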
@@ -25,12 +25,14 @@ public class HGlobals implements HConstants {
   static HTableDescriptor metaTableDesc = null;

   static {
-    rootTableDesc = new HTableDescriptor(ROOT_TABLE_NAME.toString(), 1);
-    rootTableDesc.addFamily(COLUMN_FAMILY);
+    rootTableDesc = new HTableDescriptor(ROOT_TABLE_NAME.toString());
+    rootTableDesc.addFamily(new HColumnDescriptor(COLUMN_FAMILY, 1,
+        HColumnDescriptor.CompressionType.NONE, false, Integer.MAX_VALUE, false));

     rootRegionInfo = new HRegionInfo(0L, rootTableDesc, null, null);

-    metaTableDesc = new HTableDescriptor(META_TABLE_NAME.toString(), 1);
-    metaTableDesc.addFamily(COLUMN_FAMILY);
+    metaTableDesc = new HTableDescriptor(META_TABLE_NAME.toString());
+    metaTableDesc.addFamily(new HColumnDescriptor(COLUMN_FAMILY, 1,
+        HColumnDescriptor.CompressionType.NONE, false, Integer.MAX_VALUE, false));
   }
 }
[One file's diff is suppressed because it is too large and is not shown here.]
@@ -41,6 +41,14 @@ public interface HMasterInterface extends VersionedProtocol {
   public void createTable(HTableDescriptor desc) throws IOException;
   public void deleteTable(Text tableName) throws IOException;

+  public void addColumn(Text tableName, HColumnDescriptor column) throws IOException;
+  public void deleteColumn(Text tableName, Text columnName) throws IOException;
+
+  public void mergeRegions(Text regionName1, Text regionName2) throws IOException;
+
+  public void enableTable(Text tableName) throws IOException;
+  public void disableTable(Text tableName) throws IOException;
+
   /**
    * Shutdown an HBase cluster.
    */
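These interface methods are what HClient invokes over Hadoop RPC. A sketch of obtaining the master proxy directly; the address, port, and version constant are assumptions (HClient normally does this internally):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.ipc.RPC;
    import org.apache.hadoop.hbase.*;

    public class MasterProxySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // "127.0.0.1:60000" and the versionID constant are placeholders.
        HMasterInterface master = (HMasterInterface) RPC.getProxy(
            HMasterInterface.class, HMasterInterface.versionID,
            new HServerAddress("127.0.0.1:60000").getInetSocketAddress(), conf);
        master.disableTable(new Text("testtable"));
      }
    }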
@@ -30,12 +30,12 @@ public class HMsg implements Writable {
   public static final byte MSG_CALL_SERVER_STARTUP = 4;
   public static final byte MSG_REGIONSERVER_STOP = 5;
   public static final byte MSG_REGION_CLOSE_WITHOUT_REPORT = 6;
-  public static final byte MSG_REGION_CLOSE_AND_DELETE = 7;

   public static final byte MSG_REPORT_OPEN = 100;
   public static final byte MSG_REPORT_CLOSE = 101;
   public static final byte MSG_REGION_SPLIT = 102;
   public static final byte MSG_NEW_REGION = 103;
+  public static final byte MSG_REPORT_EXITING = 104;

   byte msg;
   HRegionInfo info;
@@ -333,12 +333,12 @@ public class HRegion implements HConstants {
     }

     // Load in all the HStores.
-    for(Iterator<Text> it = this.regionInfo.tableDesc.families().iterator();
-        it.hasNext(); ) {
-      Text colFamily = HStoreKey.extractFamily(it.next());
+    for(Map.Entry<Text, HColumnDescriptor> e :
+        this.regionInfo.tableDesc.families().entrySet()) {
+
+      Text colFamily = HStoreKey.extractFamily(e.getKey());
       stores.put(colFamily, new HStore(dir, this.regionInfo.regionName,
-          colFamily, this.regionInfo.tableDesc.getMaxVersions(), fs,
-          oldLogFile, conf));
+          e.getValue(), fs, oldLogFile, conf));
     }

     // Get rid of any splits or merges that were lost in-progress

@@ -378,14 +378,6 @@ public class HRegion implements HConstants {
     return closed;
   }

-  /** Closes and deletes this HRegion. Called when doing a table deletion, for example */
-  public void closeAndDelete() throws IOException {
-    LOG.info("deleting region: " + regionInfo.regionName);
-    close();
-    deleteRegion(fs, dir, regionInfo.regionName);
-    LOG.info("region deleted: " + regionInfo.regionName);
-  }
-
   /**
    * Close down this HRegion. Flush the cache, shut down each HStore, don't
    * service any more calls.
@@ -35,6 +35,7 @@ public class HRegionInfo implements Writable {
   public Text startKey;
   public Text endKey;
   public Text regionName;
+  public boolean offLine;

   public HRegionInfo() {
     this.regionId = 0;

@@ -42,16 +43,12 @@ public class HRegionInfo implements Writable {
     this.startKey = new Text();
     this.endKey = new Text();
     this.regionName = new Text();
+    this.offLine = false;
   }

-  public HRegionInfo(final byte [] serializedBytes) {
+  public HRegionInfo(final byte [] serializedBytes) throws IOException {
     this();
-    try {
-      readFields(new DataInputStream(
-          new ByteArrayInputStream(serializedBytes)));
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
+    readFields(new DataInputStream(new ByteArrayInputStream(serializedBytes)));
   }

   public HRegionInfo(long regionId, HTableDescriptor tableDesc,

@@ -79,6 +76,8 @@ public class HRegionInfo implements Writable {
     this.regionName = new Text(tableDesc.getName() + "_" +
       (startKey == null ? "" : startKey.toString()) + "_" +
       regionId);
+
+    this.offLine = false;
   }

   @Override

@@ -98,6 +97,7 @@ public class HRegionInfo implements Writable {
     startKey.write(out);
     endKey.write(out);
     regionName.write(out);
+    out.writeBoolean(offLine);
   }

   public void readFields(DataInput in) throws IOException {

@@ -106,5 +106,6 @@ public class HRegionInfo implements Writable {
     this.startKey.readFields(in);
     this.endKey.readFields(in);
     this.regionName.readFields(in);
+    this.offLine = in.readBoolean();
   }
 }
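Because offLine is now appended to the serialized form, HRegionInfo records written before this change cannot be read back by the new code (and vice versa). A round-trip sketch using the Writable plumbing shown above:

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import org.apache.hadoop.hbase.*;

    public class RegionInfoRoundTrip {
      static HRegionInfo roundTrip(HRegionInfo info) throws Exception {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        info.write(new DataOutputStream(out));  // now ends with the offLine boolean
        // The byte[] constructor now declares IOException instead of wrapping it.
        return new HRegionInfo(out.toByteArray());
      }
    }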
@@ -602,6 +602,13 @@ public class HRegionServer
         }
       }
     }
+    try {
+      HMsg[] exitMsg = { new HMsg(HMsg.MSG_REPORT_EXITING) };
+      hbaseMaster.regionServerReport(info, exitMsg);
+
+    } catch(IOException e) {
+      LOG.warn(e);
+    }
     try {
       LOG.info("stopping server at: " + info.getServerAddress().toString());

@@ -747,13 +754,6 @@ public class HRegionServer
       closeRegion(msg.getRegionInfo(), false);
       break;

-    case HMsg.MSG_REGION_CLOSE_AND_DELETE:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("MSG_REGION_CLOSE_AND_DELETE");
-      }
-      closeAndDeleteRegion(msg.getRegionInfo());
-      break;
-
     default:
       throw new IOException("Impossible state during msg processing. Instruction: " + msg);
     }

@@ -799,27 +799,6 @@ public class HRegionServer
     }
   }

-  private void closeAndDeleteRegion(HRegionInfo info) throws IOException {
-    this.lock.writeLock().lock();
-    HRegion region = null;
-    try {
-      region = regions.remove(info.regionName);
-    } finally {
-      this.lock.writeLock().unlock();
-    }
-    if(region != null) {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("deleting region " + info.regionName);
-      }
-
-      region.closeAndDelete();
-
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("region " + info.regionName + " deleted");
-      }
-    }
-  }
-
   /** Called either when the master tells us to restart or from stop() */
   private void closeAllRegions() {
     Vector<HRegion> regionsToClose = new Vector<HRegion>();
@@ -101,10 +101,10 @@ public class HRegiondirReader {
   private HTableDescriptor getTableDescriptor(final FileSystem fs,
       final Path d, final String tableName)
   throws IOException {
-    HTableDescriptor desc = new HTableDescriptor(tableName, 1);
+    HTableDescriptor desc = new HTableDescriptor(tableName);
     Text [] families = getFamilies(fs, d);
     for (Text f: families) {
-      desc.addFamily(f);
+      desc.addFamily(new HColumnDescriptor(f.toString()));
     }
     return desc;
   }

@@ -163,7 +163,7 @@ public class HRegiondirReader {
   private void dump(final HRegionInfo info) throws IOException {
     HRegion r = new HRegion(this.parentdir, null,
         FileSystem.get(this.conf), conf, info, null, null);
-    Text [] families = info.tableDesc.families().toArray(new Text [] {});
+    Text [] families = info.tableDesc.families().keySet().toArray(new Text [] {});
     HInternalScannerInterface scanner = r.getScanner(families, new Text());
     HStoreKey key = new HStoreKey();
     TreeMap<Text, BytesWritable> results = new TreeMap<Text, BytesWritable>();

@@ -183,8 +183,8 @@ public class HRegiondirReader {
     // followed by cell content.
     while(scanner.next(key, results)) {
       for (Map.Entry<Text, BytesWritable> es: results.entrySet()) {
         Text colname = es.getKey();
         BytesWritable colvalue = es.getValue();
         Object value = null;
         byte[] bytes = new byte[colvalue.getSize()];
         if (colname.toString().equals("info:regioninfo")) {

@@ -219,4 +219,4 @@ public class HRegiondirReader {
       }
     }
   }
 }
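With families() now returning a TreeMap<Text, HColumnDescriptor> instead of a TreeSet<Text>, callers that only need the family names go through keySet(), as dump() does here and TestHBaseCluster does further down; callers that need the per-family settings walk the entries. A small sketch of both patterns:

    // Names only: the map keys are the family names.
    Text[] names = desc.families().keySet().toArray(new Text[] {});

    // Names plus per-family settings: walk the map entries.
    for (Map.Entry<Text, HColumnDescriptor> e : desc.families().entrySet()) {
      System.out.println(e.getKey() + " keeps "
          + e.getValue().getMaxVersions() + " versions");
    }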
@@ -52,8 +52,9 @@ public class HStore {

   Path dir;
   Text regionName;
-  Text colFamily;
-  int maxVersions;
+  HColumnDescriptor family;
+  Text familyName;
+  SequenceFile.CompressionType compression;
   FileSystem fs;
   Configuration conf;
   Path mapdir;

@@ -98,23 +99,37 @@ public class HStore {
    * <p>It's assumed that after this constructor returns, the reconstructionLog
    * file will be deleted (by whoever has instantiated the HStore).
    */
-  public HStore(Path dir, Text regionName, Text colFamily, int maxVersions,
+  public HStore(Path dir, Text regionName, HColumnDescriptor family,
       FileSystem fs, Path reconstructionLog, Configuration conf)
   throws IOException {
     this.dir = dir;
     this.regionName = regionName;
-    this.colFamily = colFamily;
-    this.maxVersions = maxVersions;
+    this.family = family;
+    this.familyName = HStoreKey.extractFamily(this.family.getName());
+    this.compression = SequenceFile.CompressionType.NONE;
+
+    if(family.getCompression() != HColumnDescriptor.CompressionType.NONE) {
+      if(family.getCompression() == HColumnDescriptor.CompressionType.BLOCK) {
+        this.compression = SequenceFile.CompressionType.BLOCK;
+
+      } else if(family.getCompression() == HColumnDescriptor.CompressionType.RECORD) {
+        this.compression = SequenceFile.CompressionType.RECORD;
+
+      } else {
+        assert(false);
+      }
+    }
+
     this.fs = fs;
     this.conf = conf;

-    this.mapdir = HStoreFile.getMapDir(dir, regionName, colFamily);
+    this.mapdir = HStoreFile.getMapDir(dir, regionName, familyName);
     fs.mkdirs(mapdir);
-    this.loginfodir = HStoreFile.getInfoDir(dir, regionName, colFamily);
+    this.loginfodir = HStoreFile.getInfoDir(dir, regionName, familyName);
     fs.mkdirs(loginfodir);

     if(LOG.isDebugEnabled()) {
-      LOG.debug("starting HStore for " + regionName + "/"+ colFamily);
+      LOG.debug("starting HStore for " + regionName + "/"+ familyName);
     }

     // Either restart or get rid of any leftover compaction work. Either way,

@@ -123,7 +138,7 @@ public class HStore {

     this.compactdir = new Path(dir, COMPACTION_DIR);
     Path curCompactStore =
-      HStoreFile.getHStoreDir(compactdir, regionName, colFamily);
+      HStoreFile.getHStoreDir(compactdir, regionName, familyName);
     if(fs.exists(curCompactStore)) {
       processReadyCompaction();
       fs.delete(curCompactStore);

@@ -134,7 +149,7 @@ public class HStore {
     // corresponding one in 'loginfodir'. Without a corresponding log info
     // file, the entry in 'mapdir' must be deleted.
     Vector<HStoreFile> hstoreFiles
-      = HStoreFile.loadHStoreFiles(conf, dir, regionName, colFamily, fs);
+      = HStoreFile.loadHStoreFiles(conf, dir, regionName, familyName, fs);
     for(Iterator<HStoreFile> it = hstoreFiles.iterator(); it.hasNext(); ) {
       HStoreFile hsf = it.next();
       mapFiles.put(hsf.loadInfo(fs), hsf);

@@ -187,7 +202,7 @@ public class HStore {
         Text column = val.getColumn();
         if (!key.getRegionName().equals(this.regionName) ||
             column.equals(HLog.METACOLUMN) ||
-            HStoreKey.extractFamily(column).equals(this.colFamily)) {
+            HStoreKey.extractFamily(column).equals(this.familyName)) {
           if (LOG.isDebugEnabled()) {
             LOG.debug("Passing on edit " + key.getRegionName() + ", "
                 + key.getRegionName() + ", " + column.toString() + ": "

@@ -230,12 +245,12 @@ public class HStore {
           new MapFile.Reader(fs, e.getValue().getMapFilePath().toString(), conf));
     }

-    LOG.info("HStore online for " + this.regionName + "/" + this.colFamily);
+    LOG.info("HStore online for " + this.regionName + "/" + this.familyName);
   }

   /** Turn off all the MapFile readers */
   public void close() throws IOException {
-    LOG.info("closing HStore for " + this.regionName + "/" + this.colFamily);
+    LOG.info("closing HStore for " + this.regionName + "/" + this.familyName);
     this.lock.obtainWriteLock();
     try {
       for (MapFile.Reader map: maps.values()) {

@@ -244,7 +259,7 @@ public class HStore {
       maps.clear();
       mapFiles.clear();

-      LOG.info("HStore closed for " + this.regionName + "/" + this.colFamily);
+      LOG.info("HStore closed for " + this.regionName + "/" + this.familyName);
     } finally {
       this.lock.releaseWriteLock();
     }

@@ -276,13 +291,13 @@ public class HStore {

     synchronized(flushLock) {
       if(LOG.isDebugEnabled()) {
-        LOG.debug("flushing HStore " + this.regionName + "/" + this.colFamily);
+        LOG.debug("flushing HStore " + this.regionName + "/" + this.familyName);
       }

       // A. Write the TreeMap out to the disk

       HStoreFile flushedFile
-        = HStoreFile.obtainNewHStoreFile(conf, dir, regionName, colFamily, fs);
+        = HStoreFile.obtainNewHStoreFile(conf, dir, regionName, familyName, fs);

       Path mapfile = flushedFile.getMapFilePath();
       if(LOG.isDebugEnabled()) {

@@ -290,17 +305,17 @@ public class HStore {
       }

       MapFile.Writer out = new MapFile.Writer(conf, fs, mapfile.toString(),
-          HStoreKey.class, BytesWritable.class);
+          HStoreKey.class, BytesWritable.class, compression);

       try {
         for (Map.Entry<HStoreKey, BytesWritable> es: inputCache.entrySet()) {
           HStoreKey curkey = es.getKey();
-          if (this.colFamily.equals(HStoreKey.extractFamily(curkey.getColumn()))) {
+          if (this.familyName.equals(HStoreKey.extractFamily(curkey.getColumn()))) {
             out.append(curkey, es.getValue());
           }
         }
         if(LOG.isDebugEnabled()) {
-          LOG.debug("HStore " + this.regionName + "/" + this.colFamily + " flushed");
+          LOG.debug("HStore " + this.regionName + "/" + this.familyName + " flushed");
         }

       } finally {

@@ -325,7 +340,7 @@ public class HStore {
       mapFiles.put(logCacheFlushId, flushedFile);
       if(LOG.isDebugEnabled()) {
         LOG.debug("HStore available for " + this.regionName + "/"
-            + this.colFamily + " flush id=" + logCacheFlushId);
+            + this.familyName + " flush id=" + logCacheFlushId);
       }

     } finally {

@@ -373,10 +388,10 @@ public class HStore {
   void compactHelper(boolean deleteSequenceInfo) throws IOException {
     synchronized(compactLock) {
       if(LOG.isDebugEnabled()) {
-        LOG.debug("started compaction of " + this.regionName + "/" + this.colFamily);
+        LOG.debug("started compaction of " + this.regionName + "/" + this.familyName);
       }

-      Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, colFamily);
+      Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, familyName);
       fs.mkdirs(curCompactStore);

       try {

@@ -409,11 +424,11 @@ public class HStore {
         }

         HStoreFile compactedOutputFile
-          = new HStoreFile(conf, compactdir, regionName, colFamily, -1);
+          = new HStoreFile(conf, compactdir, regionName, familyName, -1);

         if(toCompactFiles.size() == 1) {
           if(LOG.isDebugEnabled()) {
-            LOG.debug("nothing to compact for " + this.regionName + "/" + this.colFamily);
+            LOG.debug("nothing to compact for " + this.regionName + "/" + this.familyName);
           }

           HStoreFile hsf = toCompactFiles.elementAt(0);

@@ -426,7 +441,7 @@ public class HStore {

         MapFile.Writer compactedOut = new MapFile.Writer(conf, fs,
             compactedOutputFile.getMapFilePath().toString(), HStoreKey.class,
-            BytesWritable.class);
+            BytesWritable.class, compression);

         try {

@@ -507,7 +522,7 @@ public class HStore {
             timesSeen = 1;
           }

-          if(timesSeen <= maxVersions) {
+          if(timesSeen <= family.getMaxVersions()) {

             // Keep old versions until we have maxVersions worth.
             // Then just skip them.

@@ -592,7 +607,7 @@ public class HStore {
       processReadyCompaction();

       if(LOG.isDebugEnabled()) {
-        LOG.debug("compaction complete for " + this.regionName + "/" + this.colFamily);
+        LOG.debug("compaction complete for " + this.regionName + "/" + this.familyName);
       }

     } finally {

@@ -625,7 +640,7 @@ public class HStore {
     // 1. Acquiring the write-lock

-    Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, colFamily);
+    Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, familyName);
     this.lock.obtainWriteLock();
     try {
       Path doneFile = new Path(curCompactStore, COMPACTION_DONE);

@@ -714,10 +729,10 @@ public class HStore {
       }

       HStoreFile compactedFile
-        = new HStoreFile(conf, compactdir, regionName, colFamily, -1);
+        = new HStoreFile(conf, compactdir, regionName, familyName, -1);

       HStoreFile finalCompactedFile
-        = HStoreFile.obtainNewHStoreFile(conf, dir, regionName, colFamily, fs);
+        = HStoreFile.obtainNewHStoreFile(conf, dir, regionName, familyName, fs);

       fs.rename(compactedFile.getMapFilePath(), finalCompactedFile.getMapFilePath());
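The HStore constructor now hard-wires the mapping from a column family's declared compression to the SequenceFile compression used for the store's MapFiles, with assert(false) for anything unrecognized. The same decision isolated in a helper, as a sketch (the method name is mine, and it assumes CompressionType is an enum, which the == comparisons in the hunk suggest):

    // Map a column family's compression setting onto the SequenceFile
    // CompressionType that flushes and compactions pass to MapFile.Writer.
    static SequenceFile.CompressionType toSeqFileCompression(
        HColumnDescriptor family) {
      switch (family.getCompression()) {
      case BLOCK:
        return SequenceFile.CompressionType.BLOCK;
      case RECORD:
        return SequenceFile.CompressionType.RECORD;
      default:
        // The constructor asserts here; a helper can fall back to NONE.
        return SequenceFile.CompressionType.NONE;
      }
    }

Passing the resolved compression to both the flush writer and the compaction writer, as the -290 and -426 hunks do, keeps the store's on-disk files consistent however they were produced.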
@@ -19,7 +19,9 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.Iterator;
-import java.util.TreeSet;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.TreeMap;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;

@@ -27,101 +29,73 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableComparable;

 /**
- * HTableDescriptor contains various facts about an HTable, like
- * column families, maximum number of column versions, etc.
+ * HTableDescriptor contains the name of an HTable, and its
+ * column families.
  */
 public class HTableDescriptor implements WritableComparable {
   Text name;
-  int maxVersions;
-  TreeSet<Text> families = new TreeSet<Text>();
+  TreeMap<Text, HColumnDescriptor> families;

   /**
    * Legal table names can only contain 'word characters':
    * i.e. <code>[a-zA-Z_0-9]</code>.
    *
-   * Lets be restrictive until a reason to be otherwise.
+   * Let's be restrictive until a reason to be otherwise.
    */
   private static final Pattern LEGAL_TABLE_NAME =
     Pattern.compile("[\\w-]+");

-  /**
-   * Legal family names can only contain 'word characters' and
-   * end in a colon.
-   */
-  private static final Pattern LEGAL_FAMILY_NAME =
-    Pattern.compile("\\w+:");
-
   public HTableDescriptor() {
     this.name = new Text();
-    this.families.clear();
+    this.families = new TreeMap<Text, HColumnDescriptor>();
   }

   /**
    * Constructor.
    * @param name Table name.
-   * @param maxVersions Number of versions of a column to keep.
    * @throws IllegalArgumentException if passed a table name
    * that is made of other than 'word' characters: i.e.
    * <code>[a-zA-Z_0-9]
    */
-  public HTableDescriptor(String name, int maxVersions) {
+  public HTableDescriptor(String name) {
     Matcher m = LEGAL_TABLE_NAME.matcher(name);
     if (m == null || !m.matches()) {
-      throw new IllegalArgumentException("Table names can only " +
-          "contain 'word characters': i.e. [a-zA-Z_0-9");
-    }
-    if (maxVersions <= 0) {
-      // TODO: Allow maxVersion of 0 to be the way you say
-      // "Keep all versions". Until there is support, consider
-      // 0 -- or < 0 -- a configuration error.
-      throw new IllegalArgumentException("Maximum versions " +
-          "must be positive");
+      throw new IllegalArgumentException(
+          "Table names can only contain 'word characters': i.e. [a-zA-Z_0-9");
     }
     this.name = new Text(name);
-    this.maxVersions = maxVersions;
+    this.families = new TreeMap<Text, HColumnDescriptor>();
   }

   public Text getName() {
     return name;
   }

-  public int getMaxVersions() {
-    return maxVersions;
-  }
-
   /**
    * Add a column family.
-   * @param family Column family name to add. Column family names
-   * must end in a <code>:</code>
-   * @throws IllegalArgumentException if passed a table name
-   * that is made of other than 'word' characters: i.e.
-   * <code>[a-zA-Z_0-9]
+   * @param family HColumnDescriptor of family to add.
    */
-  public void addFamily(Text family) {
-    String familyStr = family.toString();
-    Matcher m = LEGAL_FAMILY_NAME.matcher(familyStr);
-    if (m == null || !m.matches()) {
-      throw new IllegalArgumentException("Family names can " +
-          "only contain 'word characters' and must end with a " +
-          "':'");
-    }
-    families.add(family);
+  public void addFamily(HColumnDescriptor family) {
+    families.put(family.getName(), family);
   }

   /** Do we contain a given column? */
   public boolean hasFamily(Text family) {
-    return families.contains(family);
+    return families.containsKey(family);
   }

-  /** All the column families in this table. */
-  public TreeSet<Text> families() {
+  /** All the column families in this table.
+   *
+   * TODO: What is this used for? Seems Dangerous to let people play with our
+   * private members.
+   */
+  public TreeMap<Text, HColumnDescriptor> families() {
     return families;
   }

   @Override
   public String toString() {
-    return "name: " + this.name.toString() +
-      ", maxVersions: " + this.maxVersions + ", families: " + this.families;
+    return "name: " + this.name.toString() + ", families: " + this.families;
   }

   @Override

@@ -133,10 +107,9 @@ public class HTableDescriptor implements WritableComparable {
   public int hashCode() {
     // TODO: Cache.
     int result = this.name.hashCode();
-    result ^= Integer.valueOf(this.maxVersions).hashCode();
     if (this.families != null && this.families.size() > 0) {
-      for (Text family: this.families) {
-        result ^= family.hashCode();
+      for (Map.Entry<Text,HColumnDescriptor> e: this.families.entrySet()) {
+        result ^= e.hashCode();
       }
     }
     return result;

@@ -148,22 +121,21 @@ public class HTableDescriptor implements WritableComparable {

   public void write(DataOutput out) throws IOException {
     name.write(out);
-    out.writeInt(maxVersions);
     out.writeInt(families.size());
-    for(Iterator<Text> it = families.iterator(); it.hasNext(); ) {
+    for(Iterator<HColumnDescriptor> it = families.values().iterator();
+        it.hasNext(); ) {
       it.next().write(out);
     }
   }

   public void readFields(DataInput in) throws IOException {
     this.name.readFields(in);
-    this.maxVersions = in.readInt();
     int numCols = in.readInt();
     families.clear();
     for(int i = 0; i < numCols; i++) {
-      Text t = new Text();
-      t.readFields(in);
-      families.add(t);
+      HColumnDescriptor c = new HColumnDescriptor();
+      c.readFields(in);
+      families.put(c.getName(), c);
     }
   }

@@ -172,24 +144,24 @@ public class HTableDescriptor implements WritableComparable {
   //////////////////////////////////////////////////////////////////////////////

   public int compareTo(Object o) {
-    HTableDescriptor htd = (HTableDescriptor) o;
-    int result = name.compareTo(htd.name);
+    HTableDescriptor other = (HTableDescriptor) o;
+    int result = name.compareTo(other.name);

     if(result == 0) {
-      result = maxVersions - htd.maxVersions;
+      result = families.size() - other.families.size();
+    }
+
+    if(result == 0 && families.size() != other.families.size()) {
+      result = Integer.valueOf(families.size()).compareTo(
+          Integer.valueOf(other.families.size()));
     }

     if(result == 0) {
-      result = families.size() - htd.families.size();
-    }
-    if(result == 0) {
-      Iterator<Text> it2 = htd.families.iterator();
-      for(Iterator<Text> it = families.iterator(); it.hasNext(); ) {
-        Text family1 = it.next();
-        Text family2 = it2.next();
-        result = family1.compareTo(family2);
+      for(Iterator<HColumnDescriptor> it = families.values().iterator(),
+          it2 = other.families.values().iterator(); it.hasNext(); ) {
+        result = it.next().compareTo(it2.next());
         if(result != 0) {
-          return result;
+          break;
         }
       }
     }
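Every test below migrates table construction the same way, so the before/after is worth seeing once in one place (the names here are hypothetical):

    // Before this patch: versions were a table-wide setting, families bare names.
    //   HTableDescriptor desc = new HTableDescriptor("example", 3);
    //   desc.addFamily(new Text("contents:"));

    // After: the table holds only a name; per-family settings, including
    // max versions, move onto HColumnDescriptor.
    HTableDescriptor desc = new HTableDescriptor("example");
    desc.addFamily(new HColumnDescriptor("contents:"));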
@@ -0,0 +1,29 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+public class InvalidColumnNameException extends IOException {
+  private static final long serialVersionUID = 1L << 29 - 1L;
+  public InvalidColumnNameException() {
+    super();
+  }
+
+  public InvalidColumnNameException(String s) {
+    super(s);
+  }
+}

@@ -0,0 +1,29 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+public class MasterNotRunningException extends IOException {
+  private static final long serialVersionUID = 1L << 23 - 1L;
+  public MasterNotRunningException() {
+    super();
+  }
+
+  public MasterNotRunningException(String s) {
+    super(s);
+  }
+}

@@ -0,0 +1,30 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+public class NoServerForRegionException extends IOException {
+  private static final long serialVersionUID = 1L << 11 - 1L;
+
+  public NoServerForRegionException() {
+    super();
+  }
+
+  public NoServerForRegionException(String s) {
+    super(s);
+  }
+}

@@ -0,0 +1,29 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+public class TableNotDisabledException extends IOException {
+  private static final long serialVersionUID = 1L << 19 - 1L;
+  public TableNotDisabledException() {
+    super();
+  }
+
+  public TableNotDisabledException(String s) {
+    super(s);
+  }
+}
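One nit on the four new exception classes: in Java the additive operator binds tighter than the shift, so `1L << 29 - 1L` parses as `1L << (29 - 1)`. Each serialVersionUID is therefore a plain power of two, not the 2^n - 1 the spelling suggests. Harmless as an arbitrary stamp, but worth parenthesizing if a Mersenne-style value was intended:

    long asWritten = 1L << 29 - 1L;       // == 1L << 28 == 268435456
    long probablyMeant = (1L << 29) - 1L; // == 536870911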
@@ -53,8 +53,8 @@ public class EvaluationClient implements HConstants {
   private static HTableDescriptor tableDescriptor;

   static {
-    tableDescriptor = new HTableDescriptor("TestTable", 1);
-    tableDescriptor.addFamily(COLUMN_FAMILY);
+    tableDescriptor = new HTableDescriptor("TestTable");
+    tableDescriptor.addFamily(new HColumnDescriptor(COLUMN_FAMILY.toString()));
   }

   private static enum Test {RANDOM_READ,
@@ -71,9 +71,9 @@ public class TestGet extends HBaseTestCase {
     Path dir = new Path("/hbase");
     fs.mkdirs(dir);

-    HTableDescriptor desc = new HTableDescriptor("test", 1);
-    desc.addFamily(CONTENTS);
-    desc.addFamily(HConstants.COLUMN_FAMILY);
+    HTableDescriptor desc = new HTableDescriptor("test");
+    desc.addFamily(new HColumnDescriptor(CONTENTS.toString()));
+    desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY.toString()));

     HRegionInfo info = new HRegionInfo(0L, desc, null, null);
     Path regionDir = HStoreFile.getHRegionDir(dir, info.regionName);
@@ -17,8 +17,8 @@ package org.apache.hadoop.hbase;

 import java.io.IOException;
 import java.util.Iterator;
+import java.util.Set;
 import java.util.TreeMap;
-import java.util.TreeSet;

 import org.apache.hadoop.io.Text;

@@ -71,9 +71,9 @@ public class TestHBaseCluster extends HBaseClusterTestCase {

   private void setup() throws IOException {
     client = new HClient(conf);
-    desc = new HTableDescriptor("test", 3);
-    desc.addFamily(new Text(CONTENTS));
-    desc.addFamily(new Text(ANCHOR));
+    desc = new HTableDescriptor("test");
+    desc.addFamily(new HColumnDescriptor(CONTENTS.toString()));
+    desc.addFamily(new HColumnDescriptor(ANCHOR.toString()));
     client.createTable(desc);
   }

@@ -182,7 +182,7 @@ public class TestHBaseCluster extends HBaseClusterTestCase {
     HTableDescriptor[] tables = client.listTables();
     assertEquals(1, tables.length);
     assertEquals(desc.getName(), tables[0].getName());
-    TreeSet<Text> families = tables[0].families();
+    Set<Text> families = tables[0].families().keySet();
     assertEquals(2, families.size());
     assertTrue(families.contains(new Text(CONTENTS)));
     assertTrue(families.contains(new Text(ANCHOR)));

@@ -193,11 +193,5 @@ public class TestHBaseCluster extends HBaseClusterTestCase {
     // Delete the table we created

     client.deleteTable(desc.getName());
-    try {
-      Thread.sleep(30000);               // Wait for table to be deleted
-
-    } catch(InterruptedException e) {
-    }
   }

 }
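Dropping the 30-second sleep after deleteTable suggests the client call now blocks until the table is actually gone, rather than returning while deletion is still in flight; that reading is inferred from the test change, not shown directly in these hunks. The wait-and-hope pattern disappears:

    // Before: fire, then guess how long deletion takes.
    //   client.deleteTable(desc.getName());
    //   Thread.sleep(30000);
    // After: the call itself is trusted to return once the table is deleted.
    client.deleteTable(desc.getName());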
@@ -102,9 +102,9 @@ public class TestHRegion extends HBaseTestCase implements RegionUnavailableListe
     oldlogfile = new Path(parentdir, "oldlogfile");

     log = new HLog(fs, newlogdir, conf);
-    desc = new HTableDescriptor("test", 3);
-    desc.addFamily(new Text("contents:"));
-    desc.addFamily(new Text("anchor:"));
+    desc = new HTableDescriptor("test");
+    desc.addFamily(new HColumnDescriptor("contents:"));
+    desc.addFamily(new HColumnDescriptor("anchor:"));
     region = new HRegion(parentdir, log, fs, conf,
         new HRegionInfo(1, desc, null, null), null, oldlogfile);
   }
@@ -0,0 +1,79 @@
+/**
+ * Copyright 2006 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import org.apache.hadoop.io.Text;
+
+public class TestMasterAdmin extends HBaseClusterTestCase {
+  private static final Text COLUMN_NAME = new Text("col1:");
+  private static HTableDescriptor testDesc;
+  static {
+    testDesc = new HTableDescriptor("testadmin1");
+    testDesc.addFamily(new HColumnDescriptor(COLUMN_NAME.toString()));
+  }
+
+  private HClient client;
+
+  public TestMasterAdmin() {
+    super(true);
+    client = new HClient(conf);
+  }
+
+  public void testMasterAdmin() {
+    try {
+      client.createTable(testDesc);
+      client.disableTable(testDesc.getName());
+
+    } catch(Exception e) {
+      e.printStackTrace();
+      fail();
+    }
+
+    try {
+      try {
+        client.openTable(testDesc.getName());
+
+      } catch(IllegalStateException e) {
+        // Expected
+      }
+
+      client.addColumn(testDesc.getName(), new HColumnDescriptor("col2:"));
+      client.enableTable(testDesc.getName());
+      try {
+        client.deleteColumn(testDesc.getName(), new Text("col2:"));
+
+      } catch(TableNotDisabledException e) {
+        // Expected
+      }
+
+      client.disableTable(testDesc.getName());
+      client.deleteColumn(testDesc.getName(), new Text("col2:"));
+
+    } catch(Exception e) {
+      e.printStackTrace();
+      fail();
+
+    } finally {
+      try {
+        client.deleteTable(testDesc.getName());
+
+      } catch(Exception e) {
+        e.printStackTrace();
+        fail();
+      }
+    }
+  }
+}
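TestMasterAdmin doubles as documentation for the new administrative call order: schema changes require a disabled table, and opening a disabled table fails. Condensed from the calls the test actually makes:

    client.createTable(testDesc);              // new tables come up enabled
    client.disableTable(testDesc.getName());   // offline before schema changes
    client.addColumn(testDesc.getName(), new HColumnDescriptor("col2:"));
    client.enableTable(testDesc.getName());    // back online; deleteColumn now
                                               // throws TableNotDisabledException
    client.disableTable(testDesc.getName());
    client.deleteColumn(testDesc.getName(), new Text("col2:"));
    client.deleteTable(testDesc.getName());    // cleanup, done while disabled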
@@ -50,8 +50,8 @@ public class TestTable extends HBaseClusterTestCase {
       fail();
     }

-    HTableDescriptor desc = new HTableDescriptor("test", 1);
-    desc.addFamily(HConstants.COLUMN_FAMILY);
+    HTableDescriptor desc = new HTableDescriptor("test");
+    desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY.toString()));

     try {
       client.createTable(desc);

@@ -73,5 +73,5 @@ public class TestTable extends HBaseClusterTestCase {
       e.printStackTrace();
       fail();
     }
   }
 }
@@ -15,15 +15,24 @@ public class TestToString extends TestCase {
   }

   public void testHRegionInfo() throws Exception {
-    HTableDescriptor htd = new HTableDescriptor("hank", 10);
-    htd.addFamily(new Text("hankfamily:"));
-    htd.addFamily(new Text("hankotherfamily:"));
-    assertEquals("Table descriptor", htd.toString(),
-      "name: hank, maxVersions: 10, families: [hankfamily:, hankotherfamily:]");
+    HTableDescriptor htd = new HTableDescriptor("hank");
+    htd.addFamily(new HColumnDescriptor("hankfamily:"));
+    htd.addFamily(new HColumnDescriptor(new Text("hankotherfamily:"), 10,
+      HColumnDescriptor.CompressionType.BLOCK, true, 1000, false));
+    assertEquals("Table descriptor", "name: hank, families: "
+      + "{hankfamily:=(hankfamily:, max versions: 3, compression: none, "
+      + "in memory: false, max value length: 2147483647, bloom filter:false), "
+      + "hankotherfamily:=(hankotherfamily:, max versions: 10, "
+      + "compression: block, in memory: true, max value length: 1000, "
+      + "bloom filter:false)}", htd.toString());
     HRegionInfo hri = new HRegionInfo(-1, htd, new Text(), new Text("10"));
     assertEquals("HRegionInfo",
-      "regionname: hank__-1, startKey: <>, tableDesc: {name: hank, " +
-      "maxVersions: 10, families: [hankfamily:, hankotherfamily:]}",
+      "regionname: hank__-1, startKey: <>, tableDesc: {" + "name: hank, "
+      + "families: {hankfamily:=(hankfamily:, max versions: 3, "
+      + "compression: none, in memory: false, max value length: 2147483647, "
+      + "bloom filter:false), hankotherfamily:=(hankotherfamily:, "
+      + "max versions: 10, compression: block, in memory: true, max value "
+      + "length: 1000, bloom filter:false)}}",
       hri.toString());
   }
 }
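The expected strings above are the handiest reference for the two HColumnDescriptor constructors this patch leans on: the String form applies defaults (max versions 3, no compression, not in memory, max value length 2147483647, no bloom filter), while the long form sets every field. Restated as a sketch, with the argument order taken from the test:

    // Defaults, matching "max versions: 3, compression: none, in memory:
    // false, max value length: 2147483647, bloom filter:false" above.
    HColumnDescriptor plain = new HColumnDescriptor("hankfamily:");

    // Full form: name, max versions, compression type, in memory,
    // max value length, bloom filter.
    HColumnDescriptor tuned = new HColumnDescriptor(new Text("hankotherfamily:"),
        10, HColumnDescriptor.CompressionType.BLOCK, true, 1000, false);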
|
|
Loading…
Reference in New Issue