HADOOP-1384

HBase omnibus patch. Contributions by Vuk Ercegovac and Michael Stack.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@539243 13f79535-47bb-0310-9956-ffa450edef68
Author: Jim Kellerman
Date: 2007-05-18 03:22:54 +00:00
parent 3a70f2f97b
commit a6b7be60d6
25 changed files with 1578 additions and 1280 deletions

HAbstractScanner.java
@@ -183,10 +183,10 @@ public abstract class HAbstractScanner implements HInternalScannerInterface {
   abstract boolean getNext(int i) throws IOException;
 
   /** Mechanism used by concrete implementation to shut down a particular scanner */
-  abstract void closeSubScanner(int i) throws IOException;
+  abstract void closeSubScanner(int i);
 
   /** Mechanism used to shut down the whole scan */
-  public abstract void close() throws IOException;
+  public abstract void close();
 
   /* (non-Javadoc)
    * @see org.apache.hadoop.hbase.HInternalScannerInterface#isWildcardScanner()

HClient.java
@@ -288,17 +288,35 @@ public class HClient implements HConstants {
         throw new IOException("Timed out trying to locate root region");
       }
 
+      // Verify that this server still serves the root region
       HRegionInterface rootRegion = getHRegionConnection(rootRegionLocation);
-      if(rootRegion.getRegionInfo(HGlobals.rootRegionInfo.regionName) != null) {
+      try {
+        rootRegion.getRegionInfo(HGlobals.rootRegionInfo.regionName);
         this.tableServers = new TreeMap<Text, TableInfo>();
         this.tableServers.put(EMPTY_START_ROW,
             new TableInfo(HGlobals.rootRegionInfo, rootRegionLocation));
         this.tablesToServers.put(ROOT_TABLE_NAME, this.tableServers);
         break;
+
+      } catch(NotServingRegionException e) {
+        if(tries == numRetries - 1) {
+          // Don't bother sleeping. We've run out of retries.
+          break;
+        }
+
+        // Sleep and retry finding root region.
+        try {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Root region location changed. Sleeping.");
+          }
+          Thread.sleep(this.clientTimeout);
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Wake. Retry finding root region.");
+          }
+        } catch(InterruptedException iex) {
+        }
       }
       rootRegionLocation = null;
     }
@@ -453,7 +471,7 @@ public class HClient implements HConstants {
    * Right now, it only exists as part of the META table's region info.
    */
   public synchronized HTableDescriptor[] listTables()
   throws IOException {
     TreeSet<HTableDescriptor> uniqueTables = new TreeSet<HTableDescriptor>();
     TreeMap<Text, TableInfo> metaTables =
@@ -523,24 +541,84 @@ public class HClient implements HConstants {
     return this.tableServers.get(serverKey);
   }
 
+  private synchronized void findRegion(TableInfo info) throws IOException {
+    // Wipe out everything we know about this table
+    this.tablesToServers.remove(info.regionInfo.tableDesc.getName());
+    this.tableServers.clear();
+
+    // Reload information for the whole table
+    findTableInMeta(info.regionInfo.tableDesc.getName());
+
+    if(this.tableServers.get(info.regionInfo.startKey) == null ) {
+      throw new IOException("region " + info.regionInfo.regionName + " does not exist");
+    }
+  }
+
   /** Get a single value for the specified row and column */
   public byte[] get(Text row, Text column) throws IOException {
-    TableInfo info = getTableInfo(row);
-    return getHRegionConnection(info.serverAddress).get(
-        info.regionInfo.regionName, row, column).get();
+    TableInfo info = null;
+    BytesWritable value = null;
+
+    for(int tries = 0; tries < numRetries && info == null; tries++) {
+      info = getTableInfo(row);
+
+      try {
+        value = getHRegionConnection(info.serverAddress).get(
+            info.regionInfo.regionName, row, column);
+
+      } catch(NotServingRegionException e) {
+        if(tries == numRetries - 1) {
+          // No more tries
+          throw e;
+        }
+        findRegion(info);
+        info = null;
+      }
+    }
+
+    if(value != null) {
+      byte[] bytes = new byte[value.getSize()];
+      System.arraycopy(value.get(), 0, bytes, 0, bytes.length);
+      return bytes;
+    }
+    return null;
   }
 
   /** Get the specified number of versions of the specified row and column */
   public byte[][] get(Text row, Text column, int numVersions) throws IOException {
-    TableInfo info = getTableInfo(row);
-    BytesWritable[] values = getHRegionConnection(info.serverAddress).get(
-        info.regionInfo.regionName, row, column, numVersions);
-
-    ArrayList<byte[]> bytes = new ArrayList<byte[]>();
-    for(int i = 0 ; i < values.length; i++) {
-      bytes.add(values[i].get());
+    TableInfo info = null;
+    BytesWritable[] values = null;
+
+    for(int tries = 0; tries < numRetries && info == null; tries++) {
+      info = getTableInfo(row);
+
+      try {
+        values = getHRegionConnection(info.serverAddress).get(
+            info.regionInfo.regionName, row, column, numVersions);
+
+      } catch(NotServingRegionException e) {
+        if(tries == numRetries - 1) {
+          // No more tries
+          throw e;
+        }
+        findRegion(info);
+        info = null;
+      }
     }
-    return bytes.toArray(new byte[values.length][]);
+
+    if(values != null) {
+      ArrayList<byte[]> bytes = new ArrayList<byte[]>();
+      for(int i = 0 ; i < values.length; i++) {
+        byte[] value = new byte[values[i].getSize()];
+        System.arraycopy(values[i].get(), 0, value, 0, value.length);
+        bytes.add(value);
+      }
+      return bytes.toArray(new byte[values.length][]);
+    }
+    return null;
   }
 
   /**
@@ -548,22 +626,61 @@ public class HClient implements HConstants {
    * the specified timestamp.
    */
   public byte[][] get(Text row, Text column, long timestamp, int numVersions) throws IOException {
-    TableInfo info = getTableInfo(row);
-    BytesWritable[] values = getHRegionConnection(info.serverAddress).get(
-        info.regionInfo.regionName, row, column, timestamp, numVersions);
-
-    ArrayList<byte[]> bytes = new ArrayList<byte[]>();
-    for(int i = 0 ; i < values.length; i++) {
-      bytes.add(values[i].get());
-    }
-    return bytes.toArray(new byte[values.length][]);
-  }
+    TableInfo info = null;
+    BytesWritable[] values = null;
+
+    for(int tries = 0; tries < numRetries && info == null; tries++) {
+      info = getTableInfo(row);
+
+      try {
+        values = getHRegionConnection(info.serverAddress).get(
+            info.regionInfo.regionName, row, column, timestamp, numVersions);
+
+      } catch(NotServingRegionException e) {
+        if(tries == numRetries - 1) {
+          // No more tries
+          throw e;
+        }
+        findRegion(info);
+        info = null;
+      }
+    }
+
+    if(values != null) {
+      ArrayList<byte[]> bytes = new ArrayList<byte[]>();
+      for(int i = 0 ; i < values.length; i++) {
+        byte[] value = new byte[values[i].getSize()];
+        System.arraycopy(values[i].get(), 0, value, 0, value.length);
+        bytes.add(value);
+      }
+      return bytes.toArray(new byte[values.length][]);
+    }
+    return null;
+  }
 
   /** Get all the data for the specified row */
   public LabelledData[] getRow(Text row) throws IOException {
-    TableInfo info = getTableInfo(row);
-    return getHRegionConnection(info.serverAddress).getRow(
-        info.regionInfo.regionName, row);
+    TableInfo info = null;
+    LabelledData[] value = null;
+
+    for(int tries = 0; tries < numRetries && info == null; tries++) {
+      info = getTableInfo(row);
+
+      try {
+        value = getHRegionConnection(info.serverAddress).getRow(
+            info.regionInfo.regionName, row);
+
+      } catch(NotServingRegionException e) {
+        if(tries == numRetries - 1) {
+          // No more tries
+          throw e;
+        }
+        findRegion(info);
+        info = null;
+      }
+    }
+    return value;
   }
 
   /**
@@ -579,19 +696,34 @@ public class HClient implements HConstants {
   /** Start an atomic row insertion or update */
   public long startUpdate(Text row) throws IOException {
-    TableInfo info = getTableInfo(row);
-    long lockid;
-    try {
-      this.currentServer = getHRegionConnection(info.serverAddress);
-      this.currentRegion = info.regionInfo.regionName;
-      this.clientid = rand.nextLong();
-      lockid = currentServer.startUpdate(this.currentRegion, this.clientid, row);
-    } catch(IOException e) {
-      this.currentServer = null;
-      this.currentRegion = null;
-      throw e;
+    TableInfo info = null;
+    long lockid = -1L;
+
+    for(int tries = 0; tries < numRetries && info == null; tries++) {
+      info = getTableInfo(row);
+
+      try {
+        this.currentServer = getHRegionConnection(info.serverAddress);
+        this.currentRegion = info.regionInfo.regionName;
+        this.clientid = rand.nextLong();
+        lockid = currentServer.startUpdate(this.currentRegion, this.clientid, row);
+
+      } catch(NotServingRegionException e) {
+        if(tries == numRetries - 1) {
+          // No more tries
+          throw e;
+        }
+        findRegion(info);
+        info = null;
+
+      } catch(IOException e) {
+        this.currentServer = null;
+        this.currentRegion = null;
+        throw e;
+      }
     }
     return lockid;
   }
@@ -666,24 +798,27 @@ public class HClient implements HConstants {
     private HRegionInterface server;
     private long scannerId;
 
+    private void loadRegions() {
+      Text firstServer = null;
+      if(this.startRow == null || this.startRow.getLength() == 0) {
+        firstServer = tableServers.firstKey();
+      } else if(tableServers.containsKey(startRow)) {
+        firstServer = startRow;
+      } else {
+        firstServer = tableServers.headMap(startRow).lastKey();
+      }
+      Collection<TableInfo> info = tableServers.tailMap(firstServer).values();
+      this.regions = info.toArray(new TableInfo[info.size()]);
+    }
+
     public ClientScanner(Text[] columns, Text startRow) throws IOException {
       this.columns = columns;
       this.startRow = startRow;
       this.closed = false;
-      Text firstServer = null;
-      if(this.startRow == null || this.startRow.getLength() == 0) {
-        firstServer = tableServers.firstKey();
-      } else if(tableServers.containsKey(startRow)) {
-        firstServer = startRow;
-      } else {
-        firstServer = tableServers.headMap(startRow).lastKey();
-      }
-      Collection<TableInfo> info = tableServers.tailMap(firstServer).values();
-      this.regions = info.toArray(new TableInfo[info.size()]);
+
+      loadRegions();
+
       this.currentRegion = -1;
       this.server = null;
       this.scannerId = -1L;
@@ -706,10 +841,26 @@ public class HClient implements HConstants {
       }
       try {
         this.server = getHRegionConnection(this.regions[currentRegion].serverAddress);
-        this.scannerId = this.server.openScanner(
-            this.regions[currentRegion].regionInfo.regionName, this.columns,
-            this.startRow);
+
+        for(int tries = 0; tries < numRetries; tries++) {
+          TableInfo info = this.regions[currentRegion];
+
+          try {
+            this.scannerId = this.server.openScanner(info.regionInfo.regionName,
+                this.columns, currentRegion == 0 ? this.startRow : EMPTY_START_ROW);
+            break;
+
+          } catch(NotServingRegionException e) {
+            if(tries == numRetries - 1) {
+              // No more tries
+              throw e;
+            }
+            findRegion(info);
+            loadRegions();
+          }
+        }
+
       } catch(IOException e) {
         close();
         throw e;
@@ -743,6 +894,7 @@ public class HClient implements HConstants {
     public void close() throws IOException {
       if(this.scannerId != -1L) {
         this.server.close(this.scannerId);
+        this.scannerId = -1L;
       }
       this.server = null;
       this.closed = true;
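
Every client operation touched above now follows the same retry shape: look up the region, make the call, and on NotServingRegionException re-resolve the region location and try again, up to numRetries times. The stand-alone sketch below distills that shape; the RegionCall interface, the RetryDemo class, and the locally defined exception are illustrative stand-ins for this note, not code from the patch.

    import java.io.IOException;

    // Hypothetical stand-ins to illustrate the retry-on-stale-region pattern used above.
    class NotServingRegionException extends IOException {}

    interface RegionCall<T> {
      T call() throws IOException;           // the actual RPC against a region server
    }

    class RetryDemo {
      static final int NUM_RETRIES = 3;

      // Retry a call, refreshing cached region locations between attempts.
      static <T> T withRetries(RegionCall<T> call, Runnable refreshRegionCache)
          throws IOException {
        for (int tries = 0; tries < NUM_RETRIES; tries++) {
          try {
            return call.call();
          } catch (NotServingRegionException e) {
            if (tries == NUM_RETRIES - 1) {
              throw e;                        // out of retries, surface the error
            }
            refreshRegionCache.run();         // analogous to findRegion()/loadRegions()
          }
        }
        return null;                          // unreachable for NUM_RETRIES > 0
      }

      public static void main(String[] args) throws IOException {
        // Succeeds on the second attempt.
        final int[] attempts = {0};
        String result = withRetries(() -> {
          if (attempts[0]++ == 0) {
            throw new NotServingRegionException();
          }
          return "value";
        }, () -> System.out.println("re-resolving region location"));
        System.out.println(result);
      }
    }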

HInternalScannerInterface.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.io.Text;
 public interface HInternalScannerInterface {
 
   public boolean next(HStoreKey key, TreeMap<Text, BytesWritable> results) throws IOException;
-  public void close() throws IOException;
+  public void close();
 
   /** Returns true if the scanner is matching a column family or regex */
   public boolean isWildcardScanner();

HLocking.java
@@ -15,6 +15,8 @@
  */
 package org.apache.hadoop.hbase;
 
+import java.util.concurrent.atomic.AtomicInteger;
+
 /*******************************************************************************
  * HLocking is a set of lock primitives that does not rely on a
  * particular thread holding the monitor for an object. This is
@@ -33,12 +35,12 @@ public class HLocking {
   // If lockers > 0, locked for read
   // If lockers == -1 locked for write
-  private int lockers;
+  private AtomicInteger lockers;
 
   /** Constructor */
   public HLocking() {
     this.mutex = new Integer(0);
-    this.lockers = 0;
+    this.lockers = new AtomicInteger(0);
   }
 
   /**
@@ -46,13 +48,13 @@
    */
   public void obtainReadLock() {
     synchronized(mutex) {
-      while(lockers < 0) {
+      while(lockers.get() < 0) {
         try {
           mutex.wait();
         } catch(InterruptedException ie) {
         }
       }
-      lockers++;
+      lockers.incrementAndGet();
       mutex.notifyAll();
     }
   }
@@ -62,8 +64,7 @@
    */
   public void releaseReadLock() {
     synchronized(mutex) {
-      lockers--;
-      if(lockers < 0) {
+      if(lockers.decrementAndGet() < 0) {
         throw new IllegalStateException("lockers: " + lockers);
       }
       mutex.notifyAll();
@@ -75,13 +76,12 @@
    */
   public void obtainWriteLock() {
     synchronized(mutex) {
-      while(lockers != 0) {
+      while(!lockers.compareAndSet(0, -1)) {
        try {
          mutex.wait();
        } catch (InterruptedException ie) {
        }
       }
-      lockers = -1;
       mutex.notifyAll();
     }
   }
@@ -91,10 +91,9 @@
    */
   public void releaseWriteLock() {
     synchronized(mutex) {
-      if(lockers != -1) {
+      if(!lockers.compareAndSet(-1, 0)) {
         throw new IllegalStateException("lockers: " + lockers);
       }
-      lockers = 0;
       mutex.notifyAll();
     }
   }
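
HRegion (later in this patch) starts taking this lock around its readers and writers and releases it in a finally block so an exception cannot leave the lock held. The sketch below mirrors the read-lock half of the design in a self-contained class; SimpleReadWriteLock is illustrative only and is not the patched HLocking.

    import java.util.concurrent.atomic.AtomicInteger;

    // Illustrative stand-in mirroring the lockers counter used above:
    // > 0 means held by that many readers, -1 means held by one writer.
    class SimpleReadWriteLock {
      private final Object mutex = new Object();
      private final AtomicInteger lockers = new AtomicInteger(0);

      void obtainReadLock() {
        synchronized (mutex) {
          while (lockers.get() < 0) {         // wait while a writer holds the lock
            try { mutex.wait(); } catch (InterruptedException ignored) {}
          }
          lockers.incrementAndGet();
          mutex.notifyAll();
        }
      }

      void releaseReadLock() {
        synchronized (mutex) {
          if (lockers.decrementAndGet() < 0) {
            throw new IllegalStateException("lockers: " + lockers);
          }
          mutex.notifyAll();
        }
      }

      public static void main(String[] args) {
        SimpleReadWriteLock lock = new SimpleReadWriteLock();
        lock.obtainReadLock();
        try {
          System.out.println("reading under the lock");
        } finally {
          lock.releaseReadLock();             // always release, even on exception
        }
      }
    }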

HLog.java
@@ -270,8 +270,14 @@ public class HLog implements HConstants {
 
   /** Shut down the log. */
   public synchronized void close() throws IOException {
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("closing log writer");
+    }
     this.writer.close();
     this.closed = true;
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("log writer closed");
+    }
   }
 
   /**

HMaster.java
@@ -164,7 +164,7 @@ public class HMaster implements HConstants, HMasterInterface,
           HRegionInfo info = getRegionInfo(COL_REGIONINFO, results, inbuf);
           String serverName = getServerName(COL_SERVER, results);
           long startCode = getStartCode(COL_STARTCODE, results);
 
           if(LOG.isDebugEnabled()) {
             LOG.debug("row: " + info.toString() + ", server: " + serverName
               + ", startCode: " + startCode);
@@ -177,11 +177,11 @@ public class HMaster implements HConstants, HMasterInterface,
           }
         } finally {
           try {
             if (scannerId != -1L) {
               server.close(scannerId);
             }
           } catch (IOException e) {
             e.printStackTrace();
           }
           scannerId = -1L;
         }
@@ -581,13 +581,13 @@ public class HMaster implements HConstants, HMasterInterface,
     // Main processing loop
     for(PendingOperation op = null; !closed; ) {
       synchronized(msgQueue) {
-        while(msgQueue.size() == 0 && serversToServerInfo.size() != 0) {
+        while(msgQueue.size() == 0 && !closed) {
           try {
             msgQueue.wait(threadWakeFrequency);
           } catch(InterruptedException iex) {
           }
         }
-        if(msgQueue.size() == 0 || closed) {
+        if(closed) {
           continue;
         }
         op = msgQueue.remove(msgQueue.size()-1);
@@ -616,14 +616,6 @@ public class HMaster implements HConstants, HMasterInterface,
     }
     server.stop();                          // Stop server
     serverLeases.close();                   // Turn off the lease monitor
-    try {
-      fs.close();
-      client.close();                       // Shut down the client
-    } catch(IOException iex) {
-      // Print if ever there is an interrupt (Just for kicks. Remove if it
-      // ever happens).
-      iex.printStackTrace();
-    }
 
     // Join up with all threads
@@ -652,6 +644,7 @@ public class HMaster implements HConstants, HMasterInterface,
       // ever happens).
       iex.printStackTrace();
     }
+
     if(LOG.isDebugEnabled()) {
       LOG.debug("HMaster main thread exiting");
     }
@@ -774,19 +767,9 @@ public class HMaster implements HConstants, HMasterInterface,
   HMsg[] processMsgs(HServerInfo info, HMsg incomingMsgs[]) throws IOException {
     Vector<HMsg> returnMsgs = new Vector<HMsg>();
 
-    // Process the kill list
     TreeMap<Text, HRegionInfo> regionsToKill =
       killList.remove(info.getServerAddress().toString());
-
-    if(regionsToKill != null) {
-      for(Iterator<HRegionInfo> i = regionsToKill.values().iterator();
-          i.hasNext(); ) {
-        returnMsgs.add(new HMsg(HMsg.MSG_REGION_CLOSE_AND_DELETE, i.next()));
-      }
-    }
 
     // Get reports on what the RegionServer did.
     for(int i = 0; i < incomingMsgs.length; i++) {
@@ -872,18 +855,11 @@ public class HMaster implements HConstants, HMasterInterface,
             } else {
               boolean reassignRegion = true;
 
-              synchronized(regionsToKill) {
-                if(regionsToKill.containsKey(region.regionName)) {
-                  regionsToKill.remove(region.regionName);
-
-                  if(regionsToKill.size() > 0) {
-                    killList.put(info.toString(), regionsToKill);
-                  } else {
-                    killList.remove(info.toString());
-                  }
-                  reassignRegion = false;
-                }
-              }
+              if(regionsToKill.containsKey(region.regionName)) {
+                regionsToKill.remove(region.regionName);
+                unassignedRegions.remove(region.regionName);
+                assignAttempts.remove(region.regionName);
+                reassignRegion = false;
+              }
 
               synchronized(msgQueue) {
@@ -902,14 +878,15 @@ public class HMaster implements HConstants, HMasterInterface,
             if(LOG.isDebugEnabled()) {
               LOG.debug("new region " + region.regionName);
             }
+
+            // A region has split and the old server is serving the two new regions.
+
             if(region.regionName.find(META_TABLE_NAME.toString()) == 0) {
               // A meta region has split.
               allMetaRegionsScanned = false;
             }
-            unassignedRegions.put(region.regionName, region);
-            assignAttempts.put(region.regionName, 0L);
             break;
 
           default:
@@ -918,6 +895,16 @@ public class HMaster implements HConstants, HMasterInterface,
       }
     }
 
+    // Process the kill list
+    if(regionsToKill != null) {
+      for(Iterator<HRegionInfo> i = regionsToKill.values().iterator();
+          i.hasNext(); ) {
+        returnMsgs.add(new HMsg(HMsg.MSG_REGION_CLOSE_AND_DELETE, i.next()));
+      }
+    }
+
     // Figure out what the RegionServer ought to do, and write back.
     if(unassignedRegions.size() > 0) {
@ -1460,109 +1447,168 @@ public class HMaster implements HConstants, HMasterInterface,
} else { } else {
firstMetaRegion = knownMetaRegions.headMap(tableName).lastKey(); firstMetaRegion = knownMetaRegions.headMap(tableName).lastKey();
} }
for(Iterator<MetaRegion> it =
knownMetaRegions.tailMap(firstMetaRegion).values().iterator();
it.hasNext(); ) {
// Find all the regions that make up this table synchronized(metaScannerLock) { // Prevent meta scanner from running
for(Iterator<MetaRegion> it =
MetaRegion m = it.next(); knownMetaRegions.tailMap(firstMetaRegion).values().iterator();
HRegionInterface server = client.getHRegionConnection(m.server); it.hasNext(); ) {
Vector<Text> rowsToDelete = new Vector<Text>();
long scannerId = -1L; // Find all the regions that make up this table
try {
scannerId = server.openScanner(m.regionName, METACOLUMNS, tableName);
DataInputBuffer inbuf = new DataInputBuffer();
byte[] bytes;
while(true) {
LabelledData[] values = null;
HStoreKey key = new HStoreKey();
values = server.next(scannerId, key);
if(values == null || values.length == 0) {
break;
}
TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
for(int i = 0; i < values.length; i++) {
bytes = new byte[values[i].getData().getSize()];
System.arraycopy(values[i].getData().get(), 0, bytes, 0, bytes.length);
results.put(values[i].getLabel(), bytes);
}
bytes = results.get(COL_REGIONINFO);
if(bytes == null || bytes.length == 0) {
break;
}
inbuf.reset(bytes, bytes.length);
HRegionInfo info = new HRegionInfo();
info.readFields(inbuf);
if(info.tableDesc.getName().compareTo(tableName) > 0) { MetaRegion m = it.next();
break; // Beyond any more entries for this table HRegionInterface server = client.getHRegionConnection(m.server);
}
rowsToDelete.add(info.regionName);
// Is it being served? // Rows in the meta table we will need to delete
bytes = results.get(COL_SERVER); Vector<Text> rowsToDelete = new Vector<Text>();
if(bytes != null && bytes.length != 0) {
String serverName = new String(bytes, UTF8_ENCODING); // Regions that are being served. We will get the HRegionServers
// to delete them for us, but we don't tell them that until after
bytes = results.get(COL_STARTCODE); // we are done scanning to prevent lock contention
TreeMap<String, TreeMap<Text, HRegionInfo>> localKillList =
new TreeMap<String, TreeMap<Text, HRegionInfo>>();
// Regions that are not being served. We will have to delete
// them ourselves
TreeSet<Text> unservedRegions = new TreeSet<Text>();
long scannerId = -1L;
try {
scannerId = server.openScanner(m.regionName, METACOLUMNS, tableName);
DataInputBuffer inbuf = new DataInputBuffer();
byte[] bytes;
while(true) {
LabelledData[] values = null;
HStoreKey key = new HStoreKey();
values = server.next(scannerId, key);
if(values == null || values.length == 0) {
break;
}
TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
for(int i = 0; i < values.length; i++) {
bytes = new byte[values[i].getData().getSize()];
System.arraycopy(values[i].getData().get(), 0, bytes, 0, bytes.length);
results.put(values[i].getLabel(), bytes);
}
bytes = results.get(COL_REGIONINFO);
if(bytes == null || bytes.length == 0) {
break;
}
inbuf.reset(bytes, bytes.length);
HRegionInfo info = new HRegionInfo();
info.readFields(inbuf);
if(info.tableDesc.getName().compareTo(tableName) > 0) {
break; // Beyond any more entries for this table
}
rowsToDelete.add(info.regionName);
// Is it being served?
bytes = results.get(COL_SERVER);
if(bytes != null && bytes.length != 0) { if(bytes != null && bytes.length != 0) {
long startCode = Long.valueOf(new String(bytes, UTF8_ENCODING)); String serverName = new String(bytes, UTF8_ENCODING);
HServerInfo s = serversToServerInfo.get(serverName); bytes = results.get(COL_STARTCODE);
if(s != null && s.getStartCode() == startCode) { if(bytes != null && bytes.length != 0) {
long startCode = Long.valueOf(new String(bytes, UTF8_ENCODING));
// It is being served. HServerInfo s = serversToServerInfo.get(serverName);
// Tell the server to stop it and not report back. if(s != null && s.getStartCode() == startCode) {
TreeMap<Text, HRegionInfo> regionsToKill = // It is being served.
killList.get(serverName); // Tell the server to stop it and not report back.
if(regionsToKill == null) { TreeMap<Text, HRegionInfo> regionsToKill =
regionsToKill = new TreeMap<Text, HRegionInfo>(); localKillList.get(serverName);
if(regionsToKill == null) {
regionsToKill = new TreeMap<Text, HRegionInfo>();
}
regionsToKill.put(info.regionName, info);
localKillList.put(serverName, regionsToKill);
continue;
} }
regionsToKill.put(info.regionName, info);
killList.put(serverName, regionsToKill);
} }
} }
// Region is not currently being served.
// Prevent it from getting assigned and add it to the list of
// regions we need to delete here.
unassignedRegions.remove(info.regionName);
assignAttempts.remove(info.regionName);
unservedRegions.add(info.regionName);
}
} catch(IOException e) {
e.printStackTrace();
} finally {
if(scannerId != -1L) {
try {
server.close(scannerId);
} catch(IOException e) {
e.printStackTrace();
}
}
scannerId = -1L;
}
// Wipe the existence of the regions out of the meta table
for(Iterator<Text> row = rowsToDelete.iterator(); row.hasNext(); ) {
Text rowName = row.next();
if(LOG.isDebugEnabled()) {
LOG.debug("deleting columns in row: " + rowName);
}
long lockid = -1L;
long clientId = rand.nextLong();
try {
lockid = server.startUpdate(m.regionName, clientId, rowName);
server.delete(m.regionName, clientId, lockid, COL_REGIONINFO);
server.delete(m.regionName, clientId, lockid, COL_SERVER);
server.delete(m.regionName, clientId, lockid, COL_STARTCODE);
server.commit(m.regionName, clientId, lockid);
lockid = -1L;
if(LOG.isDebugEnabled()) {
LOG.debug("deleted columns in row: " + rowName);
}
} catch(Exception e) {
if(lockid != -1L) {
server.abort(m.regionName, clientId, lockid);
}
LOG.error("columns deletion failed in row: " + rowName);
LOG.error(e);
} }
} }
} catch(IOException e) {
e.printStackTrace();
} finally { // Notify region servers that some regions need to be closed and deleted
if(scannerId != -1L) {
if(localKillList.size() != 0) {
killList.putAll(localKillList);
}
// Delete any regions that are not being served
for(Iterator<Text> i = unservedRegions.iterator(); i.hasNext(); ) {
Text regionName = i.next();
try { try {
server.close(scannerId); HRegion.deleteRegion(fs, dir, regionName);
} catch(IOException e) { } catch(IOException e) {
e.printStackTrace(); LOG.error("failed to delete region " + regionName);
LOG.error(e);
} }
} }
scannerId = -1L;
}
for(Iterator<Text> row = rowsToDelete.iterator(); row.hasNext(); ) {
Text rowName = row.next();
if(LOG.isDebugEnabled()) {
LOG.debug("deleting columns in row: " + rowName);
}
try {
long clientId = rand.nextLong();
long lockid = server.startUpdate(m.regionName, clientId, rowName);
server.delete(m.regionName, clientId, lockid, COL_REGIONINFO);
server.delete(m.regionName, clientId, lockid, COL_SERVER);
server.delete(m.regionName, clientId, lockid, COL_STARTCODE);
server.commit(m.regionName, clientId, lockid);
} catch(Exception e) {
e.printStackTrace();
}
} }
} }
if(LOG.isDebugEnabled()) { if(LOG.isDebugEnabled()) {

HMemcache.java
@@ -296,7 +296,7 @@ public class HMemcache {
       for(int i = history.size() - 1; i > 0; i--) {
         backingMaps[i] = history.elementAt(i);
       }
 
       this.keyIterators = new Iterator[backingMaps.length];
       this.keys = new HStoreKey[backingMaps.length];
       this.vals = new BytesWritable[backingMaps.length];
@@ -322,8 +322,10 @@ public class HMemcache {
           }
         }
-      } catch(Exception ex) {
+      } catch(IOException ex) {
+        LOG.error(ex);
         close();
+        throw ex;
       }
     }
@@ -365,7 +367,7 @@ public class HMemcache {
     }
 
     /** Shut down map iterators, and release the lock */
-    public void close() throws IOException {
+    public void close() {
       if(! scannerClosed) {
         try {
           for(int i = 0; i < keys.length; i++) {

HRegion.java
@@ -64,6 +64,19 @@ public class HRegion implements HConstants {
   private static final Log LOG = LogFactory.getLog(HRegion.class);
 
+  /**
+   * Deletes all the files for a HRegion
+   *
+   * @param fs              - the file system object
+   * @param baseDirectory   - base directory for HBase
+   * @param regionName      - name of the region to delete
+   * @throws IOException
+   */
+  public static void deleteRegion(FileSystem fs, Path baseDirectory,
+      Text regionName) throws IOException {
+    fs.delete(HStoreFile.getHRegionDir(baseDirectory, regionName));
+  }
+
   /**
    * Merge two HRegions.  They must be available on the current
    * HRegionServer. Returns a brand-new active HRegion, also
@@ -245,7 +258,7 @@ public class HRegion implements HConstants {
   TreeMap<Long, TreeMap<Text, BytesWritable>> targetColumns
     = new TreeMap<Long, TreeMap<Text, BytesWritable>>();
 
-  HMemcache memcache = new HMemcache();
+  HMemcache memcache;
 
   Path dir;
   HLog log;
@@ -255,9 +268,9 @@ public class HRegion implements HConstants {
   Path regiondir;
 
   class WriteState {
-    public boolean writesOngoing;
-    public boolean writesEnabled;
-    public boolean closed;
+    public volatile boolean writesOngoing;
+    public volatile boolean writesEnabled;
+    public volatile boolean closed;
     public WriteState() {
       this.writesOngoing = true;
       this.writesEnabled = true;
@@ -265,12 +278,13 @@ public class HRegion implements HConstants {
     }
   }
 
-  WriteState writestate = new WriteState();
+  volatile WriteState writestate = new WriteState();
   int recentCommits = 0;
-  int commitsSinceFlush = 0;
+  volatile int commitsSinceFlush = 0;
 
   int maxUnflushedEntries = 0;
   int compactionThreshold = 0;
+  HLocking lock = null;
 
   //////////////////////////////////////////////////////////////////////////////
   // Constructor
@@ -302,10 +316,14 @@ public class HRegion implements HConstants {
     this.fs = fs;
     this.conf = conf;
     this.regionInfo = regionInfo;
+    this.memcache = new HMemcache();
+
     this.writestate.writesOngoing = true;
     this.writestate.writesEnabled = true;
     this.writestate.closed = false;
+    this.lock = new HLocking();
+
     // Declare the regionName.  This is a unique string for the region, used to
     // build a unique filename.
@@ -354,12 +372,22 @@ public class HRegion implements HConstants {
   public HRegionInfo getRegionInfo() {
     return this.regionInfo;
   }
+
+  /** returns true if region is closed */
+  public boolean isClosed() {
+    boolean closed = false;
+    synchronized(writestate) {
+      closed = writestate.closed;
+    }
+    return closed;
+  }
 
   /** Closes and deletes this HRegion. Called when doing a table deletion, for example */
   public void closeAndDelete() throws IOException {
     LOG.info("deleting region: " + regionInfo.regionName);
     close();
-    fs.delete(regiondir);
+    deleteRegion(fs, dir, regionInfo.regionName);
+    LOG.info("region deleted: " + regionInfo.regionName);
   }
 
   /**
@ -373,42 +401,47 @@ public class HRegion implements HConstants {
* time-sensitive thread. * time-sensitive thread.
*/ */
public Vector<HStoreFile> close() throws IOException { public Vector<HStoreFile> close() throws IOException {
boolean shouldClose = false; lock.obtainWriteLock();
synchronized(writestate) { try {
if(writestate.closed) { boolean shouldClose = false;
LOG.info("region " + this.regionInfo.regionName + " closed"); synchronized(writestate) {
return new Vector<HStoreFile>(); if(writestate.closed) {
} LOG.info("region " + this.regionInfo.regionName + " closed");
while(writestate.writesOngoing) { return new Vector<HStoreFile>();
try {
writestate.wait();
} catch (InterruptedException iex) {
} }
while(writestate.writesOngoing) {
try {
writestate.wait();
} catch (InterruptedException iex) {
}
}
writestate.writesOngoing = true;
shouldClose = true;
} }
writestate.writesOngoing = true;
shouldClose = true;
}
if(! shouldClose) { if(! shouldClose) {
return null; return null;
} else { } else {
LOG.info("closing region " + this.regionInfo.regionName); LOG.info("closing region " + this.regionInfo.regionName);
Vector<HStoreFile> allHStoreFiles = internalFlushcache(); Vector<HStoreFile> allHStoreFiles = internalFlushcache();
for(Iterator<HStore> it = stores.values().iterator(); it.hasNext(); ) { for(Iterator<HStore> it = stores.values().iterator(); it.hasNext(); ) {
HStore store = it.next(); HStore store = it.next();
store.close(); store.close();
} }
try { try {
return allHStoreFiles; return allHStoreFiles;
} finally { } finally {
synchronized(writestate) { synchronized(writestate) {
writestate.closed = true; writestate.closed = true;
writestate.writesOngoing = false; writestate.writesOngoing = false;
}
LOG.info("region " + this.regionInfo.regionName + " closed");
} }
LOG.info("region " + this.regionInfo.regionName + " closed");
} }
} finally {
lock.releaseWriteLock();
} }
} }
@@ -418,7 +451,9 @@ public class HRegion implements HConstants {
    *
    * Returns two brand-new (and open) HRegions
    */
-  public HRegion[] closeAndSplit(Text midKey) throws IOException {
+  public HRegion[] closeAndSplit(Text midKey, RegionUnavailableListener listener)
+      throws IOException {
+
     if(((regionInfo.startKey.getLength() != 0)
         && (regionInfo.startKey.compareTo(midKey) > 0))
         || ((regionInfo.endKey.getLength() != 0)
@@ -428,9 +463,6 @@ public class HRegion implements HConstants {
 
     LOG.info("splitting region " + this.regionInfo.regionName);
 
-    // Flush this HRegion out to storage, and turn off flushes
-    // or compactions until close() is called.
-
     Path splits = new Path(regiondir, SPLITDIR);
     if(! fs.exists(splits)) {
       fs.mkdirs(splits);
@@ -453,6 +485,10 @@ public class HRegion implements HConstants {
     }
 
     TreeSet<HStoreFile> alreadySplit = new TreeSet<HStoreFile>();
+
+    // Flush this HRegion out to storage, and turn off flushes
+    // or compactions until close() is called.
+
     Vector<HStoreFile> hstoreFilesToSplit = flushcache(true);
     for(Iterator<HStoreFile> it = hstoreFilesToSplit.iterator(); it.hasNext(); ) {
       HStoreFile hsf = it.next();
@@ -472,8 +508,12 @@ public class HRegion implements HConstants {
       alreadySplit.add(hsf);
     }
 
-    // We just copied most of the data. Now close the HRegion
-    // and copy the small remainder
+    // We just copied most of the data.
+    // Notify the caller that we are about to close the region
+    listener.regionIsUnavailable(this.getRegionName());
+
+    // Now close the HRegion and copy the small remainder
 
     hstoreFilesToSplit = close();
     for(Iterator<HStoreFile> it = hstoreFilesToSplit.iterator(); it.hasNext(); ) {
@ -577,19 +617,26 @@ public class HRegion implements HConstants {
* @return - true if the region should be split * @return - true if the region should be split
*/ */
public boolean needsSplit(Text midKey) { public boolean needsSplit(Text midKey) {
Text key = new Text(); lock.obtainReadLock();
long maxSize = 0;
for(Iterator<HStore> i = stores.values().iterator(); i.hasNext(); ) { try {
long size = i.next().getLargestFileSize(key); Text key = new Text();
long maxSize = 0;
if(size > maxSize) { // Largest so far
maxSize = size; for(Iterator<HStore> i = stores.values().iterator(); i.hasNext(); ) {
midKey.set(key); long size = i.next().getLargestFileSize(key);
if(size > maxSize) { // Largest so far
maxSize = size;
midKey.set(key);
}
} }
}
return (maxSize > (DESIRED_MAX_FILE_SIZE + (DESIRED_MAX_FILE_SIZE / 2))); return (maxSize > (DESIRED_MAX_FILE_SIZE + (DESIRED_MAX_FILE_SIZE / 2)));
} finally {
lock.releaseReadLock();
}
} }
/** /**
@ -597,11 +644,16 @@ public class HRegion implements HConstants {
*/ */
public boolean needsCompaction() { public boolean needsCompaction() {
boolean needsCompaction = false; boolean needsCompaction = false;
for(Iterator<HStore> i = stores.values().iterator(); i.hasNext(); ) { lock.obtainReadLock();
if(i.next().getNMaps() > compactionThreshold) { try {
needsCompaction = true; for(Iterator<HStore> i = stores.values().iterator(); i.hasNext(); ) {
break; if(i.next().getNMaps() > compactionThreshold) {
needsCompaction = true;
break;
}
} }
} finally {
lock.releaseReadLock();
} }
return needsCompaction; return needsCompaction;
} }
@ -621,15 +673,20 @@ public class HRegion implements HConstants {
*/ */
public boolean compactStores() throws IOException { public boolean compactStores() throws IOException {
boolean shouldCompact = false; boolean shouldCompact = false;
synchronized(writestate) { lock.obtainReadLock();
if((! writestate.writesOngoing) try {
&& writestate.writesEnabled synchronized(writestate) {
&& (! writestate.closed) if((! writestate.writesOngoing)
&& recentCommits > MIN_COMMITS_FOR_COMPACTION) { && writestate.writesEnabled
&& (! writestate.closed)
writestate.writesOngoing = true; && recentCommits > MIN_COMMITS_FOR_COMPACTION) {
shouldCompact = true;
writestate.writesOngoing = true;
shouldCompact = true;
}
} }
} finally {
lock.releaseReadLock();
} }
if(! shouldCompact) { if(! shouldCompact) {
@ -637,6 +694,7 @@ public class HRegion implements HConstants {
return false; return false;
} else { } else {
lock.obtainWriteLock();
try { try {
LOG.info("starting compaction on region " + this.regionInfo.regionName); LOG.info("starting compaction on region " + this.regionInfo.regionName);
for(Iterator<HStore> it = stores.values().iterator(); it.hasNext(); ) { for(Iterator<HStore> it = stores.values().iterator(); it.hasNext(); ) {
@ -652,6 +710,7 @@ public class HRegion implements HConstants {
recentCommits = 0; recentCommits = 0;
writestate.notifyAll(); writestate.notifyAll();
} }
lock.releaseWriteLock();
} }
} }
} }
@ -872,22 +931,28 @@ public class HRegion implements HConstants {
private BytesWritable[] get(HStoreKey key, int numVersions) throws IOException { private BytesWritable[] get(HStoreKey key, int numVersions) throws IOException {
// Check the memcache lock.obtainReadLock();
try {
// Check the memcache
BytesWritable[] result = memcache.get(key, numVersions); BytesWritable[] result = memcache.get(key, numVersions);
if(result != null) { if(result != null) {
return result; return result;
}
// If unavailable in memcache, check the appropriate HStore
Text colFamily = HStoreKey.extractFamily(key.getColumn());
HStore targetStore = stores.get(colFamily);
if(targetStore == null) {
return null;
}
return targetStore.get(key, numVersions);
} finally {
lock.releaseReadLock();
} }
// If unavailable in memcache, check the appropriate HStore
Text colFamily = HStoreKey.extractFamily(key.getColumn());
HStore targetStore = stores.get(colFamily);
if(targetStore == null) {
return null;
}
return targetStore.get(key, numVersions);
} }
/** /**
@ -903,13 +968,19 @@ public class HRegion implements HConstants {
public TreeMap<Text, BytesWritable> getFull(Text row) throws IOException { public TreeMap<Text, BytesWritable> getFull(Text row) throws IOException {
HStoreKey key = new HStoreKey(row, System.currentTimeMillis()); HStoreKey key = new HStoreKey(row, System.currentTimeMillis());
TreeMap<Text, BytesWritable> memResult = memcache.getFull(key); lock.obtainReadLock();
for(Iterator<Text> it = stores.keySet().iterator(); it.hasNext(); ) { try {
Text colFamily = it.next(); TreeMap<Text, BytesWritable> memResult = memcache.getFull(key);
HStore targetStore = stores.get(colFamily); for(Iterator<Text> it = stores.keySet().iterator(); it.hasNext(); ) {
targetStore.getFull(key, memResult); Text colFamily = it.next();
HStore targetStore = stores.get(colFamily);
targetStore.getFull(key, memResult);
}
return memResult;
} finally {
lock.releaseReadLock();
} }
return memResult;
} }
/** /**
@ -917,18 +988,24 @@ public class HRegion implements HConstants {
* columns. This Iterator must be closed by the caller. * columns. This Iterator must be closed by the caller.
*/ */
public HInternalScannerInterface getScanner(Text[] cols, Text firstRow) throws IOException { public HInternalScannerInterface getScanner(Text[] cols, Text firstRow) throws IOException {
TreeSet<Text> families = new TreeSet<Text>(); lock.obtainReadLock();
for(int i = 0; i < cols.length; i++) { try {
families.add(HStoreKey.extractFamily(cols[i])); TreeSet<Text> families = new TreeSet<Text>();
} for(int i = 0; i < cols.length; i++) {
families.add(HStoreKey.extractFamily(cols[i]));
}
HStore[] storelist = new HStore[families.size()]; HStore[] storelist = new HStore[families.size()];
int i = 0; int i = 0;
for(Iterator<Text> it = families.iterator(); it.hasNext(); ) { for(Iterator<Text> it = families.iterator(); it.hasNext(); ) {
Text family = it.next(); Text family = it.next();
storelist[i++] = stores.get(family); storelist[i++] = stores.get(family);
}
return new HScanner(cols, firstRow, memcache, storelist);
} finally {
lock.releaseReadLock();
} }
return new HScanner(cols, firstRow, memcache, storelist);
} }
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
@ -949,8 +1026,14 @@ public class HRegion implements HConstants {
// We obtain a per-row lock, so other clients will // We obtain a per-row lock, so other clients will
// block while one client performs an update. // block while one client performs an update.
return obtainLock(row); lock.obtainReadLock();
try {
return obtainLock(row);
} finally {
lock.releaseReadLock();
}
} }
/** /**
@ -1176,9 +1259,16 @@ public class HRegion implements HConstants {
/** Create an HScanner with a handle on many HStores. */ /** Create an HScanner with a handle on many HStores. */
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
public HScanner(Text[] cols, Text firstRow, HMemcache memcache, HStore[] stores) throws IOException { public HScanner(Text[] cols, Text firstRow, HMemcache memcache, HStore[] stores)
throws IOException {
long scanTime = System.currentTimeMillis(); long scanTime = System.currentTimeMillis();
this.scanners = new HInternalScannerInterface[stores.length + 1]; this.scanners = new HInternalScannerInterface[stores.length + 1];
for(int i = 0; i < this.scanners.length; i++) {
this.scanners[i] = null;
}
this.resultSets = new TreeMap[scanners.length]; this.resultSets = new TreeMap[scanners.length];
this.keys = new HStoreKey[scanners.length]; this.keys = new HStoreKey[scanners.length];
this.wildcardMatch = false; this.wildcardMatch = false;
@ -1189,28 +1279,38 @@ public class HRegion implements HConstants {
// NOTE: the memcache scanner should be the first scanner // NOTE: the memcache scanner should be the first scanner
HInternalScannerInterface scanner = try {
memcache.getScanner(scanTime, cols, firstRow); HInternalScannerInterface scanner =
memcache.getScanner(scanTime, cols, firstRow);
if(scanner.isWildcardScanner()) {
this.wildcardMatch = true;
}
if(scanner.isMultipleMatchScanner()) {
this.multipleMatchers = true;
}
scanners[0] = scanner;
for(int i = 0; i < stores.length; i++) {
scanner = stores[i].getScanner(scanTime, cols, firstRow);
if(scanner.isWildcardScanner()) { if(scanner.isWildcardScanner()) {
this.wildcardMatch = true; this.wildcardMatch = true;
} }
if(scanner.isMultipleMatchScanner()) { if(scanner.isMultipleMatchScanner()) {
this.multipleMatchers = true; this.multipleMatchers = true;
} }
scanners[i + 1] = scanner; scanners[0] = scanner;
}
for(int i = 0; i < stores.length; i++) {
scanner = stores[i].getScanner(scanTime, cols, firstRow);
if(scanner.isWildcardScanner()) {
this.wildcardMatch = true;
}
if(scanner.isMultipleMatchScanner()) {
this.multipleMatchers = true;
}
scanners[i + 1] = scanner;
}
} catch(IOException e) {
for(int i = 0; i < this.scanners.length; i++) {
if(scanners[i] != null) {
closeScanner(i);
}
}
throw e;
}
for(int i = 0; i < scanners.length; i++) { for(int i = 0; i < scanners.length; i++) {
keys[i] = new HStoreKey(); keys[i] = new HStoreKey();
resultSets[i] = new TreeMap<Text, BytesWritable>(); resultSets[i] = new TreeMap<Text, BytesWritable>();
@@ -1319,7 +1419,7 @@
     }
 
     /** Shut down a single scanner */
-    void closeScanner(int i) throws IOException {
+    void closeScanner(int i) {
       try {
         scanners[i].close();
@@ -1331,7 +1431,7 @@
     }
 
     /** All done with the scanner. */
-    public void close() throws IOException {
+    public void close() {
       for(int i = 0; i < scanners.length; i++) {
         if(scanners[i] != null) {
           closeScanner(i);

HRegionInterface.java
@@ -30,7 +30,7 @@ public interface HRegionInterface extends VersionedProtocol {
 
   // Get metainfo about an HRegion
 
-  public HRegionInfo getRegionInfo(Text regionName);
+  public HRegionInfo getRegionInfo(Text regionName) throws NotServingRegionException;
 
   // GET methods for an HRegion.
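
Declaring NotServingRegionException on getRegionInfo() is what lets a region server tell a client that its cached region location is stale, which in turn drives the retry loops added to HClient above. A minimal sketch of the server-side contract follows; RegionLookupDemo, its onlineRegions map, and the locally defined exception are hypothetical stand-ins, not the patch's HRegionServer code.

    import java.io.IOException;
    import java.util.Map;
    import java.util.TreeMap;

    // Illustrative stand-ins only; the real classes live in org.apache.hadoop.hbase.
    class NotServingRegionException extends IOException {
      NotServingRegionException(String msg) { super(msg); }
    }

    class RegionLookupDemo {
      // Hypothetical map of region name -> region handle, as a region server might keep.
      private final Map<String, Object> onlineRegions = new TreeMap<>();

      // Sketch of the contract: throw rather than return null when the region
      // is not served here, so the client knows to re-locate and retry.
      Object getRegion(String regionName) throws NotServingRegionException {
        Object region = onlineRegions.get(regionName);
        if (region == null) {
          throw new NotServingRegionException("not serving region " + regionName);
        }
        return region;
      }

      public static void main(String[] args) {
        RegionLookupDemo server = new RegionLookupDemo();
        try {
          server.getRegion("--ROOT--,,0");
        } catch (NotServingRegionException e) {
          System.out.println("caught: " + e.getMessage());
        }
      }
    }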

HRegionServer.java
@@ -46,7 +46,7 @@ public class HRegionServer
 
   private volatile boolean stopRequested;
   private Path regionDir;
-  private HServerAddress address;
+  private HServerInfo info;
   private Configuration conf;
   private Random rand;
   private TreeMap<Text, HRegion> regions;               // region name -> HRegion
@@ -64,24 +64,26 @@ public class HRegionServer
   private Thread splitOrCompactCheckerThread;
   private Integer splitOrCompactLock = new Integer(0);
 
-  private class SplitOrCompactChecker implements Runnable {
+  private class SplitOrCompactChecker implements Runnable, RegionUnavailableListener {
     private HClient client = new HClient(conf);
 
-    private class SplitRegion {
-      public HRegion region;
-      public Text midKey;
-
-      SplitRegion(HRegion region, Text midKey) {
-        this.region = region;
-        this.midKey = midKey;
-      }
-    }
+    /* (non-Javadoc)
+     * @see org.apache.hadoop.hbase.RegionUnavailableListener#regionIsUnavailable(org.apache.hadoop.io.Text)
+     */
+    public void regionIsUnavailable(Text regionName) {
+      lock.obtainWriteLock();
+      regions.remove(regionName);
+      lock.releaseWriteLock();
+    }
 
+    /* (non-Javadoc)
+     * @see java.lang.Runnable#run()
+     */
     public void run() {
       while(! stopRequested) {
         long startTime = System.currentTimeMillis();
 
-        synchronized(splitOrCompactLock) {
+        synchronized(splitOrCompactLock) { // Don't interrupt us while we're working
 
           // Grab a list of regions to check
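
SplitOrCompactChecker now also implements RegionUnavailableListener, so HRegion.closeAndSplit() (see the HRegion changes above) can ask the server to stop serving the old region before it is closed. The listener interface itself is added by this patch but its file does not appear in the hunks shown here; based on the @see reference above, its shape is presumably along these lines (the javadoc wording is an assumption):

    import org.apache.hadoop.io.Text;

    /** Callback used to announce that a region is about to become unavailable. */
    public interface RegionUnavailableListener {
      void regionIsUnavailable(Text regionName);
    }
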
@ -93,85 +95,81 @@ public class HRegionServer
lock.releaseReadLock(); lock.releaseReadLock();
} }
// Check to see if they need splitting or compacting
Vector<SplitRegion> toSplit = new Vector<SplitRegion>();
Vector<HRegion> toCompact = new Vector<HRegion>();
for(Iterator<HRegion> it = regionsToCheck.iterator(); it.hasNext(); ) {
HRegion cur = it.next();
Text midKey = new Text();
if(cur.needsCompaction()) {
toCompact.add(cur);
} else if(cur.needsSplit(midKey)) {
toSplit.add(new SplitRegion(cur, midKey));
}
}
try { try {
for(Iterator<HRegion>it = toCompact.iterator(); it.hasNext(); ) { for(Iterator<HRegion>it = regionsToCheck.iterator(); it.hasNext(); ) {
it.next().compactStores(); HRegion cur = it.next();
}
if(cur.isClosed()) {
for(Iterator<SplitRegion> it = toSplit.iterator(); it.hasNext(); ) { continue; // Skip if closed
SplitRegion r = it.next();
lock.obtainWriteLock();
regions.remove(r.region.getRegionName());
lock.releaseWriteLock();
HRegion[] newRegions = null;
Text oldRegion = r.region.getRegionName();
LOG.info("splitting region: " + oldRegion);
newRegions = r.region.closeAndSplit(r.midKey);
// When a region is split, the META table needs to updated if we're
// splitting a 'normal' region, and the ROOT table needs to be
// updated if we are splitting a META region.
Text tableToUpdate =
(oldRegion.find(META_TABLE_NAME.toString()) == 0) ?
ROOT_TABLE_NAME : META_TABLE_NAME;
if(LOG.isDebugEnabled()) {
LOG.debug("region split complete. updating meta");
} }
if(cur.needsCompaction()) {
// The best time to split a region is right after it has been compacted
if(cur.compactStores()) {
Text midKey = new Text();
if(cur.needsSplit(midKey)) {
Text oldRegion = cur.getRegionName();
client.openTable(tableToUpdate); LOG.info("splitting region: " + oldRegion);
long lockid = client.startUpdate(oldRegion);
client.delete(lockid, COL_REGIONINFO);
client.delete(lockid, COL_SERVER);
client.delete(lockid, COL_STARTCODE);
client.commit(lockid);
for(int i = 0; i < newRegions.length; i++) { HRegion[] newRegions = cur.closeAndSplit(midKey, this);
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
DataOutputStream out = new DataOutputStream(bytes);
newRegions[i].getRegionInfo().write(out);
lockid = client.startUpdate(newRegions[i].getRegionName()); // When a region is split, the META table needs to updated if we're
client.put(lockid, COL_REGIONINFO, bytes.toByteArray()); // splitting a 'normal' region, and the ROOT table needs to be
client.commit(lockid); // updated if we are splitting a META region.
if(LOG.isDebugEnabled()) {
LOG.debug("region split complete. updating meta");
}
Text tableToUpdate =
(oldRegion.find(META_TABLE_NAME.toString()) == 0) ?
ROOT_TABLE_NAME : META_TABLE_NAME;
client.openTable(tableToUpdate);
long lockid = client.startUpdate(oldRegion);
client.delete(lockid, COL_REGIONINFO);
client.delete(lockid, COL_SERVER);
client.delete(lockid, COL_STARTCODE);
client.commit(lockid);
for(int i = 0; i < newRegions.length; i++) {
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
DataOutputStream out = new DataOutputStream(bytes);
newRegions[i].getRegionInfo().write(out);
lockid = client.startUpdate(newRegions[i].getRegionName());
client.put(lockid, COL_REGIONINFO, bytes.toByteArray());
client.put(lockid, COL_SERVER,
info.getServerAddress().toString().getBytes(UTF8_ENCODING));
client.put(lockid, COL_STARTCODE,
String.valueOf(info.getStartCode()).getBytes(UTF8_ENCODING));
client.commit(lockid);
}
// Now tell the master about the new regions
if(LOG.isDebugEnabled()) {
LOG.debug("reporting region split to master");
}
reportSplit(newRegions[0].getRegionInfo(), newRegions[1].getRegionInfo());
LOG.info("region split successful. old region=" + oldRegion
+ ", new regions: " + newRegions[0].getRegionName() + ", "
+ newRegions[1].getRegionName());
// Finally, start serving the new regions
lock.obtainWriteLock();
regions.put(newRegions[0].getRegionName(), newRegions[0]);
regions.put(newRegions[1].getRegionName(), newRegions[1]);
lock.releaseWriteLock();
}
}
} }
// Now tell the master about the new regions
if(LOG.isDebugEnabled()) {
LOG.debug("reporting region split to master");
}
reportSplit(newRegions[0].getRegionInfo(), newRegions[1].getRegionInfo());
LOG.info("region split successful. old region=" + oldRegion
+ ", new regions: " + newRegions[0].getRegionName() + ", "
+ newRegions[1].getRegionName());
newRegions[0].close();
newRegions[1].close();
} }
} catch(IOException e) { } catch(IOException e) {
//TODO: What happens if this fails? Are we toast? //TODO: What happens if this fails? Are we toast?
@@ -228,6 +226,10 @@ public class HRegionServer
         for(Iterator<HRegion> it = toFlush.iterator(); it.hasNext(); ) {
           HRegion cur = it.next();
 
+          if(cur.isClosed()) {                  // Skip if closed
+            continue;
+          }
+
           try {
             cur.optionallyFlush();
@@ -330,8 +332,7 @@ public class HRegionServer
 
   /** Start a HRegionServer at an indicated location */
   public HRegionServer(Path regionDir, HServerAddress address,
-      Configuration conf)
-    throws IOException {
+      Configuration conf) throws IOException {
 
     // Basic setup
     this.stopRequested = false;
@@ -369,19 +370,25 @@ public class HRegionServer
     try {
       // Server to handle client requests
       this.server = RPC.getServer(this, address.getBindAddress().toString(),
        address.getPort(), conf.getInt("hbase.regionserver.handler.count", 10),
        false, conf);
-      this.address = new HServerAddress(server.getListenerAddress());
+      this.info = new HServerInfo(new HServerAddress(server.getListenerAddress()),
+          this.rand.nextLong());
 
       // Local file paths
       String serverName =
-        this.address.getBindAddress() + "_" + this.address.getPort();
+        this.info.getServerAddress().getBindAddress() + "_"
+        + this.info.getServerAddress().getPort();
+
       Path newlogdir = new Path(regionDir, "log" + "_" + serverName);
       this.oldlogfile = new Path(regionDir, "oldlogfile" + "_" + serverName);
 
       // Logging
       this.fs = FileSystem.get(conf);
       HLog.consolidateOldLog(newlogdir, oldlogfile, fs, conf);
+
       // TODO: Now we have a consolidated log for all regions, sort and
@@ -393,13 +400,14 @@ public class HRegionServer
       this.logRollerThread = new Thread(logRoller, "HRegionServer.logRoller");
 
       // Remote HMaster
-      this.hbaseMaster = (HMasterRegionInterface)RPC.
-        waitForProxy(HMasterRegionInterface.class,
-            HMasterRegionInterface.versionID,
+      this.hbaseMaster = (HMasterRegionInterface)RPC.waitForProxy(
+          HMasterRegionInterface.class, HMasterRegionInterface.versionID,
           new HServerAddress(conf.get(MASTER_ADDRESS)).getInetSocketAddress(),
           conf);
 
       // Threads
       this.workerThread.start();
       this.cacheFlusherThread.start();
       this.splitOrCompactCheckerThread.start();
@@ -452,7 +460,7 @@ public class HRegionServer
this.server.join(); this.server.join();
} catch(InterruptedException iex) { } catch(InterruptedException iex) {
} }
LOG.info("HRegionServer stopped at: " + address.toString()); LOG.info("HRegionServer stopped at: " + info.getServerAddress().toString());
} }
/** /**
@@ -462,7 +470,6 @@ public class HRegionServer
*/ */
public void run() { public void run() {
while(! stopRequested) { while(! stopRequested) {
HServerInfo info = new HServerInfo(address, rand.nextLong());
long lastMsg = 0; long lastMsg = 0;
long waitTime; long waitTime;
@@ -557,7 +564,7 @@ public class HRegionServer
} }
} catch(IOException e) { } catch(IOException e) {
e.printStackTrace(); LOG.error(e);
} }
} }
@@ -580,7 +587,7 @@ public class HRegionServer
} }
} }
try { try {
LOG.info("stopping server at: " + address.toString()); LOG.info("stopping server at: " + info.getServerAddress().toString());
// Send interrupts to wake up threads if sleeping so they notice shutdown. // Send interrupts to wake up threads if sleeping so they notice shutdown.
@@ -761,58 +768,68 @@ public class HRegionServer
throws IOException { throws IOException {
this.lock.obtainWriteLock(); this.lock.obtainWriteLock();
HRegion region = null;
try { try {
HRegion region = regions.remove(info.regionName); region = regions.remove(info.regionName);
if(region != null) {
region.close();
if(reportWhenCompleted) {
reportClose(region);
}
}
} finally { } finally {
this.lock.releaseWriteLock(); this.lock.releaseWriteLock();
} }
if(region != null) {
region.close();
if(reportWhenCompleted) {
reportClose(region);
}
}
} }
private void closeAndDeleteRegion(HRegionInfo info) throws IOException { private void closeAndDeleteRegion(HRegionInfo info) throws IOException {
this.lock.obtainWriteLock(); this.lock.obtainWriteLock();
HRegion region = null;
try { try {
HRegion region = regions.remove(info.regionName); region = regions.remove(info.regionName);
if(region != null) {
region.closeAndDelete();
}
} finally { } finally {
this.lock.releaseWriteLock(); this.lock.releaseWriteLock();
} }
if(region != null) {
if(LOG.isDebugEnabled()) {
LOG.debug("deleting region " + info.regionName);
}
region.closeAndDelete();
if(LOG.isDebugEnabled()) {
LOG.debug("region " + info.regionName + " deleted");
}
}
} }
/** Called either when the master tells us to restart or from stop() */ /** Called either when the master tells us to restart or from stop() */
private void closeAllRegions() { private void closeAllRegions() {
Vector<HRegion> regionsToClose = new Vector<HRegion>();
this.lock.obtainWriteLock(); this.lock.obtainWriteLock();
try { try {
for(Iterator<HRegion> it = regions.values().iterator(); it.hasNext(); ) { regionsToClose.addAll(regions.values());
HRegion region = it.next();
if (LOG.isDebugEnabled()) {
LOG.debug("closing region " + region.getRegionName());
}
try {
region.close();
} catch(IOException e) {
e.printStackTrace();
}
}
regions.clear(); regions.clear();
} finally { } finally {
this.lock.releaseWriteLock(); this.lock.releaseWriteLock();
} }
for(Iterator<HRegion> it = regionsToClose.iterator(); it.hasNext(); ) {
HRegion region = it.next();
if (LOG.isDebugEnabled()) {
LOG.debug("closing region " + region.getRegionName());
}
try {
region.close();
LOG.debug("region closed " + region.getRegionName());
} catch(IOException e) {
LOG.error("error closing region " + region.getRegionName(), e);
}
}
} }
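The three close paths above (closeRegion, closeAndDeleteRegion, closeAllRegions) now share one pattern: the regions map is modified while the write lock is held, and the slow close()/closeAndDelete() calls run only after the lock is released. A minimal sketch of that pattern, with illustrative names (RegionMapSketch, open) rather than the actual HRegionServer members:

import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class RegionMapSketch<K, V extends Closeable> {
  private final Map<K, V> open = new HashMap<K, V>();
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  void closeAll() {
    List<V> toClose = new ArrayList<V>();
    lock.writeLock().lock();
    try {
      toClose.addAll(open.values()); // snapshot the map while holding the lock
      open.clear();
    } finally {
      lock.writeLock().unlock();     // release before doing any slow I/O
    }
    for (V v : toClose) {
      try {
        v.close();                   // close outside the lock; keep going on error
      } catch (IOException e) {
        // log and continue with the remaining entries
      }
    }
  }
}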
/***************************************************************************** /*****************************************************************************
@@ -847,20 +864,14 @@ public class HRegionServer
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
/** Obtain a table descriptor for the given region */ /** Obtain a table descriptor for the given region */
public HRegionInfo getRegionInfo(Text regionName) { public HRegionInfo getRegionInfo(Text regionName) throws NotServingRegionException {
HRegion region = getRegion(regionName); HRegion region = getRegion(regionName);
if(region == null) {
return null;
}
return region.getRegionInfo(); return region.getRegionInfo();
} }
/** Get the indicated row/column */ /** Get the indicated row/column */
public BytesWritable get(Text regionName, Text row, Text column) throws IOException { public BytesWritable get(Text regionName, Text row, Text column) throws IOException {
HRegion region = getRegion(regionName); HRegion region = getRegion(regionName);
if(region == null) {
throw new IOException("Not serving region " + regionName);
}
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("get " + row.toString() + ", " + column.toString()); LOG.debug("get " + row.toString() + ", " + column.toString());
@@ -877,9 +888,6 @@ public class HRegionServer
int numVersions) throws IOException { int numVersions) throws IOException {
HRegion region = getRegion(regionName); HRegion region = getRegion(regionName);
if(region == null) {
throw new IOException("Not serving region " + regionName);
}
BytesWritable[] results = region.get(row, column, numVersions); BytesWritable[] results = region.get(row, column, numVersions);
if(results != null) { if(results != null) {
@@ -893,9 +901,6 @@ public class HRegionServer
long timestamp, int numVersions) throws IOException { long timestamp, int numVersions) throws IOException {
HRegion region = getRegion(regionName); HRegion region = getRegion(regionName);
if(region == null) {
throw new IOException("Not serving region " + regionName);
}
BytesWritable[] results = region.get(row, column, timestamp, numVersions); BytesWritable[] results = region.get(row, column, timestamp, numVersions);
if(results != null) { if(results != null) {
@@ -907,9 +912,6 @@ public class HRegionServer
/** Get all the columns (along with their names) for a given row. */ /** Get all the columns (along with their names) for a given row. */
public LabelledData[] getRow(Text regionName, Text row) throws IOException { public LabelledData[] getRow(Text regionName, Text row) throws IOException {
HRegion region = getRegion(regionName); HRegion region = getRegion(regionName);
if(region == null) {
throw new IOException("Not serving region " + regionName);
}
TreeMap<Text, BytesWritable> map = region.getFull(row); TreeMap<Text, BytesWritable> map = region.getFull(row);
LabelledData result[] = new LabelledData[map.size()]; LabelledData result[] = new LabelledData[map.size()];
@@ -949,9 +951,6 @@ public class HRegionServer
throws IOException { throws IOException {
HRegion region = getRegion(regionName); HRegion region = getRegion(regionName);
if(region == null) {
throw new IOException("Not serving region " + regionName);
}
long lockid = region.startUpdate(row); long lockid = region.startUpdate(row);
leases.createLease(new Text(String.valueOf(clientid)), leases.createLease(new Text(String.valueOf(clientid)),
@@ -966,9 +965,6 @@ public class HRegionServer
BytesWritable val) throws IOException { BytesWritable val) throws IOException {
HRegion region = getRegion(regionName); HRegion region = getRegion(regionName);
if(region == null) {
throw new IOException("Not serving region " + regionName);
}
leases.renewLease(new Text(String.valueOf(clientid)), leases.renewLease(new Text(String.valueOf(clientid)),
new Text(String.valueOf(lockid))); new Text(String.valueOf(lockid)));
@@ -981,9 +977,6 @@ public class HRegionServer
throws IOException { throws IOException {
HRegion region = getRegion(regionName); HRegion region = getRegion(regionName);
if(region == null) {
throw new IOException("Not serving region " + regionName);
}
leases.renewLease(new Text(String.valueOf(clientid)), leases.renewLease(new Text(String.valueOf(clientid)),
new Text(String.valueOf(lockid))); new Text(String.valueOf(lockid)));
@@ -996,9 +989,6 @@ public class HRegionServer
throws IOException { throws IOException {
HRegion region = getRegion(regionName); HRegion region = getRegion(regionName);
if(region == null) {
throw new IOException("Not serving region " + regionName);
}
leases.cancelLease(new Text(String.valueOf(clientid)), leases.cancelLease(new Text(String.valueOf(clientid)),
new Text(String.valueOf(lockid))); new Text(String.valueOf(lockid)));
@@ -1011,9 +1001,6 @@ public class HRegionServer
throws IOException { throws IOException {
HRegion region = getRegion(regionName); HRegion region = getRegion(regionName);
if(region == null) {
throw new IOException("Not serving region " + regionName);
}
leases.cancelLease(new Text(String.valueOf(clientid)), leases.cancelLease(new Text(String.valueOf(clientid)),
new Text(String.valueOf(lockid))); new Text(String.valueOf(lockid)));
@@ -1028,14 +1015,20 @@ public class HRegionServer
} }
/** Private utility method for safely obtaining an HRegion handle. */ /** Private utility method for safely obtaining an HRegion handle. */
private HRegion getRegion(Text regionName) { private HRegion getRegion(Text regionName) throws NotServingRegionException {
this.lock.obtainReadLock(); this.lock.obtainReadLock();
HRegion region = null;
try { try {
return regions.get(regionName); region = regions.get(regionName);
} finally { } finally {
this.lock.releaseReadLock(); this.lock.releaseReadLock();
} }
if(region == null) {
throw new NotServingRegionException(regionName.toString());
}
return region;
} }
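getRegion() is now the single place that checks whether this server holds a region: the lookup happens under the read lock and a missing region raises NotServingRegionException, which is why the per-method "if(region == null) throw new IOException(...)" blocks above could be deleted. A rough sketch of that lookup with placeholder types, assuming it lives in org.apache.hadoop.hbase next to the new exception class:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class RegionLookupSketch<K, R> {
  private final Map<K, R> regions = new HashMap<K, R>();
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  R getRegion(K regionName) throws NotServingRegionException {
    R region;
    lock.readLock().lock();
    try {
      region = regions.get(regionName); // only the map read needs the lock
    } finally {
      lock.readLock().unlock();
    }
    if (region == null) {
      // Callers no longer null-check; the exception carries the region name.
      throw new NotServingRegionException(String.valueOf(regionName));
    }
    return region;
  }
}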
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
@@ -1051,14 +1044,12 @@ public class HRegionServer
} }
public void leaseExpired() { public void leaseExpired() {
HInternalScannerInterface s = scanners.remove(scannerName); HInternalScannerInterface s = null;
synchronized(scanners) {
s = scanners.remove(scannerName);
}
if(s != null) { if(s != null) {
try { s.close();
s.close();
} catch(IOException e) {
e.printStackTrace();
}
} }
} }
} }
@@ -1068,16 +1059,14 @@ public class HRegionServer
throws IOException { throws IOException {
HRegion r = getRegion(regionName); HRegion r = getRegion(regionName);
if(r == null) {
throw new IOException("Not serving region " + regionName);
}
long scannerId = -1L; long scannerId = -1L;
try { try {
HInternalScannerInterface s = r.getScanner(cols, firstRow); HInternalScannerInterface s = r.getScanner(cols, firstRow);
scannerId = rand.nextLong(); scannerId = rand.nextLong();
Text scannerName = new Text(String.valueOf(scannerId)); Text scannerName = new Text(String.valueOf(scannerId));
scanners.put(scannerName, s); synchronized(scanners) {
scanners.put(scannerName, s);
}
leases.createLease(scannerName, scannerName, new ScannerListener(scannerName)); leases.createLease(scannerName, scannerName, new ScannerListener(scannerName));
} catch(IOException e) { } catch(IOException e) {
@@ -1121,16 +1110,14 @@ public class HRegionServer
public void close(long scannerId) throws IOException { public void close(long scannerId) throws IOException {
Text scannerName = new Text(String.valueOf(scannerId)); Text scannerName = new Text(String.valueOf(scannerId));
HInternalScannerInterface s = scanners.remove(scannerName); HInternalScannerInterface s = null;
synchronized(scanners) {
s = scanners.remove(scannerName);
}
if(s == null) { if(s == null) {
throw new IOException("unknown scanner"); throw new IOException("unknown scanner");
} }
try { s.close();
s.close();
} catch(IOException ex) {
ex.printStackTrace();
}
leases.cancelLease(scannerName, scannerName); leases.cancelLease(scannerName, scannerName);
} }
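Both the lease-expiry path and the explicit close(scannerId) above now take the scanner out of the map inside a synchronized(scanners) block and call close() afterwards, so the map's monitor is never held across scanner I/O. A condensed sketch of that registry; the nested Scanner interface is a stand-in, not the real HInternalScannerInterface:

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

class ScannerRegistrySketch {
  interface Scanner { void close(); }          // stand-in for the scanner type

  private final Map<String, Scanner> scanners = new HashMap<String, Scanner>();

  void open(String name, Scanner s) {
    synchronized (scanners) {
      scanners.put(name, s);                   // registration guarded by the map's monitor
    }
  }

  void close(String name) throws IOException {
    Scanner s;
    synchronized (scanners) {
      s = scanners.remove(name);               // only the removal is synchronized
    }
    if (s == null) {
      throw new IOException("unknown scanner");
    }
    s.close();                                 // potentially slow; runs outside the monitor
  }
}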

View File

@@ -342,8 +342,14 @@ public class HStore {
} }
} }
public synchronized Vector<HStoreFile> getAllMapFiles() { public Vector<HStoreFile> getAllMapFiles() {
return new Vector<HStoreFile>(mapFiles.values()); this.lock.obtainReadLock();
try {
return new Vector<HStoreFile>(mapFiles.values());
} finally {
this.lock.releaseReadLock();
}
} }
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
@@ -938,7 +944,9 @@ public class HStore {
class HStoreScanner extends HAbstractScanner { class HStoreScanner extends HAbstractScanner {
private MapFile.Reader[] readers; private MapFile.Reader[] readers;
public HStoreScanner(long timestamp, Text[] targetCols, Text firstRow) throws IOException { public HStoreScanner(long timestamp, Text[] targetCols, Text firstRow)
throws IOException {
super(timestamp, targetCols); super(timestamp, targetCols);
lock.obtainReadLock(); lock.obtainReadLock();
@@ -976,6 +984,7 @@ public class HStore {
} }
} catch (Exception ex) { } catch (Exception ex) {
LOG.error(ex);
close(); close();
} }
} }
@@ -1021,10 +1030,15 @@ public class HStore {
} }
/** Close down the indicated reader. */ /** Close down the indicated reader. */
void closeSubScanner(int i) throws IOException { void closeSubScanner(int i) {
try { try {
if(readers[i] != null) { if(readers[i] != null) {
readers[i].close(); try {
readers[i].close();
} catch(IOException e) {
LOG.error(e);
}
} }
} finally { } finally {
@@ -1035,12 +1049,17 @@ public class HStore {
} }
/** Shut it down! */ /** Shut it down! */
public void close() throws IOException { public void close() {
if(! scannerClosed) { if(! scannerClosed) {
try { try {
for(int i = 0; i < readers.length; i++) { for(int i = 0; i < readers.length; i++) {
if(readers[i] != null) { if(readers[i] != null) {
readers[i].close(); try {
readers[i].close();
} catch(IOException e) {
LOG.error(e);
}
} }
} }

View File

@@ -15,6 +15,8 @@
*/ */
package org.apache.hadoop.hbase; package org.apache.hadoop.hbase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.*; import org.apache.hadoop.io.*;
import java.io.*; import java.io.*;
@@ -36,6 +38,8 @@ import java.util.*;
* You should close() the instance if you want to clean up the thread properly. * You should close() the instance if you want to clean up the thread properly.
******************************************************************************/ ******************************************************************************/
public class Leases { public class Leases {
private static final Log LOG = LogFactory.getLog(Leases.class);
long leasePeriod; long leasePeriod;
long leaseCheckFrequency; long leaseCheckFrequency;
LeaseMonitor leaseMonitor; LeaseMonitor leaseMonitor;
@@ -47,7 +51,7 @@ public class Leases {
/** Indicate the length of the lease, in milliseconds */ /** Indicate the length of the lease, in milliseconds */
public Leases(long leasePeriod, long leaseCheckFrequency) { public Leases(long leasePeriod, long leaseCheckFrequency) {
this.leasePeriod = leasePeriod; this.leasePeriod = leasePeriod;
this.leaseCheckFrequency = leaseCheckFrequency;
this.leaseMonitor = new LeaseMonitor(); this.leaseMonitor = new LeaseMonitor();
this.leaseMonitorThread = new Thread(leaseMonitor); this.leaseMonitorThread = new Thread(leaseMonitor);
this.leaseMonitorThread.setName("Lease.monitor"); this.leaseMonitorThread.setName("Lease.monitor");
@@ -59,6 +63,9 @@ public class Leases {
* without any cancellation calls. * without any cancellation calls.
*/ */
public void close() { public void close() {
if(LOG.isDebugEnabled()) {
LOG.debug("closing leases");
}
this.running = false; this.running = false;
try { try {
this.leaseMonitorThread.interrupt(); this.leaseMonitorThread.interrupt();
@@ -71,6 +78,9 @@ public class Leases {
sortedLeases.clear(); sortedLeases.clear();
} }
} }
if(LOG.isDebugEnabled()) {
LOG.debug("leases closed");
}
} }
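The Leases changes above add logging around close() and record the check frequency passed to the constructor. As a reminder of what those two constructor arguments mean, here is a much-simplified monitor sketch; it is illustrative only and omits the sorted leases and the create/renew/cancel calls used by HRegionServer:

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

class LeaseMonitorSketch implements Runnable {
  private final long leasePeriod;         // how long a lease lives without renewal
  private final long leaseCheckFrequency; // how often the monitor thread wakes up
  private final Map<String, Long> leases = new HashMap<String, Long>();
  private final Thread monitor = new Thread(this, "Lease.monitor");
  private volatile boolean running = true;

  LeaseMonitorSketch(long leasePeriod, long leaseCheckFrequency) {
    this.leasePeriod = leasePeriod;
    this.leaseCheckFrequency = leaseCheckFrequency;
    monitor.start();
  }

  public void run() {
    while (running) {
      long now = System.currentTimeMillis();
      synchronized (leases) {
        for (Iterator<Long> i = leases.values().iterator(); i.hasNext(); ) {
          if (now - i.next() > leasePeriod) {
            i.remove();                   // lease expired without a renewal
          }
        }
      }
      try {
        Thread.sleep(leaseCheckFrequency);
      } catch (InterruptedException e) {
        // close() interrupts us; loop around and re-check the running flag
      }
    }
  }

  void close() {                          // callers should close() to stop the thread
    running = false;
    monitor.interrupt();
  }
}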
/** A client obtains a lease... */ /** A client obtains a lease... */

View File

@@ -0,0 +1,30 @@
/**
* Copyright 2007 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import java.io.IOException;
public class NotServingRegionException extends IOException {
private static final long serialVersionUID = 1L << 17 - 1L;
public NotServingRegionException() {
super();
}
public NotServingRegionException(String s) {
super(s);
}
}
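NotServingRegionException gives callers a typed signal that a region has moved or is briefly offline (for example while a split closes it), instead of a generic IOException. A hedged sketch of how a client-side caller might react, retrying after a pause; the RegionCall interface and the retry/pause values are illustrative and assume the sketch sits in the same package as the exception:

import java.io.IOException;

class RetryOnNotServingSketch {
  interface RegionCall { void call() throws IOException; }   // stand-in for one client RPC

  void callWithRetries(RegionCall rpc, int maxRetries, long pauseMs) throws IOException {
    for (int tries = 0; ; tries++) {
      try {
        rpc.call();
        return;                                              // success
      } catch (NotServingRegionException e) {
        if (tries >= maxRetries - 1) {
          throw e;                                           // out of retries: surface the error
        }
        try {
          Thread.sleep(pauseMs);                             // wait, re-locate the region, try again
        } catch (InterruptedException ignored) {
        }
      }
    }
  }
}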

View File

@@ -0,0 +1,27 @@
/**
* Copyright 2006 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import org.apache.hadoop.io.Text;
/**
* Used as a callback mechanism so that an HRegion can notify the HRegionServer
* when a region is about to be closed during a split operation. This is done
* to minimize the amount of time the region is off-line.
*/
public interface RegionUnavailableListener {
public void regionIsUnavailable(Text regionName);
}
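In this patch the implementer of RegionUnavailableListener is HRegionServer; any implementer just needs to remember which regions the split has taken offline so incoming requests can be handled accordingly. A minimal illustrative implementation (not taken from the patch, and assumed to live in the same package as the interface):

import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.io.Text;

class UnavailableRegionTracker implements RegionUnavailableListener {
  private final Set<Text> unavailable = new HashSet<Text>();

  public void regionIsUnavailable(Text regionName) {
    synchronized (unavailable) {
      unavailable.add(regionName);     // mark the region while the split closes it
    }
  }

  public boolean isUnavailable(Text regionName) {
    synchronized (unavailable) {
      return unavailable.contains(regionName);
    }
  }
}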

View File

@@ -1,58 +0,0 @@
/**
* Copyright 2006 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import org.apache.log4j.Level;
/**
* Retrieve environment variables that control debugging and logging environment
*/
public class Environment {
public static boolean debugging = false;
public static Level logLevel = Level.INFO;
private Environment() {}; // Not instantiable
public static void getenv() {
String value = null;
value = System.getenv("DEBUGGING");
if(value != null && value.equalsIgnoreCase("TRUE")) {
debugging = true;
}
value = System.getenv("LOGGING_LEVEL");
if(value != null && value.length() != 0) {
if(value.equalsIgnoreCase("ALL")) {
logLevel = Level.ALL;
} else if(value.equalsIgnoreCase("DEBUG")) {
logLevel = Level.DEBUG;
} else if(value.equalsIgnoreCase("ERROR")) {
logLevel = Level.ERROR;
} else if(value.equalsIgnoreCase("FATAL")) {
logLevel = Level.FATAL;
} else if(value.equalsIgnoreCase("INFO")) {
logLevel = Level.INFO;
} else if(value.equalsIgnoreCase("OFF")) {
logLevel = Level.OFF;
} else if(value.equalsIgnoreCase("TRACE")) {
logLevel = Level.TRACE;
} else if(value.equalsIgnoreCase("WARN")) {
logLevel = Level.WARN;
}
}
}
}

View File

@@ -0,0 +1,55 @@
/**
* Copyright 2006 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
/**
* Abstract base class for HBase cluster JUnit tests. Spins up a cluster in
* {@link #setUp()} and takes it down again in {@link #tearDown()}.
*/
public abstract class HBaseClusterTestCase extends HBaseTestCase {
protected MiniHBaseCluster cluster;
final boolean miniHdfs;
protected HBaseClusterTestCase() {
this(false);
}
protected HBaseClusterTestCase(String name) {
this(name, false);
}
protected HBaseClusterTestCase(final boolean miniHdfs) {
super();
this.miniHdfs = miniHdfs;
}
protected HBaseClusterTestCase(String name, final boolean miniHdfs) {
super(name);
this.miniHdfs = miniHdfs;
}
public void setUp() throws Exception {
super.setUp();
this.cluster = new MiniHBaseCluster(this.conf, 1, this.miniHdfs);
}
public void tearDown() throws Exception {
super.tearDown();
if (this.cluster != null) {
this.cluster.shutdown();
}
}
}
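A concrete test only has to pick a constructor (with or without a mini-DFS) and use the protected conf supplied by HBaseTestCase; the cluster lifecycle is handled by setUp()/tearDown(). A hypothetical subclass sketch, assumed to sit in the same package; the table name and family are made up, and the HClient calls are the ones used elsewhere in this patch:

import org.apache.hadoop.io.Text;

public class TestExample extends HBaseClusterTestCase {
  public TestExample() {
    super(true);                               // also start a mini-DFS
  }

  public void testCreateAndListTable() throws Exception {
    HClient client = new HClient(conf);        // conf comes from HBaseTestCase
    HTableDescriptor desc = new HTableDescriptor("example", 3);
    desc.addFamily(new Text("contents:"));
    client.createTable(desc);
    assertEquals(1, client.listTables().length);
    client.close();
  }
}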

View File

@@ -0,0 +1,46 @@
/**
* Copyright 2006 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
/**
* Abstract base class for test cases. Performs all static initialization.
*/
public abstract class HBaseTestCase extends TestCase {
static {
StaticTestEnvironment.initialize();
}
protected Configuration conf;
protected HBaseTestCase() {
super();
conf = new HBaseConfiguration();
}
protected HBaseTestCase(String name) {
super(name);
conf = new HBaseConfiguration();
}
protected Path getUnitTestdir(String testName) {
return new Path(StaticTestEnvironment.TEST_DIRECTORY_KEY, testName);
}
}

View File

@@ -35,9 +35,9 @@ public class MiniHBaseCluster implements HConstants {
private MiniDFSCluster cluster; private MiniDFSCluster cluster;
private FileSystem fs; private FileSystem fs;
private Path parentdir; private Path parentdir;
private HMasterRunner masterRunner; private HMaster master;
private Thread masterRunnerThread; private Thread masterThread;
private HRegionServerRunner[] regionServers; private HRegionServer[] regionServers;
private Thread[] regionThreads; private Thread[] regionThreads;
public MiniHBaseCluster(Configuration conf, int nRegionNodes) { public MiniHBaseCluster(Configuration conf, int nRegionNodes) {
@@ -58,13 +58,13 @@ public class MiniHBaseCluster implements HConstants {
try { try {
try { try {
if(System.getProperty("test.build.data") == null) { if(System.getProperty(StaticTestEnvironment.TEST_DIRECTORY_KEY) == null) {
File testDir = new File(new File("").getAbsolutePath(), File testDir = new File(new File("").getAbsolutePath(),
"build/contrib/hbase/test"); "build/contrib/hbase/test");
String dir = testDir.getAbsolutePath(); String dir = testDir.getAbsolutePath();
LOG.info("Setting test.build.data to " + dir); LOG.info("Setting test.build.data to " + dir);
System.setProperty("test.build.data", dir); System.setProperty(StaticTestEnvironment.TEST_DIRECTORY_KEY, dir);
} }
if (miniHdfsFilesystem) { if (miniHdfsFilesystem) {
@@ -85,26 +85,15 @@ public class MiniHBaseCluster implements HConstants {
} }
// Create the master // Create the master
this.masterRunner = new HMasterRunner(); this.master = new HMaster(conf);
this.masterRunnerThread = new Thread(masterRunner, "masterRunner"); this.masterThread = new Thread(this.master, "HMaster");
// Start up the master // Start up the master
LOG.info("Starting HMaster"); LOG.info("Starting HMaster");
masterRunnerThread.start(); masterThread.start();
while(! masterRunner.isCrashed() && ! masterRunner.isInitialized()) {
try {
LOG.info("...waiting for HMaster to initialize...");
Thread.sleep(1000);
} catch(InterruptedException e) {
}
if(masterRunner.isCrashed()) {
throw new RuntimeException("HMaster crashed");
}
}
LOG.info("HMaster started.");
// Set the master's port for the HRegionServers // Set the master's port for the HRegionServers
String address = masterRunner.getHMasterAddress().toString(); String address = master.getMasterAddress().toString();
this.conf.set(MASTER_ADDRESS, address); this.conf.set(MASTER_ADDRESS, address);
// Start the HRegionServers // Start the HRegionServers
@@ -115,34 +104,20 @@ public class MiniHBaseCluster implements HConstants {
LOG.info("Starting HRegionServers"); LOG.info("Starting HRegionServers");
startRegionServers(this.conf, nRegionNodes); startRegionServers(this.conf, nRegionNodes);
LOG.info("HRegionServers running");
// Wait for things to get started
while(! masterRunner.isCrashed() && ! masterRunner.isUp()) {
try {
LOG.info("Waiting for Mini HBase cluster to start...");
Thread.sleep(1000);
} catch(InterruptedException e) {
}
if(masterRunner.isCrashed()) {
throw new RuntimeException("HMaster crashed");
}
}
} catch(Throwable e) { } catch(Throwable e) {
// Delete all DFS files e.printStackTrace();
deleteFile(new File(System.getProperty("test.build.data"), "dfs")); shutdown();
throw new RuntimeException("Mini HBase cluster did not start");
} }
} }
private void startRegionServers(Configuration conf, int nRegionNodes) { private void startRegionServers(Configuration conf, int nRegionNodes)
this.regionServers = new HRegionServerRunner[nRegionNodes]; throws IOException {
this.regionServers = new HRegionServer[nRegionNodes];
this.regionThreads = new Thread[nRegionNodes]; this.regionThreads = new Thread[nRegionNodes];
for(int i = 0; i < nRegionNodes; i++) { for(int i = 0; i < nRegionNodes; i++) {
regionServers[i] = new HRegionServerRunner(conf); regionServers[i] = new HRegionServer(conf);
regionThreads[i] = new Thread(regionServers[i], "HRegionServer-" + i); regionThreads[i] = new Thread(regionServers[i], "HRegionServer-" + i);
regionThreads[i].start(); regionThreads[i].start();
} }
@@ -153,35 +128,48 @@ public class MiniHBaseCluster implements HConstants {
* supplied port is not necessarily the actual port used. * supplied port is not necessarily the actual port used.
*/ */
public HServerAddress getHMasterAddress() { public HServerAddress getHMasterAddress() {
return masterRunner.getHMasterAddress(); return master.getMasterAddress();
} }
/** Shut down the HBase cluster */ /** Shut down the HBase cluster */
public void shutdown() { public void shutdown() {
LOG.info("Shutting down the HBase Cluster"); LOG.info("Shutting down the HBase Cluster");
for(int i = 0; i < regionServers.length; i++) {
regionServers[i].shutdown();
}
masterRunner.shutdown();
for(int i = 0; i < regionServers.length; i++) { for(int i = 0; i < regionServers.length; i++) {
try { try {
regionThreads[i].join(); regionServers[i].stop();
} catch (InterruptedException e) {
} catch(IOException e) {
e.printStackTrace(); e.printStackTrace();
} }
} }
try { try {
masterRunnerThread.join(); master.shutdown();
} catch (InterruptedException e) {
} catch(IOException e) {
e.printStackTrace(); e.printStackTrace();
} }
if (cluster != null) { for(int i = 0; i < regionServers.length; i++) {
try {
regionThreads[i].join();
} catch(InterruptedException e) {
}
}
try {
masterThread.join();
} catch(InterruptedException e) {
}
LOG.info("HBase Cluster shutdown complete");
if(cluster != null) {
LOG.info("Shutting down Mini DFS cluster"); LOG.info("Shutting down Mini DFS cluster");
cluster.shutdown(); cluster.shutdown();
} }
// Delete all DFS files // Delete all DFS files
deleteFile(new File(System.getProperty("test.build.data"), "dfs")); deleteFile(new File(System.getProperty(
StaticTestEnvironment.TEST_DIRECTORY_KEY), "dfs"));
} }
private void deleteFile(File f) { private void deleteFile(File f) {
@@ -193,126 +181,4 @@ public class MiniHBaseCluster implements HConstants {
} }
f.delete(); f.delete();
} }
private class HMasterRunner implements Runnable {
private HMaster master = null;
private Thread masterThread = null;
private volatile boolean isInitialized = false;
private boolean isCrashed = false;
private boolean isRunning = true;
private long threadSleepTime = conf.getLong(THREAD_WAKE_FREQUENCY, 10 * 1000);
public HServerAddress getHMasterAddress() {
return this.master.getMasterAddress();
}
public synchronized boolean isInitialized() {
return isInitialized;
}
public synchronized boolean isCrashed() {
return isCrashed;
}
public boolean isUp() {
if(master == null) {
return false;
}
synchronized(this) {
return isInitialized;
}
}
/** Create the HMaster and run it */
public void run() {
try {
synchronized(this) {
if(isRunning) {
this.master = new HMaster(conf);
masterThread = new Thread(this.master);
masterThread.start();
}
isInitialized = true;
}
} catch(Throwable e) {
shutdown();
LOG.error("HMaster crashed:", e);
synchronized(this) {
isCrashed = true;
}
}
while(this.master != null && this.master.isMasterRunning()) {
try {
Thread.sleep(threadSleepTime);
} catch(InterruptedException e) {
}
}
synchronized(this) {
isCrashed = true;
}
shutdown();
}
/** Shut down the HMaster and wait for it to finish */
public synchronized void shutdown() {
isRunning = false;
if (this.master != null) {
try {
this.master.shutdown();
} catch(IOException e) {
LOG.error("Master crashed during stop", e);
} finally {
try {
masterThread.join();
} catch(InterruptedException e) {
}
master = null;
}
}
}
}
private class HRegionServerRunner implements Runnable {
private HRegionServer server = null;
private boolean isRunning = true;
private Configuration conf;
public HRegionServerRunner(Configuration conf) {
this.conf = conf;
}
/** Start up the HRegionServer */
public void run() {
try {
synchronized(this) {
if(isRunning) {
server = new HRegionServer(conf);
}
}
server.run();
} catch(Throwable e) {
shutdown();
LOG.error("HRegionServer crashed:", e);
}
}
/** Shut down the HRegionServer */
public synchronized void shutdown() {
isRunning = false;
if(server != null) {
try {
server.stop();
} catch(IOException e) {
LOG.error("HRegionServer crashed during stop", e);
} finally {
server.join();
server = null;
}
}
}
}
} }
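Outside of HBaseClusterTestCase, the mini cluster can also be driven directly; the only contract is to call shutdown() when done, which now stops the region servers, shuts the master down, and joins both sets of threads in that order. A small usage sketch: the three-argument constructor and getHMasterAddress() are the ones shown above, everything else (class name, the false mini-DFS flag, same-package placement) is illustrative:

import org.apache.hadoop.conf.Configuration;

public class MiniClusterUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HBaseConfiguration();
    // one region server, no mini-DFS (use an already-running file system)
    MiniHBaseCluster cluster = new MiniHBaseCluster(conf, 1, false);
    try {
      System.out.println("master at " + cluster.getHMasterAddress());
      // ... run HClient operations against the cluster here ...
    } finally {
      cluster.shutdown();              // stops region servers, master, and mini-DFS if any
    }
  }
}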

View File

@@ -0,0 +1,92 @@
/**
* Copyright 2006 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import java.io.File;
import java.util.Enumeration;
import org.apache.log4j.Appender;
import org.apache.log4j.ConsoleAppender;
import org.apache.log4j.Layout;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
public class StaticTestEnvironment {
private StaticTestEnvironment() {}; // Not instantiable
public static final String TEST_DIRECTORY_KEY = "test.build.data";
public static boolean debugging = false;
@SuppressWarnings("unchecked")
public static void initialize() {
String value = null;
if (System.getProperty(TEST_DIRECTORY_KEY) == null) {
System.setProperty(TEST_DIRECTORY_KEY, new File(
"build/contrib/hbase/test").getAbsolutePath());
}
value = System.getenv("DEBUGGING");
if(value != null && value.equalsIgnoreCase("TRUE")) {
debugging = true;
Logger rootLogger = Logger.getRootLogger();
rootLogger.setLevel(Level.WARN);
Level logLevel = Level.INFO;
value = System.getenv("LOGGING_LEVEL");
if(value != null && value.length() != 0) {
if(value.equalsIgnoreCase("ALL")) {
logLevel = Level.ALL;
} else if(value.equalsIgnoreCase("DEBUG")) {
logLevel = Level.DEBUG;
} else if(value.equalsIgnoreCase("ERROR")) {
logLevel = Level.ERROR;
} else if(value.equalsIgnoreCase("FATAL")) {
logLevel = Level.FATAL;
} else if(value.equalsIgnoreCase("INFO")) {
logLevel = Level.INFO;
} else if(value.equalsIgnoreCase("OFF")) {
logLevel = Level.OFF;
} else if(value.equalsIgnoreCase("TRACE")) {
logLevel = Level.TRACE;
} else if(value.equalsIgnoreCase("WARN")) {
logLevel = Level.WARN;
}
}
ConsoleAppender consoleAppender = null;
for(Enumeration<Appender> e = rootLogger.getAllAppenders();
e.hasMoreElements();) {
Appender a = e.nextElement();
if(a instanceof ConsoleAppender) {
consoleAppender = (ConsoleAppender)a;
break;
}
}
if(consoleAppender != null) {
Layout layout = consoleAppender.getLayout();
if(layout instanceof PatternLayout) {
PatternLayout consoleLayout = (PatternLayout)layout;
consoleLayout.setConversionPattern("%d %-5p [%t] %l: %m%n");
}
}
Logger.getLogger(
HBaseTestCase.class.getPackage().getName()).setLevel(logLevel);
}
}
}

View File

@@ -17,29 +17,17 @@ package org.apache.hadoop.hbase;
import java.io.ByteArrayOutputStream; import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream; import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException; import java.io.IOException;
import java.util.Enumeration;
import java.util.Iterator; import java.util.Iterator;
import java.util.TreeMap; import java.util.TreeMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.dfs.MiniDFSCluster; import org.apache.hadoop.dfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;
import org.apache.log4j.Appender; public class TestGet extends HBaseTestCase {
import org.apache.log4j.ConsoleAppender;
import org.apache.log4j.Layout;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import junit.framework.TestCase;
public class TestGet extends TestCase {
private static final Text CONTENTS = new Text("contents:"); private static final Text CONTENTS = new Text("contents:");
private static final Text ROW_KEY = new Text(HGlobals.rootRegionInfo.regionName); private static final Text ROW_KEY = new Text(HGlobals.rootRegionInfo.regionName);
@@ -71,7 +59,6 @@ public class TestGet extends TestCase {
} }
} }
@SuppressWarnings("unchecked")
public void testGet() throws IOException { public void testGet() throws IOException {
MiniDFSCluster cluster = null; MiniDFSCluster cluster = null;
@@ -79,37 +66,6 @@ public class TestGet extends TestCase {
// Initialization // Initialization
if(System.getProperty("test.build.data") == null) {
String dir = new File(new File("").getAbsolutePath(), "build/contrib/hbase/test").getAbsolutePath();
System.out.println(dir);
System.setProperty("test.build.data", dir);
}
Configuration conf = new HBaseConfiguration();
Environment.getenv();
if(Environment.debugging) {
Logger rootLogger = Logger.getRootLogger();
rootLogger.setLevel(Level.WARN);
ConsoleAppender consoleAppender = null;
for(Enumeration<Appender> e = (Enumeration<Appender>)rootLogger.getAllAppenders();
e.hasMoreElements();) {
Appender a = e.nextElement();
if(a instanceof ConsoleAppender) {
consoleAppender = (ConsoleAppender)a;
break;
}
}
if(consoleAppender != null) {
Layout layout = consoleAppender.getLayout();
if(layout instanceof PatternLayout) {
PatternLayout consoleLayout = (PatternLayout)layout;
consoleLayout.setConversionPattern("%d %-5p [%t] %l: %m%n");
}
}
Logger.getLogger("org.apache.hadoop.hbase").setLevel(Environment.logLevel);
}
cluster = new MiniDFSCluster(conf, 2, true, (String[])null); cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
FileSystem fs = cluster.getFileSystem(); FileSystem fs = cluster.getFileSystem();
Path dir = new Path("/hbase"); Path dir = new Path("/hbase");

View File

@@ -15,45 +15,52 @@
*/ */
package org.apache.hadoop.hbase; package org.apache.hadoop.hbase;
import java.io.File;
import java.io.IOException; import java.io.IOException;
import java.util.Enumeration;
import java.util.Iterator; import java.util.Iterator;
import java.util.TreeMap; import java.util.TreeMap;
import java.util.TreeSet; import java.util.TreeSet;
import junit.framework.TestCase;
import junit.framework.Test;
import junit.framework.TestSuite;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;
import org.apache.log4j.Appender;
import org.apache.log4j.ConsoleAppender;
import org.apache.log4j.Layout;
import org.apache.log4j.Logger;
import org.apache.log4j.Level;
import org.apache.log4j.PatternLayout;
/** /**
* Test HBase Master and Region servers, client API * Test HBase Master and Region servers, client API
*/ */
public class TestHBaseCluster extends TestCase { public class TestHBaseCluster extends HBaseClusterTestCase {
private HTableDescriptor desc;
private HClient client;
/** constructor */ /** constructor */
public TestHBaseCluster(String name) { public TestHBaseCluster() {
super(name); super(true);
this.desc = null;
this.client = null;
} }
/** Test suite so that all tests get run */ /**
public static Test suite() { * Since all the "tests" depend on the results of the previous test, they are
TestSuite suite = new TestSuite(); * not Junit tests that can stand alone. Consequently we have a single Junit
suite.addTest(new TestHBaseCluster("testSetup")); * test that runs the "sub-tests" as private methods.
suite.addTest(new TestHBaseCluster("testBasic")); */
suite.addTest(new TestHBaseCluster("testScanner")); public void testHBaseCluster() {
suite.addTest(new TestHBaseCluster("testCleanup")); try {
return suite; setup();
basic();
scanner();
listTables();
cleanup();
} catch(IOException e) {
e.printStackTrace();
fail();
}
}
public void tearDown() throws Exception {
super.tearDown();
if(client != null) {
client.close();
}
} }
private static final int FIRST_ROW = 1; private static final int FIRST_ROW = 1;
@@ -65,126 +72,61 @@ public class TestHBaseCluster extends TestCase {
private static final String ANCHORNUM = "anchor:anchornum-"; private static final String ANCHORNUM = "anchor:anchornum-";
private static final String ANCHORSTR = "anchorstr"; private static final String ANCHORSTR = "anchorstr";
private static Configuration conf = null; private void setup() throws IOException {
private static boolean failures = false; client = new HClient(conf);
private static boolean initialized = false; desc = new HTableDescriptor("test", 3);
private static MiniHBaseCluster cluster = null; desc.addFamily(new Text(CONTENTS));
private static HTableDescriptor desc = null; desc.addFamily(new Text(ANCHOR));
private static HClient client = null; client.createTable(desc);
// Set up environment, start mini cluster, etc.
@SuppressWarnings("unchecked")
public void testSetup() throws Exception {
try {
if(System.getProperty("test.build.data") == null) {
String dir = new File(new File("").getAbsolutePath(), "build/contrib/hbase/test").getAbsolutePath();
System.out.println(dir);
System.setProperty("test.build.data", dir);
}
conf = new HBaseConfiguration();
Environment.getenv();
Logger rootLogger = Logger.getRootLogger();
if(Environment.debugging) {
rootLogger.setLevel(Level.WARN);
}
ConsoleAppender consoleAppender = null;
for(Enumeration<Appender> e = (Enumeration<Appender>)rootLogger.getAllAppenders();
e.hasMoreElements();) {
Appender a = e.nextElement();
if(a instanceof ConsoleAppender) {
consoleAppender = (ConsoleAppender)a;
break;
}
}
if(consoleAppender != null) {
Layout layout = consoleAppender.getLayout();
if(layout instanceof PatternLayout) {
PatternLayout consoleLayout = (PatternLayout)layout;
consoleLayout.setConversionPattern("%d %-5p [%t] %l: %m%n");
}
}
Logger.getLogger("org.apache.hadoop.hbase").setLevel(Environment.logLevel);
cluster = new MiniHBaseCluster(conf, 1);
client = new HClient(conf);
desc = new HTableDescriptor("test", 3);
desc.addFamily(new Text(CONTENTS));
desc.addFamily(new Text(ANCHOR));
client.createTable(desc);
} catch(Exception e) {
failures = true;
throw e;
}
initialized = true;
} }
// Test basic functionality. Writes to contents:basic and anchor:anchornum-* // Test basic functionality. Writes to contents:basic and anchor:anchornum-*
public void testBasic() throws IOException { private void basic() throws IOException {
if(!initialized) { long startTime = System.currentTimeMillis();
throw new IllegalStateException();
client.openTable(desc.getName());
// Write out a bunch of values
for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
long writeid = client.startUpdate(new Text("row_" + k));
client.put(writeid, CONTENTS_BASIC, (CONTENTSTR + k).getBytes());
client.put(writeid, new Text(ANCHORNUM + k), (ANCHORSTR + k).getBytes());
client.commit(writeid);
}
System.out.println("Write " + NUM_VALS + " rows. Elapsed time: "
+ ((System.currentTimeMillis() - startTime) / 1000.0));
// Read them back in
startTime = System.currentTimeMillis();
Text collabel = null;
for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
Text rowlabel = new Text("row_" + k);
byte bodydata[] = client.get(rowlabel, CONTENTS_BASIC);
assertNotNull(bodydata);
String bodystr = new String(bodydata).toString().trim();
String teststr = CONTENTSTR + k;
assertEquals("Incorrect value for key: (" + rowlabel + "," + CONTENTS_BASIC
+ "), expected: '" + teststr + "' got: '" + bodystr + "'",
bodystr, teststr);
collabel = new Text(ANCHORNUM + k);
bodydata = client.get(rowlabel, collabel);
bodystr = new String(bodydata).toString().trim();
teststr = ANCHORSTR + k;
assertEquals("Incorrect value for key: (" + rowlabel + "," + collabel
+ "), expected: '" + teststr + "' got: '" + bodystr + "'",
bodystr, teststr);
} }
try { System.out.println("Read " + NUM_VALS + " rows. Elapsed time: "
long startTime = System.currentTimeMillis(); + ((System.currentTimeMillis() - startTime) / 1000.0));
client.openTable(desc.getName());
// Write out a bunch of values
for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
long writeid = client.startUpdate(new Text("row_" + k));
client.put(writeid, CONTENTS_BASIC, (CONTENTSTR + k).getBytes());
client.put(writeid, new Text(ANCHORNUM + k), (ANCHORSTR + k).getBytes());
client.commit(writeid);
}
System.out.println("Write " + NUM_VALS + " rows. Elapsed time: "
+ ((System.currentTimeMillis() - startTime) / 1000.0));
// Read them back in
startTime = System.currentTimeMillis();
Text collabel = null;
for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
Text rowlabel = new Text("row_" + k);
byte bodydata[] = client.get(rowlabel, CONTENTS_BASIC);
assertNotNull(bodydata);
String bodystr = new String(bodydata).toString().trim();
String teststr = CONTENTSTR + k;
assertEquals("Incorrect value for key: (" + rowlabel + "," + CONTENTS_BASIC
+ "), expected: '" + teststr + "' got: '" + bodystr + "'",
bodystr, teststr);
collabel = new Text(ANCHORNUM + k);
bodydata = client.get(rowlabel, collabel);
bodystr = new String(bodydata).toString().trim();
teststr = ANCHORSTR + k;
assertEquals("Incorrect value for key: (" + rowlabel + "," + collabel
+ "), expected: '" + teststr + "' got: '" + bodystr + "'",
bodystr, teststr);
}
System.out.println("Read " + NUM_VALS + " rows. Elapsed time: "
+ ((System.currentTimeMillis() - startTime) / 1000.0));
} catch(IOException e) {
failures = true;
throw e;
}
} }
public void testScanner() throws IOException { private void scanner() throws IOException {
if(!initialized || failures) {
throw new IllegalStateException();
}
Text[] cols = new Text[] { Text[] cols = new Text[] {
new Text(ANCHORNUM + "[0-9]+"), new Text(ANCHORNUM + "[0-9]+"),
new Text(CONTENTS_BASIC) new Text(CONTENTS_BASIC)
@@ -234,57 +176,31 @@ public class TestHBaseCluster extends TestCase {
+ " rows. Elapsed time: " + " rows. Elapsed time: "
+ ((System.currentTimeMillis() - startTime) / 1000.0)); + ((System.currentTimeMillis() - startTime) / 1000.0));
} catch(IOException e) {
failures = true;
throw e;
} finally { } finally {
s.close(); s.close();
} }
} }
public void testListTables() throws IOException { private void listTables() throws IOException {
if(!initialized || failures) { HTableDescriptor[] tables = client.listTables();
throw new IllegalStateException(); assertEquals(1, tables.length);
} assertEquals(desc.getName(), tables[0].getName());
TreeSet<Text> families = tables[0].families();
assertEquals(2, families.size());
assertTrue(families.contains(new Text(CONTENTS)));
assertTrue(families.contains(new Text(ANCHOR)));
}
private void cleanup() throws IOException {
// Delete the table we created
client.deleteTable(desc.getName());
try { try {
HTableDescriptor[] tables = client.listTables(); Thread.sleep(30000); // Wait for table to be deleted
assertEquals(1, tables.length);
assertEquals(desc.getName(), tables[0].getName()); } catch(InterruptedException e) {
TreeSet<Text> families = tables[0].families();
assertEquals(2, families.size());
assertTrue(families.contains(new Text(CONTENTS)));
assertTrue(families.contains(new Text(ANCHOR)));
} catch(IOException e) {
failures = true;
throw e;
} }
} }
public void testCleanup() throws IOException {
if(!initialized) {
throw new IllegalStateException();
}
try {
if(!failures) {
// Delete the table we created
client.deleteTable(desc.getName());
try {
Thread.sleep(60000); // Wait for table to be deleted
} catch(InterruptedException e) {
}
}
} finally {
// Shut down the cluster
cluster.shutdown();
client.close();
}
}
} }

View File

@@ -0,0 +1,97 @@
/**
* Copyright 2007 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import java.util.TreeMap;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.SequenceFile.Reader;
public class TestHLog extends HBaseTestCase implements HConstants {
protected void setUp() throws Exception {
super.setUp();
}
public void testAppend() throws Exception {
Path dir = getUnitTestdir(getName());
FileSystem fs = FileSystem.get(this.conf);
if (fs.exists(dir)) {
fs.delete(dir);
}
final int COL_COUNT = 10;
final Text regionName = new Text("regionname");
final Text tableName = new Text("tablename");
final Text row = new Text("row");
Reader reader = null;
HLog log = new HLog(fs, dir, this.conf);
try {
// Write columns named 1, 2, 3, etc. and then values of single byte
// 1, 2, 3...
TreeMap<Text, BytesWritable> cols = new TreeMap<Text, BytesWritable>();
for (int i = 0; i < COL_COUNT; i++) {
cols.put(new Text(Integer.toString(i)),
new BytesWritable(new byte[] { (byte)(i + '0') }));
}
long timestamp = System.currentTimeMillis();
log.append(regionName, tableName, row, cols, timestamp);
long logSeqId = log.startCacheFlush();
log.completeCacheFlush(regionName, tableName, logSeqId);
log.close();
Path filename = log.computeFilename(log.filenum - 1);
log = null;
// Now open a reader on the log and assert append worked.
reader = new SequenceFile.Reader(fs, filename, conf);
HLogKey key = new HLogKey();
HLogEdit val = new HLogEdit();
for (int i = 0; i < COL_COUNT; i++) {
reader.next(key, val);
assertEquals(key.getRegionName(), regionName);
assertEquals(key.getTablename(), tableName);
assertEquals(key.getRow(), row);
assertEquals(val.getVal().get()[0], (byte)(i + '0'));
System.out.println(key + " " + val);
}
while (reader.next(key, val)) {
// Assert only one more row... the meta flushed row.
assertEquals(key.getRegionName(), regionName);
assertEquals(key.getTablename(), tableName);
assertEquals(key.getRow(), HLog.METAROW);
assertEquals(val.getColumn(), HLog.METACOLUMN);
assertEquals(0, val.getVal().compareTo(COMPLETE_CACHEFLUSH));
System.out.println(key + " " + val);
}
} finally {
if (log != null) {
log.close();
}
if (reader != null) {
reader.close();
}
if (fs.exists(dir)) {
fs.delete(dir);
}
}
}
protected void tearDown() throws Exception {
super.tearDown();
}
}

View File

@@ -15,31 +15,20 @@
*/ */
package org.apache.hadoop.hbase; package org.apache.hadoop.hbase;
import junit.framework.TestCase;
import junit.framework.Test;
import junit.framework.TestSuite;
import java.io.File; import java.io.File;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Iterator; import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.TreeMap; import java.util.TreeMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.dfs.MiniDFSCluster; import org.apache.hadoop.dfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;
import org.apache.log4j.Appender;
import org.apache.log4j.ConsoleAppender;
import org.apache.log4j.Layout;
import org.apache.log4j.Logger; import org.apache.log4j.Logger;
import org.apache.log4j.Level;
import org.apache.log4j.PatternLayout;
/** /**
* Basic stand-alone testing of HRegion. * Basic stand-alone testing of HRegion.
@@ -47,27 +36,35 @@ import org.apache.log4j.PatternLayout;
* A lot of the meta information for an HRegion now lives inside other * A lot of the meta information for an HRegion now lives inside other
* HRegions or in the HBaseMaster, so only basic testing is possible. * HRegions or in the HBaseMaster, so only basic testing is possible.
*/ */
public class TestHRegion extends TestCase { public class TestHRegion extends HBaseTestCase implements RegionUnavailableListener {
private Logger LOG = Logger.getLogger(this.getClass().getName()); private Logger LOG = Logger.getLogger(this.getClass().getName());
/** Constructor */ /** Constructor */
public TestHRegion(String name) { public TestHRegion() {
super(name); super();
} }
/** Test suite so that all tests get run */ /**
public static Test suite() { * Since all the "tests" depend on the results of the previous test, they are
TestSuite suite = new TestSuite(); * not Junit tests that can stand alone. Consequently we have a single Junit
suite.addTest(new TestHRegion("testSetup")); * test that runs the "sub-tests" as private methods.
suite.addTest(new TestHRegion("testLocks")); */
suite.addTest(new TestHRegion("testBadPuts")); public void testHRegion() {
suite.addTest(new TestHRegion("testBasic")); try {
suite.addTest(new TestHRegion("testScan")); setup();
suite.addTest(new TestHRegion("testBatchWrite")); locks();
suite.addTest(new TestHRegion("testSplitAndMerge")); badPuts();
suite.addTest(new TestHRegion("testRead")); basic();
suite.addTest(new TestHRegion("testCleanup")); scan();
return suite; batchWrite();
splitAndMerge();
read();
cleanup();
} catch(Exception e) {
e.printStackTrace();
fail();
}
} }
@@ -82,9 +79,6 @@ public class TestHRegion extends TestCase {
private static final Text CONTENTS_FIRSTCOL = new Text("contents:firstcol"); private static final Text CONTENTS_FIRSTCOL = new Text("contents:firstcol");
private static final Text ANCHOR_SECONDCOL = new Text("anchor:secondcol"); private static final Text ANCHOR_SECONDCOL = new Text("anchor:secondcol");
private static boolean initialized = false;
private static boolean failures = false;
private static Configuration conf = null;
private static MiniDFSCluster cluster = null; private static MiniDFSCluster cluster = null;
private static FileSystem fs = null; private static FileSystem fs = null;
private static Path parentdir = null; private static Path parentdir = null;
@@ -96,138 +90,86 @@ public class TestHRegion extends TestCase {
private static int numInserted = 0; private static int numInserted = 0;
// Set up environment, start mini cluster, etc. // Create directories, start mini cluster, etc.
@SuppressWarnings("unchecked") private void setup() throws IOException {
public void testSetup() throws IOException {
try {
if(System.getProperty("test.build.data") == null) {
String dir = new File(new File("").getAbsolutePath(), "build/contrib/hbase/test").getAbsolutePath();
System.out.println(dir);
System.setProperty("test.build.data", dir);
}
conf = new HBaseConfiguration();
Environment.getenv();
if(Environment.debugging) {
Logger rootLogger = Logger.getRootLogger();
rootLogger.setLevel(Level.WARN);
ConsoleAppender consoleAppender = null; cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
for(Enumeration<Appender> e = (Enumeration<Appender>)rootLogger.getAllAppenders(); fs = cluster.getFileSystem();
e.hasMoreElements();) { parentdir = new Path("/hbase");
fs.mkdirs(parentdir);
Appender a = e.nextElement(); newlogdir = new Path(parentdir, "log");
if(a instanceof ConsoleAppender) { oldlogfile = new Path(parentdir, "oldlogfile");
consoleAppender = (ConsoleAppender)a;
break;
}
}
if(consoleAppender != null) {
Layout layout = consoleAppender.getLayout();
if(layout instanceof PatternLayout) {
PatternLayout consoleLayout = (PatternLayout)layout;
consoleLayout.setConversionPattern("%d %-5p [%t] %l: %m%n");
}
}
Logger.getLogger("org.apache.hadoop.hbase").setLevel(Environment.logLevel);
}
cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
fs = cluster.getFileSystem();
parentdir = new Path("/hbase");
fs.mkdirs(parentdir);
newlogdir = new Path(parentdir, "log");
oldlogfile = new Path(parentdir, "oldlogfile");
log = new HLog(fs, newlogdir, conf); log = new HLog(fs, newlogdir, conf);
desc = new HTableDescriptor("test", 3); desc = new HTableDescriptor("test", 3);
desc.addFamily(new Text("contents:")); desc.addFamily(new Text("contents:"));
desc.addFamily(new Text("anchor:")); desc.addFamily(new Text("anchor:"));
region = new HRegion(parentdir, log, fs, conf, region = new HRegion(parentdir, log, fs, conf,
new HRegionInfo(1, desc, null, null), null, oldlogfile); new HRegionInfo(1, desc, null, null), null, oldlogfile);
} catch(IOException e) {
failures = true;
throw e;
}
initialized = true;
} }
  // Test basic functionality. Writes to contents:basic and anchor:anchornum-*
  private void basic() throws IOException {
    long startTime = System.currentTimeMillis();

    // Write out a bunch of values
    for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
      long writeid = region.startUpdate(new Text("row_" + k));
      region.put(writeid, CONTENTS_BASIC,
          new BytesWritable((CONTENTSTR + k).getBytes()));
      region.put(writeid, new Text(ANCHORNUM + k),
          new BytesWritable((ANCHORSTR + k).getBytes()));
      region.commit(writeid);
    }
    System.out.println("Write " + NUM_VALS + " rows. Elapsed time: "
        + ((System.currentTimeMillis() - startTime) / 1000.0));

    // Flush cache
    startTime = System.currentTimeMillis();
    region.flushcache(false);
    System.out.println("Cache flush elapsed time: "
        + ((System.currentTimeMillis() - startTime) / 1000.0));

    // Read them back in
    startTime = System.currentTimeMillis();
    Text collabel = null;
    for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
      Text rowlabel = new Text("row_" + k);

      BytesWritable bodydata = region.get(rowlabel, CONTENTS_BASIC);
      assertNotNull(bodydata);
      byte[] bytes = new byte[bodydata.getSize()];
      System.arraycopy(bodydata.get(), 0, bytes, 0, bytes.length);
      String bodystr = new String(bytes).toString().trim();
      String teststr = CONTENTSTR + k;
      assertEquals("Incorrect value for key: (" + rowlabel + "," + CONTENTS_BASIC
          + "), expected: '" + teststr + "' got: '" + bodystr + "'",
          bodystr, teststr);
      collabel = new Text(ANCHORNUM + k);
      bodydata = region.get(rowlabel, collabel);
      bytes = new byte[bodydata.getSize()];
      System.arraycopy(bodydata.get(), 0, bytes, 0, bytes.length);
      bodystr = new String(bytes).toString().trim();
      teststr = ANCHORSTR + k;
      assertEquals("Incorrect value for key: (" + rowlabel + "," + collabel
          + "), expected: '" + teststr + "' got: '" + bodystr + "'",
          bodystr, teststr);
    }
    System.out.println("Read " + NUM_VALS + " rows. Elapsed time: "
        + ((System.currentTimeMillis() - startTime) / 1000.0));
  }
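The method above leans on the row-update protocol used throughout this test: startUpdate() returns a lock id, put() stages cells against it, and commit() makes them visible to get(). The same round trip could be pulled out into a small helper; a minimal sketch, with a hypothetical helper name and signatures taken from the calls above:

  private void writeAndVerify(HRegion r, String row, Text column, String value)
      throws IOException {
    long lockid = r.startUpdate(new Text(row));            // open an update on the row
    r.put(lockid, column, new BytesWritable(value.getBytes()));
    r.commit(lockid);                                       // make the write readable

    BytesWritable stored = r.get(new Text(row), column);    // read the cell back
    byte[] bytes = new byte[stored.getSize()];
    System.arraycopy(stored.get(), 0, bytes, 0, bytes.length);
    assertEquals(value, new String(bytes).trim());           // same check basic() performs
  }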
  private void badPuts() throws IOException {

    // Try put with bad lockid.
    boolean exceptionThrown = false;
@@ -259,7 +201,7 @@ public class TestHRegion extends TestCase {
  /**
   * Test getting and releasing locks.
   */
  private void locks() {
    final int threadCount = 10;
    final int lockCount = 10;

@@ -317,11 +259,7 @@
  // Test scanners. Writes contents:firstcol and anchor:secondcol
  private void scan() throws IOException {
    Text cols[] = new Text[] {
        CONTENTS_FIRSTCOL,
        ANCHOR_SECONDCOL
@@ -583,138 +521,126 @@
  // long time to run.
  // Creates contents:body
  private void batchWrite() throws IOException {
    if(! StaticTestEnvironment.debugging) {
      return;
    }
    long totalFlush = 0;
    long totalCompact = 0;
    long totalLog = 0;
    long startTime = System.currentTimeMillis();

    // 1M writes
    int valsize = 1000;
    for (int k = FIRST_ROW; k <= N_ROWS; k++) {

      // Come up with a random 1000-byte string
      String randstr1 = "" + System.currentTimeMillis();
      StringBuffer buf1 = new StringBuffer("val_" + k + "__");
      while (buf1.length() < valsize) {
        buf1.append(randstr1);
      }

      // Write to the HRegion
      long writeid = region.startUpdate(new Text("row_" + k));
      region.put(writeid, CONTENTS_BODY, new BytesWritable(buf1.toString().getBytes()));
      region.commit(writeid);
      if (k > 0 && k % (N_ROWS / 100) == 0) {
        System.out.println("Flushing write #" + k);

        long flushStart = System.currentTimeMillis();
        region.flushcache(false);
        long flushEnd = System.currentTimeMillis();
        totalFlush += (flushEnd - flushStart);

        if (k % (N_ROWS / 10) == 0) {
          System.out.print("Rolling log...");
          long logStart = System.currentTimeMillis();
          log.rollWriter();
          long logEnd = System.currentTimeMillis();
          totalLog += (logEnd - logStart);
          System.out.println(" elapsed time: " + ((logEnd - logStart) / 1000.0));
        }
      }
    }
    long startCompact = System.currentTimeMillis();
    if(region.compactStores()) {
      totalCompact = System.currentTimeMillis() - startCompact;
      System.out.println("Region compacted - elapsedTime: " + (totalCompact / 1000.0));
    } else {
      System.out.println("No compaction required.");
    }
    long endTime = System.currentTimeMillis();

    long totalElapsed = (endTime - startTime);
    System.out.println();
    System.out.println("Batch-write complete.");
    System.out.println("Wrote " + N_ROWS + " rows, each of ~" + valsize + " bytes");
    System.out.println("Total flush-time: " + (totalFlush / 1000.0));
    System.out.println("Total compact-time: " + (totalCompact / 1000.0));
    System.out.println("Total log-time: " + (totalLog / 1000.0));
    System.out.println("Total time elapsed: " + (totalElapsed / 1000.0));
    System.out.println("Total time, rows/second: " + (N_ROWS / (totalElapsed / 1000.0)));
    System.out.println("Adjusted time (not including flush, compact, or log): "
        + ((totalElapsed - totalFlush - totalCompact - totalLog) / 1000.0));
    System.out.println("Adjusted time, rows/second: "
        + (N_ROWS / ((totalElapsed - totalFlush - totalCompact - totalLog) / 1000.0)));
    System.out.println();
  }
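batchWrite() flushes the cache once per 1% of the rows and rolls the write-ahead log once per 10%, both via modulo checks inside the write loop. A small sketch that names those two decisions (helper names are hypothetical; assumes N_ROWS is a multiple of 100, as the loop above does):

  // Flush the cache every 1% of the rows written.
  private static boolean isFlushPoint(int k, int totalRows) {
    return k > 0 && k % (totalRows / 100) == 0;
  }

  // Roll the log every 10% of the rows written (always at a flush point).
  private static boolean isLogRollPoint(int k, int totalRows) {
    return isFlushPoint(k, totalRows) && k % (totalRows / 10) == 0;
  }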
  // NOTE: This test depends on testBatchWrite succeeding
  private void splitAndMerge() throws IOException {
    Text midKey = new Text();

    if(region.needsSplit(midKey)) {
      System.out.println("Needs split");
    }

    // Split it anyway
    Text midkey = new Text("row_"
        + (StaticTestEnvironment.debugging ? (N_ROWS / 2) : (NUM_VALS/2)));

    Path oldRegionPath = region.getRegionDir();

    long startTime = System.currentTimeMillis();

    HRegion subregions[] = region.closeAndSplit(midkey, this);

    System.out.println("Split region elapsed time: "
        + ((System.currentTimeMillis() - startTime) / 1000.0));

    assertEquals("Number of subregions", subregions.length, 2);

    // Now merge it back together

    Path oldRegion1 = subregions[0].getRegionDir();
    Path oldRegion2 = subregions[1].getRegionDir();
    startTime = System.currentTimeMillis();

    region = HRegion.closeAndMerge(subregions[0], subregions[1]);

    System.out.println("Merge regions elapsed time: "
        + ((System.currentTimeMillis() - startTime) / 1000.0));

    fs.delete(oldRegionPath);
    fs.delete(oldRegion1);
    fs.delete(oldRegion2);
  }
/* (non-Javadoc)
* @see org.apache.hadoop.hbase.RegionUnavailableListener#regionIsUnavailable(org.apache.hadoop.io.Text)
*/
public void regionIsUnavailable(Text regionName) {
// We don't use this here. It is only for the HRegionServer
}
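closeAndSplit() now takes the test itself as a RegionUnavailableListener, and the callback above is deliberately a no-op. If a test wanted to assert on the notification instead, a listener could record it; a minimal sketch, assuming the interface declares only regionIsUnavailable(Text) (class name hypothetical):

  class RecordingListener implements RegionUnavailableListener {
    private final java.util.List<Text> offline = new java.util.ArrayList<Text>();

    public void regionIsUnavailable(Text regionName) {
      offline.add(new Text(regionName));      // copy, in case the caller reuses the Text
    }

    java.util.List<Text> regionsTakenOffline() {
      return offline;
    }
  }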
  // This test verifies that everything is still there after splitting and merging
  private void read() throws IOException {

    // First verify the data written by testBasic()
@@ -820,9 +746,8 @@
    // Verify testBatchWrite data
    if(StaticTestEnvironment.debugging) {
      startTime = System.currentTimeMillis();

      s = region.getScanner(new Text[] { CONTENTS_BODY }, new Text());

      try {
        int numFetched = 0;

@@ -883,7 +808,6 @@
        s.close();
      }
    }
  private static void deleteFile(File f) {
    if(f.isDirectory()) {

@@ -895,18 +819,14 @@
    f.delete();
  }

  private void cleanup() throws IOException {

    // Shut down the mini cluster
    cluster.shutdown();

    // Delete all the DFS files
    deleteFile(new File(System.getProperty("test.build.data"), "dfs"));
  }
}
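With the former testBasic, testScan, testBatchWrite and friends demoted to private methods, the class presumably drives them from a single JUnit entry point so each stage sees the data written by the one before it. That driver is outside the hunks shown here; a minimal sketch of what it could look like, with a hypothetical method name and the ordering implied by the comments above:

  public void testHRegion() throws IOException {
    basic();           // writes contents:basic and anchor:anchornum-*
    badPuts();
    locks();
    scan();
    batchWrite();      // only does work when StaticTestEnvironment.debugging is set
    splitAndMerge();   // depends on the batch-write data
    read();            // verifies everything survived the split and merge
    cleanup();         // shuts down the mini cluster and removes the DFS files
  }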
View File
@@ -17,9 +17,7 @@ package org.apache.hadoop.hbase;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.TreeMap;

import org.apache.hadoop.conf.Configuration;

@@ -30,16 +28,7 @@ import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.Text;

public class TestScanner extends HBaseTestCase {
  private static final Text FIRST_ROW = new Text();
  private static final Text[] COLS = {
      HConstants.COLUMN_FAMILY
@@ -127,12 +116,7 @@
    } finally {
      if(scanner != null) {
        scanner.close();
        scanner = null;
      }
    }
@@ -146,7 +130,6 @@
  }

  /** The test! */
  public void testScanner() throws IOException {
    MiniDFSCluster cluster = null;
    FileSystem fs = null;

@@ -155,37 +138,7 @@
    // Initialization

    Configuration conf = new HBaseConfiguration();
    cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
    fs = cluster.getFileSystem();
    Path dir = new Path("/hbase");
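The initialization that remains here is the standard mini-DFS pattern: create the cluster, work against its FileSystem, and shut it down when done. A minimal sketch of that skeleton, assuming the test owns the cluster and using the same paths and node count as above:

    Configuration conf = new HBaseConfiguration();
    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster(conf, 2, true, (String[])null);   // two datanodes
      FileSystem fs = cluster.getFileSystem();
      Path dir = new Path("/hbase");
      fs.mkdirs(dir);
      // ... build the regions and run the scan assertions ...
    } finally {
      if (cluster != null) {
        cluster.shutdown();       // stop the namenode and datanodes
      }
    }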
View File
@@ -0,0 +1,77 @@
/**
* Copyright 2006 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import java.io.IOException;
/** Tests table creation restrictions */
public class TestTable extends HBaseClusterTestCase {
public TestTable() {
super(true);
}
public void testTable() {
HClient client = new HClient(conf);
try {
client.createTable(HGlobals.rootTableDesc);
} catch(IllegalArgumentException e) {
// Expected - ignore it
} catch(Exception e) {
System.err.println("Unexpected exception");
e.printStackTrace();
fail();
}
try {
client.createTable(HGlobals.metaTableDesc);
} catch(IllegalArgumentException e) {
// Expected - ignore it
} catch(Exception e) {
System.err.println("Unexpected exception");
e.printStackTrace();
fail();
}
HTableDescriptor desc = new HTableDescriptor("test", 1);
desc.addFamily(HConstants.COLUMN_FAMILY);
try {
client.createTable(desc);
} catch(Exception e) {
System.err.println("Unexpected exception");
e.printStackTrace();
fail();
}
try {
client.createTable(desc);
} catch(IOException e) {
// Expected. Ignore it.
} catch(Exception e) {
System.err.println("Unexpected exception");
e.printStackTrace();
fail();
}
}
}
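The three createTable() calls above repeat the same try/catch shape. If more restricted names are added later, the pattern could be folded into one assertion; a minimal sketch, with a hypothetical helper name (note that fail() throws an Error, so the catch below does not swallow it):

  private void assertCreateFails(HClient client, HTableDescriptor desc,
      Class<? extends Exception> expected) {
    try {
      client.createTable(desc);
      fail("createTable should have thrown " + expected.getName());
    } catch (Exception e) {
      if (!expected.isInstance(e)) {
        e.printStackTrace();
        fail("Unexpected exception: " + e);
      }
    }
  }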