HADOOP-1923, HADOOP-1924 a) tests fail sporadically because set up and tear down is inconsistent b) TestDFSAbort failed in nightly #242
git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@577603 13f79535-47bb-0310-9956-ffa450edef68
parent 3d7bec584c
commit f81060229f
@@ -52,6 +52,8 @@ Trunk (unreleased changes)
              flush to disk.
    HADOOP-1920 Wrapper scripts broken when hadoop in one location and hbase in
              another
+   HADOOP-1923, HADOOP-1924 a) tests fail sporadically because set up and tear
+             down is inconsistent b) TestDFSAbort failed in nightly #242
 
   IMPROVEMENTS
    HADOOP-1737 Make HColumnDescriptor data publically members settable
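Note on the "set up and tear down is inconsistent" half of this fix: the test hunks later in this diff (AbstractMergeTestBase, TestTableMapReduce) converge on one tearDown ordering -- shut down the mini DFS cluster, then close the FileSystem handle. A minimal JUnit 3 sketch of that ordering, assuming fields populated in setUp(); the class name is illustrative, not part of this commit:

    import junit.framework.TestCase;

    import org.apache.hadoop.dfs.MiniDFSCluster;
    import org.apache.hadoop.fs.FileSystem;

    public abstract class ConsistentTearDownTestCase extends TestCase {
      protected MiniDFSCluster dfsCluster; // assumed created in setUp()
      protected FileSystem fs;             // assumed created in setUp()

      @Override
      protected void tearDown() throws Exception {
        super.tearDown();
        // Same ordering as the test hunks below: stop the cluster first,
        // then release the client handle so no state leaks between tests.
        if (dfsCluster != null) {
          dfsCluster.shutdown();
        }
        if (fs != null) {
          fs.close();
        }
      }
    }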
@@ -22,7 +22,6 @@ package org.apache.hadoop.hbase;
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.lang.reflect.Constructor;
-import java.net.ConnectException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -73,6 +72,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
 HMasterRegionInterface {
   static final Log LOG = LogFactory.getLog(HMaster.class.getName());
 
+  /** {@inheritDoc} */
   public long getProtocolVersion(String protocol,
       @SuppressWarnings("unused") long clientVersion)
   throws IOException {
@@ -655,12 +655,7 @@ HMasterRegionInterface {
           // We ran out of tries. Make sure the file system is still
           // available
           if (checkFileSystem()) {
-            // If filesystem is OK, is the exception a ConnectionException?
-            // If so, mark the server as down. No point scanning either
-            // if no server to put meta region on. TODO.
-            if (e instanceof ConnectException) {
-              LOG.debug("Region hosting server is gone.");
-            }
+            continue; // avoid sleeping
           }
         }
       }
@@ -962,6 +957,7 @@ HMasterRegionInterface {
   }
 
   /** Main processing loop */
+  @Override
   public void run() {
     final String threadName = "HMaster";
     Thread.currentThread().setName(threadName);
@@ -1201,11 +1197,17 @@ HMasterRegionInterface {
       // Note that cancelling the server's lease takes care of updating
       // serversToServerInfo, etc.
 
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Region server " + serverName +
+          ": MSG_REPORT_EXITING -- cancelling lease");
+      }
+
       if (cancelLease(serverName, serverLabel)) {
         // Only process the exit message if the server still has a lease.
         // Otherwise we could end up processing the server exit twice.
 
-        LOG.info("Region server " + serverName + ": MSG_REPORT_EXITING");
+        LOG.info("Region server " + serverName +
+          ": MSG_REPORT_EXITING -- lease cancelled");
 
         // Get all the regions the server was serving reassigned
         // (if we are not shutting down).
@@ -1244,8 +1246,8 @@ HMasterRegionInterface {
     synchronized (serversToServerInfo) {
       storedInfo = serversToServerInfo.get(serverName);
     }
-    if(storedInfo == null) {
-      if(LOG.isDebugEnabled()) {
+    if (storedInfo == null) {
+      if (LOG.isDebugEnabled()) {
         LOG.debug("received server report from unknown server: " + serverName);
       }
 
@@ -2564,7 +2566,7 @@ HMasterRegionInterface {
         server.close(scannerId);
       } catch (IOException e) {
         e = RemoteExceptionHandler.checkIOException(e);
-        LOG.error("", e);
+        LOG.error("closing scanner", e);
       }
     }
     scannerId = -1L;
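A recurring cleanup in the hunks above and below: empty log messages such as LOG.error("", e) are replaced with a description of the operation that failed. A hedged sketch of the pattern; the nested Scanner interface is an invented stand-in for the real region server interface:

    import java.io.IOException;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class ScannerCloseSketch {
      static final Log LOG = LogFactory.getLog(ScannerCloseSketch.class);

      // Invented stand-in so the sketch compiles on its own.
      interface Scanner {
        void close(long scannerId) throws IOException;
      }

      void closeQuietly(final Scanner server, final long scannerId) {
        try {
          server.close(scannerId);
        } catch (IOException e) {
          // Before: LOG.error("", e) printed a stack trace with no context.
          // After: name the operation so the log line is searchable.
          LOG.error("closing scanner", e);
        }
      }
    }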
@@ -67,6 +67,7 @@ import org.apache.hadoop.util.StringUtils;
 public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   static final Log LOG = LogFactory.getLog(HRegionServer.class);
 
+  /** {@inheritDoc} */
   public long getProtocolVersion(final String protocol,
       @SuppressWarnings("unused") final long clientVersion)
   throws IOException {
@@ -141,11 +142,15 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     private HTable root = null;
     private HTable meta = null;
 
+    /**
+     * @param stop
+     */
    public SplitOrCompactChecker(final AtomicBoolean stop) {
      super(conf.getInt("hbase.regionserver.thread.splitcompactcheckfrequency",
        30 * 1000), stop);
    }
 
+    /** {@inheritDoc} */
    public void closing(final Text regionName) {
      lock.writeLock().lock();
      try {
@@ -161,6 +166,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
       }
     }
 
+    /** {@inheritDoc} */
     public void closed(final Text regionName) {
       lock.writeLock().lock();
       try {
@@ -176,6 +182,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     /**
      * Scan for splits or compactions to run. Run any we find.
      */
+    @Override
     protected void chore() {
       // Don't interrupt us while we're working
       synchronized (splitOrCompactLock) {
@@ -275,10 +282,16 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   /* Runs periodically to flush memcache.
   */
   class Flusher extends Chore {
+    /**
+     * @param period
+     * @param stop
+     */
     public Flusher(final int period, final AtomicBoolean stop) {
       super(period, stop);
     }
 
+    /** {@inheritDoc} */
+    @Override
     protected void chore() {
       synchronized(cacheFlusherLock) {
         checkForFlushesToRun();
@@ -323,10 +336,16 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     private int MAXLOGENTRIES =
       conf.getInt("hbase.regionserver.maxlogentries", 30 * 1000);
 
+    /**
+     * @param period
+     * @param stop
+     */
     public LogRoller(final int period, final AtomicBoolean stop) {
       super(period, stop);
     }
 
+    /** {@inheritDoc} */
+    @Override
     protected void chore() {
       synchronized(logRollerLock) {
         checkForLogRoll();
@@ -592,7 +611,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
       } catch (IOException e) {
         e = RemoteExceptionHandler.checkIOException(e);
         if(tries < this.numRetries) {
-          LOG.warn("", e);
+          LOG.warn("Processing message (Retry: " + tries + ")", e);
           tries++;
         } else {
           LOG.error("Exceeded max retries: " + this.numRetries, e);
@@ -646,7 +665,8 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     try {
       log.closeAndDelete();
     } catch (IOException e) {
-      LOG.error("", RemoteExceptionHandler.checkIOException(e));
+      LOG.error("Close and delete failed",
+        RemoteExceptionHandler.checkIOException(e));
     }
     try {
       if (!masterRequestedStop && closedRegions != null) {
@@ -664,7 +684,8 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
         hbaseMaster.regionServerReport(serverInfo, exitMsg);
       }
     } catch (IOException e) {
-      LOG.warn("", RemoteExceptionHandler.checkIOException(e));
+      LOG.warn("Failed to send exiting message to master: ",
+        RemoteExceptionHandler.checkIOException(e));
     }
     LOG.info("stopping server at: " +
       serverInfo.getServerAddress().toString());
@@ -799,6 +820,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
       }
     }
 
+    /** {@inheritDoc} */
    public void run() {
      try {
        for(ToDoEntry e = null; !stopRequested.get(); ) {
@@ -1101,7 +1123,8 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
       leases.createLease(scannerId, scannerId, new ScannerListener(scannerName));
       return scannerId;
     } catch (IOException e) {
-      LOG.error("", RemoteExceptionHandler.checkIOException(e));
+      LOG.error("Opening scanner (fsOk: " + this.fsOk + ")",
+        RemoteExceptionHandler.checkIOException(e));
       checkFileSystem();
       throw e;
     }
@@ -1243,14 +1266,14 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
       this.lock.readLock().unlock();
     }
   }
 
   /**
    * Checks to see if the file system is still accessible.
    * If not, sets abortRequested and stopRequested
    *
    * @return false if file system is not available
    */
-  protected synchronized boolean checkFileSystem() {
+  protected boolean checkFileSystem() {
     if (this.fsOk) {
       if (!FSUtils.isFileSystemAvailable(fs)) {
         LOG.fatal("Shutting down HRegionServer: file system not available");
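The Flusher and LogRoller hunks above both assume the Chore idiom: a worker thread built from a period and a shared stop flag, with the per-tick work in an overridden chore(). A hedged sketch of that shape -- this is not the actual org.apache.hadoop.hbase.Chore source, only the contract the super(period, stop) calls imply:

    import java.util.concurrent.atomic.AtomicBoolean;

    abstract class ChoreSketch extends Thread {
      private final int period;
      private final AtomicBoolean stop;

      ChoreSketch(final int period, final AtomicBoolean stop) {
        this.period = period;
        this.stop = stop;
      }

      /** Work to do once per period. */
      protected abstract void chore();

      @Override
      public void run() {
        while (!stop.get()) {
          chore();
          try {
            Thread.sleep(period);
          } catch (InterruptedException e) {
            // Fall through and re-check the stop flag.
          }
        }
      }
    }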
@@ -1336,7 +1336,7 @@ class HStore implements HConstants {
         midKey.set(((HStoreKey)midkey).getRow());
       }
     } catch(IOException e) {
-      LOG.warn("", e);
+      LOG.warn("Failed getting store size", e);
     } finally {
       this.lock.releaseReadLock();
     }
@@ -24,6 +24,7 @@ import java.io.IOException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.dfs.DistributedFileSystem;
 
 /**
@@ -32,7 +33,10 @@ import org.apache.hadoop.dfs.DistributedFileSystem;
 public class FSUtils {
   private static final Log LOG = LogFactory.getLog(FSUtils.class);
 
-  private FSUtils() {} // not instantiable
+  /**
+   * Not instantiable
+   */
+  private FSUtils() {}
 
   /**
    * Checks to see if the specified file system is available
@@ -41,31 +45,37 @@ public class FSUtils {
    * @return true if the specified file system is available.
    */
   public static boolean isFileSystemAvailable(FileSystem fs) {
+    if (!(fs instanceof DistributedFileSystem)) {
+      return true;
+    }
     boolean available = false;
-    if (fs instanceof DistributedFileSystem) {
-      try {
-        if (((DistributedFileSystem) fs).getDataNodeStats().length > 0) {
-          available = true;
-        } else {
-          LOG.fatal("file system unavailable: no data nodes");
-        }
-      } catch (IOException e) {
-        LOG.fatal("file system unavailable because: ", e);
-      }
-      try {
-        if (!available) {
-          fs.close();
-        }
-      } catch (IOException e) {
-        LOG.error("file system close", e);
-      }
-    } else {
-      available = true;
-    }
+    DistributedFileSystem dfs = (DistributedFileSystem) fs;
+    int maxTries = dfs.getConf().getInt("hbase.client.retries.number", 3);
+    Path root = new Path(dfs.getConf().get("hbase.dir", "/"));
+    for (int i = 0; i < maxTries; i++) {
+      IOException ex = null;
+      try {
+        if (dfs.exists(root)) {
+          available = true;
+          break;
+        }
+      } catch (IOException e) {
+        ex = e;
+      }
+      String exception = "";
+      if (ex != null) {
+        exception = ": " + ex.getMessage();
+      }
+      LOG.info("Failed exists test on " + root + " (Attempt " + i + ")" +
+        exception);
+    }
+    try {
+      if (!available) {
+        fs.close();
+      }
+    } catch (IOException e) {
+      LOG.error("file system close failed: ", e);
+    }
     return available;
   }
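For context, HRegionServer.checkFileSystem() (the @@ -1243 hunk above) is the main consumer of this utility. A trimmed, hedged sketch of that call pattern, with the abort/stop wiring reduced to two flags; the class and field names are illustrative, and the FSUtils package path is an assumption:

    import java.util.concurrent.atomic.AtomicBoolean;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hbase.util.FSUtils; // package path assumed

    class FileSystemGuard {
      private final FileSystem fs;
      private volatile boolean fsOk = true;
      private final AtomicBoolean stopRequested = new AtomicBoolean(false);

      FileSystemGuard(final FileSystem fs) {
        this.fs = fs;
      }

      /** @return false if the file system is no longer available */
      boolean checkFileSystem() {
        if (fsOk && !FSUtils.isFileSystemAvailable(fs)) {
          // Mirrors the fatal path in HRegionServer: mark for shutdown.
          fsOk = false;
          stopRequested.set(true);
        }
        return fsOk;
      }
    }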
@@ -119,6 +119,9 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
     if (dfsCluster != null) {
       dfsCluster.shutdown();
     }
+    if (fs != null) {
+      fs.close();
+    }
   }
 
   private HRegion createAregion(Text startKey, Text endKey, int firstRow,
@@ -84,6 +84,9 @@ public class MiniHBaseCluster implements HConstants {
   /**
    * Starts a MiniHBaseCluster on top of an existing HDFSCluster
    *
+   * Note that if you use this constructor, you should shut down the mini dfs
+   * cluster in your test case.
+   *
    * @param conf
    * @param nRegionNodes
    * @param dfsCluster
@@ -93,7 +96,8 @@ public class MiniHBaseCluster implements HConstants {
       MiniDFSCluster dfsCluster) throws IOException {
 
     this.conf = conf;
-    this.cluster = dfsCluster;
+    this.fs = dfsCluster.getFileSystem();
+    this.cluster = null;
     init(nRegionNodes);
   }
 
@@ -116,13 +120,16 @@ public class MiniHBaseCluster implements HConstants {
     this.deleteOnExit = deleteOnExit;
     if (miniHdfsFilesystem) {
       this.cluster = new MiniDFSCluster(this.conf, 2, format, (String[])null);
+      this.fs = cluster.getFileSystem();
+    } else {
+      this.cluster = null;
+      this.fs = FileSystem.get(conf);
     }
     init(nRegionNodes);
   }
 
   private void init(final int nRegionNodes) throws IOException {
     try {
-      this.fs = FileSystem.get(conf);
       this.parentdir = new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR));
       fs.mkdirs(parentdir);
       this.masterThread = startMaster(this.conf);
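The javadoc added above shifts ownership: a test that hands MiniHBaseCluster an existing MiniDFSCluster must shut that DFS cluster down itself, since this constructor now leaves this.cluster null. A hedged JUnit 3 sketch of that contract; the constructor arity follows the @@ -93,7 hunk above, and the class name, node counts, and MiniHBaseCluster.shutdown() call are illustrative assumptions:

    import junit.framework.TestCase;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.dfs.MiniDFSCluster;
    import org.apache.hadoop.hbase.MiniHBaseCluster;

    public class ExternalDfsOwnershipTest extends TestCase {
      private MiniDFSCluster dfsCluster;
      private MiniHBaseCluster hbaseCluster;

      @Override
      protected void setUp() throws Exception {
        super.setUp();
        Configuration conf = new Configuration();
        dfsCluster = new MiniDFSCluster(conf, 2, true, (String[]) null);
        hbaseCluster = new MiniHBaseCluster(conf, 1, dfsCluster);
      }

      @Override
      protected void tearDown() throws Exception {
        hbaseCluster.shutdown();
        // We created the DFS cluster, so we stop it; MiniHBaseCluster
        // no longer does for this constructor.
        dfsCluster.shutdown();
        super.tearDown();
      }
    }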
@@ -40,6 +40,7 @@ public class TestBatchUpdate extends HBaseClusterTestCase {
    * @throws UnsupportedEncodingException
    */
   public TestBatchUpdate() throws UnsupportedEncodingException {
+    super();
     value = "abcd".getBytes(HConstants.UTF8_ENCODING);
   }
 
@@ -60,7 +60,10 @@ public class TestDFSAbort extends HBaseClusterTestCase {
     cluster.join();
   }
 
-  public static void main(String[] args) {
+  /**
+   * @param args unused
+   */
+  public static void main(@SuppressWarnings("unused") String[] args) {
     TestRunner.run(new TestSuite(TestDFSAbort.class));
   }
 }
@@ -31,7 +31,6 @@ import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.MapWritable;
 import org.apache.hadoop.io.Text;
@@ -177,7 +176,7 @@ public class TestScanner2 extends HBaseClusterTestCase {
     HBaseAdmin admin = new HBaseAdmin(conf);
     Text tableName = new Text(getName());
     admin.createTable(new HTableDescriptor(tableName.toString()));
-    List<HRegionInfo> regions = scan(conf, metaTable);
+    List<HRegionInfo> regions = scan(metaTable);
     assertEquals("Expected one region", regions.size(), 1);
     HRegionInfo region = regions.get(0);
     assertTrue("Expected region named for test",
@@ -197,10 +196,10 @@ public class TestScanner2 extends HBaseClusterTestCase {
       homedir, this.conf, null));
     try {
       for (HRegion r : newRegions) {
-        addRegionToMETA(conf, metaTable, r, this.cluster.getHMasterAddress(),
+        addRegionToMETA(metaTable, r, this.cluster.getHMasterAddress(),
           -1L);
       }
-      regions = scan(conf, metaTable);
+      regions = scan(metaTable);
       assertEquals("Should be two regions only", 2, regions.size());
     } finally {
       for (HRegion r : newRegions) {
@@ -210,7 +209,7 @@ public class TestScanner2 extends HBaseClusterTestCase {
     }
   }
 
-  private List<HRegionInfo> scan(final Configuration conf, final HTable t)
+  private List<HRegionInfo> scan(final HTable t)
   throws IOException {
     List<HRegionInfo> regions = new ArrayList<HRegionInfo>();
     HRegionInterface regionServer = null;
@@ -262,8 +261,7 @@ public class TestScanner2 extends HBaseClusterTestCase {
     return regions;
   }
 
-  private void addRegionToMETA(final Configuration conf,
-    final HTable t, final HRegion region,
+  private void addRegionToMETA(final HTable t, final HRegion region,
     final HServerAddress serverAddress,
     final long startCode)
   throws IOException {
@@ -32,7 +32,8 @@ import org.apache.log4j.Logger;
  * {@Link TestHRegion} does a split but this TestCase adds testing of fast
  * split and manufactures odd-ball split scenarios.
  */
-public class TestSplit extends HBaseTestCase {
+public class TestSplit extends MultiRegionTable {
+  @SuppressWarnings("hiding")
   static final Log LOG = LogFactory.getLog(TestSplit.class.getName());
 
   /** constructor */
@@ -45,7 +45,8 @@ import org.apache.hadoop.hbase.mapred.IdentityTableReduce;
 /**
  * Test Map/Reduce job over HBase tables
  */
-public class TestTableMapReduce extends HBaseTestCase {
+public class TestTableMapReduce extends MultiRegionTable {
+  @SuppressWarnings("hiding")
   private static final Log LOG =
     LogFactory.getLog(TestTableMapReduce.class.getName());
 
@@ -115,6 +116,10 @@ public class TestTableMapReduce extends HBaseTestCase {
     if (dfsCluster != null) {
       dfsCluster.shutdown();
     }
+
+    if (fs != null) {
+      fs.close();
+    }
   }
 
   /**
@@ -254,7 +259,7 @@ public class TestTableMapReduce extends HBaseTestCase {
     admin.createTable(desc);
 
     // Populate a table into multiple regions
-    MultiRegionTable.makeMultiRegionTable(conf, hCluster, null,
+    MultiRegionTable.makeMultiRegionTable(conf, hCluster, fs,
       MULTI_REGION_TABLE_NAME, INPUT_COLUMN);
 
     // Verify table indeed has multiple regions
@@ -43,10 +43,12 @@ public class TestHBaseShell extends HBaseClusterTestCase {
   private ByteArrayOutputStream baos;
   private HBaseAdmin admin;
 
+  /** constructor */
   public TestHBaseShell() {
     super(1 /*One region server only*/);
   }
 
+  /** {@inheritDoc} */
   @Override
   public void setUp() throws Exception {
     super.setUp();
@@ -100,6 +102,9 @@ public class TestHBaseShell extends HBaseClusterTestCase {
       sglQuotedColumnFamily + "');", tmpTableName, sglQuotedColumnFamily);
   }
 
+  /**
+   * @throws Exception
+   */
   public void testInsertSelectDelete() throws Exception {
     final String tableName = getName();
     final String columnFamily = tableName;