HBASE-426 hbase can't find remote filesystem
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@620058 13f79535-47bb-0310-9956-ffa450edef68
parent 9dccb4c8b8
commit 8dafaa567a
@@ -16,6 +16,7 @@ Hbase Change Log
    HBASE-2 hlog numbers should wrap around when they reach 999
            (Bryan Duxbury via Stack)
    HBASE-421 TestRegionServerExit broken
+   HBASE-426 hbase can't find remote filesystem
 
 IMPROVEMENTS
    HBASE-415 Rewrite leases to use DelayedBlockingQueue instead of polling
@@ -32,9 +32,10 @@
   </property>
   <property>
     <name>hbase.rootdir</name>
-    <value>file:///${hadoop.tmp.dir}/hbase</value>
+    <value>file:///tmp/hbase-${user.home}/hbase</value>
     <description>The directory shared by region servers.
-    Should be fully-qualified to include the filesystem.
+    Should be fully-qualified to include the filesystem to use.
+    E.g: hdfs://NAMENODE_SERVER:PORT/HBASE_ROOTDIR
     </description>
   </property>
   <property>
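Note: the new default and description both make the same point: hbase.rootdir must name its filesystem explicitly, because an unqualified path is resolved against whatever FileSystem.get(conf) happens to return, which is how HBase ended up unable to find a remote filesystem. A minimal sketch of the distinction, assuming an HBase 0.1-era classpath (illustrative snippet, not part of this commit):

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RootdirCheck {
      public static void main(String[] args) throws Exception {
        HBaseConfiguration conf = new HBaseConfiguration();
        // FileSystem.get(conf) is chosen by fs.default.name, so an
        // unqualified value like "/hbase" silently binds to that default.
        FileSystem fs = FileSystem.get(conf);
        // makeQualified stamps the filesystem onto the path, e.g.
        // hdfs://NAMENODE_SERVER:PORT/HBASE_ROOTDIR, removing the ambiguity.
        Path rootdir = fs.makeQualified(new Path(conf.get("hbase.rootdir")));
        System.out.println("rootdir resolves to: " + rootdir);
      }
    }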
@@ -70,7 +70,7 @@ public class HConnectionManager implements HConstants {
   public static HConnection getConnection(HBaseConfiguration conf) {
     TableServers connection;
     synchronized (HBASE_INSTANCES) {
-      String instanceName = conf.get(HBASE_DIR, DEFAULT_HBASE_DIR);
+      String instanceName = conf.get(HBASE_DIR);
 
       connection = HBASE_INSTANCES.get(instanceName);
 
@@ -88,9 +88,8 @@ public class HConnectionManager implements HConstants {
    */
   public static void deleteConnection(HBaseConfiguration conf) {
     synchronized (HBASE_INSTANCES) {
-      TableServers instance =
-        HBASE_INSTANCES.remove(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR));
+      HBASE_INSTANCES.remove(conf.get(HBASE_DIR));
     }
   }
 
   /* Encapsulates finding the servers for an HBase instance */
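Both getConnection and deleteConnection now derive the cache key with the same one-argument conf.get(HBASE_DIR); if one side had kept the two-argument form with a fallback default, lookup and removal could disagree on the key and leak cached connections. A toy illustration of that keying invariant (hypothetical code, not from this commit):

    import java.util.HashMap;
    import java.util.Map;

    public class ConnectionCacheSketch {
      // Stand-in for HBASE_INSTANCES: keyed by the hbase.rootdir string.
      private static final Map<String, Object> INSTANCES =
        new HashMap<String, Object>();

      static Object getConnection(String rootdir) {
        synchronized (INSTANCES) {
          Object conn = INSTANCES.get(rootdir);
          if (conn == null) {
            conn = new Object(); // the real code constructs a TableServers
            INSTANCES.put(rootdir, conn);
          }
          return conn;
        }
      }

      static void deleteConnection(String rootdir) {
        synchronized (INSTANCES) {
          // Must derive the key exactly as getConnection does,
          // otherwise the cached entry is never removed.
          INSTANCES.remove(rootdir);
        }
      }
    }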
@@ -83,9 +83,6 @@ public interface HConstants {
   /** Parameter name for HBase instance root directory */
   static final String HBASE_DIR = "hbase.rootdir";
 
-  /** Default HBase instance root directory */
-  static final String DEFAULT_HBASE_DIR = "/hbase";
-
   /** Used to construct the name of the log directory for a region server */
   static final String HREGION_LOGDIR_NAME = "log";
 
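With DEFAULT_HBASE_DIR gone, conf.get(HBASE_DIR) is backed only by the hbase-default.xml / hbase-site.xml chain; callers no longer receive a bare "/hbase" fallback that can mask a misconfigured filesystem. Roughly the behavioral difference (hypothetical snippet, not part of this commit):

    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class HbaseDirLookup {
      public static void main(String[] args) {
        HBaseConfiguration conf = new HBaseConfiguration();
        // Old pattern: a hardcoded fallback hides a bad or missing setting.
        String masked = conf.get("hbase.rootdir", "/hbase");
        // New pattern: the value comes from hbase-default.xml unless
        // overridden in hbase-site.xml; null only if neither defines it.
        String actual = conf.get("hbase.rootdir");
        System.out.println(masked + " vs " + actual);
      }
    }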
@@ -598,7 +598,7 @@ public class HLog implements HConstants {
     }
     Configuration conf = new HBaseConfiguration();
     FileSystem fs = FileSystem.get(conf);
-    Path baseDir = new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR));
+    Path baseDir = new Path(conf.get(HBASE_DIR));
 
     for (int i = 1; i < args.length; i++) {
       Path logPath = new Path(args[i]);
@@ -99,7 +99,7 @@ class HMerge implements HConstants {
         conf.getLong("hbase.hregion.max.filesize", DEFAULT_MAX_FILE_SIZE);
 
       this.tabledir = new Path(
-        fs.makeQualified(new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR))),
+        fs.makeQualified(new Path(conf.get(HBASE_DIR))),
         tableName.toString()
       );
       Path logdir = new Path(tabledir, "merge_" + System.currentTimeMillis() +
@@ -296,7 +296,7 @@ class HMerge implements HConstants {
       super(conf, fs, META_TABLE_NAME);
 
       Path rootTableDir = HTableDescriptor.getTableDir(
-        fs.makeQualified(new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR))),
+        fs.makeQualified(new Path(conf.get(HBASE_DIR))),
         ROOT_TABLE_NAME);
 
       // Scan root region to find all the meta regions
@@ -879,28 +879,28 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
    * @throws IOException
    */
   public HMaster(HBaseConfiguration conf) throws IOException {
-    this(new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR)),
+    this(new Path(conf.get(HBASE_DIR)),
       new HServerAddress(conf.get(MASTER_ADDRESS, DEFAULT_MASTER_ADDRESS)),
       conf);
   }
 
   /**
    * Build the HMaster
-   * @param rootdir base directory of this HBase instance
+   * @param rd base directory of this HBase instance. Must be fully
+   * qualified so includes filesystem to use.
    * @param address server address and port number
    * @param conf configuration
    *
    * @throws IOException
    */
-  public HMaster(Path rootdir, HServerAddress address, HBaseConfiguration conf)
-  throws IOException {
+  public HMaster(Path rd, HServerAddress address, HBaseConfiguration conf)
+  throws IOException {
 
     this.conf = conf;
     this.fs = FileSystem.get(conf);
-    this.rootdir = fs.makeQualified(rootdir);
+    this.rootdir = this.fs.makeQualified(rd);
+    this.conf.set(HConstants.HBASE_DIR, this.rootdir.toString());
     this.rand = new Random();
 
     Path rootRegionDir =
       HRegion.getRegionDir(rootdir, HRegionInfo.rootRegionInfo);
     LOG.info("Root region dir: " + rootRegionDir.toString());
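The reworked constructor qualifies whatever rootdir it is handed and then writes the qualified string back into the configuration, so every later reader of hbase.rootdir on that conf sees one canonical, filesystem-qualified value. In outline, the pattern looks like this (hypothetical helper, not part of the commit):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QualifyRootdir {
      // Canonicalize hbase.rootdir and publish it back into the conf so
      // all downstream consumers agree on the same qualified path.
      static Path qualify(HBaseConfiguration conf) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        Path rootdir = fs.makeQualified(new Path(conf.get("hbase.rootdir")));
        conf.set("hbase.rootdir", rootdir.toString());
        return rootdir;
      }
    }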
@@ -148,8 +148,8 @@ public class Migrate extends Configured implements Tool {
 
     LOG.info("Starting upgrade" + (readOnly ? " check" : ""));
 
-    Path rootdir = fs.makeQualified(new Path(    // get HBase root dir
-      conf.get(HConstants.HBASE_DIR, HConstants.DEFAULT_HBASE_DIR)));
+    Path rootdir =
+      fs.makeQualified(new Path(this.conf.get(HConstants.HBASE_DIR)));
 
     if (!fs.exists(rootdir)) {
       throw new FileNotFoundException("HBase root directory " +
@@ -39,12 +39,12 @@ Start by defining the following directory variables for your convenience:
 </p>
 <p>
 <ul>
-<li><code>${HBASE_HOME}</code>: The Hbase root.
+<li><code>${HBASE_HOME}</code>: The HBase root.
 </li>
 </ul>
 </p>
 <p>Edit <code>${HBASE_HOME}/conf/hbase-env.sh</code>. In this file you can
-set the heapsize for Hbase, etc. At a minimum, set
+set the heapsize for HBase, etc. At a minimum, set
 <code>JAVA_HOME</code> to the root of your Java installation.
 <p>
 If you are running a standalone operation, proceed to <a href=#runandconfirm>Running
@@ -54,11 +54,11 @@ and Confirming Your Installation</a>. If you are running a distributed operatio
 <h2><a name="distributed" >Distributed Operation</a></h2>
 <p>Distributed mode requires an instance of the Hadoop Distributed File System (DFS).
 See the Hadoop <a href="http://lucene.apache.org/hadoop/api/overview-summary.html#overview_description">
-requirements and instructions</a> for running a distributed operation. Configuring Hbase for a
+requirements and instructions</a> for running a distributed operation. Configuring HBase for a
 distributed operation requires modification of the following two files:
 <code>${HBASE_HOME}/conf/hbase-site.xml</code> and <code>${HBASE_HOME}/conf/regionservers</code>.
 The former needs to be pointed at the running Hadoop DFS instance. The latter file lists
-all members of the Hbase cluster.
+all members of the HBase cluster.
 </p>
 <p>
 <code>hbase-site.xml</code> allows the user to override the properties defined in
@@ -67,21 +67,22 @@ should never be modified). At a minimum the <code>hbase.master</code> and the
 <code>hbase.rootdir</code> properties should be redefined
 in <code>hbase-site.xml</code> to define the <code>host:port</code> pair on which to run the
 HMaster (<a href="http://wiki.apache.org/lucene-hadoop/Hbase/HbaseArchitecture">read about the
-Hbase master, regionservers, etc</a>) and to point hbase the Hadoop filesystem to use:
+Hbase master, regionservers, etc</a>) and to point hbase at the Hadoop filesystem to use. For
+example:
 </p>
 <pre>
 <configuration>
 
 <property>
 <name>hbase.master</name>
-<value>[YOUR_HOST]:[PORT]</value>
-<description>The host and port that the Hbase master runs at.
+<value>http://localhost:60000</value>
+<description>The host and port that the HBase master runs at.
 </description>
 </property>
 
 <property>
 <name>hbase.rootdir</name>
-<value>FULLY_QUALIFIED_PATH</value>
+<value>hdfs://localhost:9000/hbase</value>
 <description>The directory shared by region servers.
 </description>
 </property>
@@ -98,41 +99,41 @@ host per line (This file is synonymous to the slaves file at
 <p>If you are running in standalone, non-distributed mode, hbase by default uses
 the local filesystem.</p>
 <p>If you are running a distributed cluster you will need to start the Hadoop DFS daemons
-before starting Hbase and stop the daemons after Hbase has shut down. Start and
+before starting HBase and stop the daemons after HBase has shut down. Start and
 stop the Hadoop DFS daemons as per the Hadoop
-<a href="http://lucene.apache.org/hadoop/api/overview-summary.html">instructions</a>. Hbase
+<a href="http://lucene.apache.org/hadoop/api/overview-summary.html">instructions</a>. HBase
 does not normally use the mapreduce daemons. These do not need to be started.</p>
 
-<p>Start Hbase with the following command:
+<p>Start HBase with the following command:
 </p>
 <pre>
 ${HBASE_HOME}/bin/start-hbase.sh
 </pre>
 <p>
-Once Hbase has started, enter <code>${HBASE_HOME}/bin/hbase shell</code> to obtain a
-shell against Hbase from which you can execute HQL commands (HQL is a severe subset of SQL).
-In the Hbase shell, type
-<code>help;</code> to see a list of supported HQL commands. Note that all commands in the Hbase
+Once HBase has started, enter <code>${HBASE_HOME}/bin/hbase shell</code> to obtain a
+shell against HBase from which you can execute HQL commands (HQL is a severe subset of SQL).
+In the HBase shell, type
+<code>help;</code> to see a list of supported HQL commands. Note that all commands in the HBase
 shell must end with <code>;</code>. Test your installation by creating, viewing, and dropping
 a table, as per the help instructions. Be patient with the <code>create</code> and
 <code>drop</code> operations as they may each take 10 seconds or more. To stop hbase, exit the
-Hbase shell and enter:
+HBase shell and enter:
 </p>
 <pre>
 ${HBASE_HOME}/bin/stop-hbase.sh
 </pre>
 <p>
-If you are running a distributed operation, be sure to wait until Hbase has shut down completely
+If you are running a distributed operation, be sure to wait until HBase has shut down completely
 before stopping the Hadoop daemons.
 </p>
 <p>
 The default location for logs is <code>${HBASE_HOME}/logs</code>.
 </p>
-<p>Hbase also puts up a UI listing vital attributes. By default its deployed on the master host
+<p>HBase also puts up a UI listing vital attributes. By default its deployed on the master host
 at port 60010.</p>
 
 <h2><a name="upgrading" >Upgrading</a></h2>
-<p>After installing the new Hbase, before starting your cluster, run the
+<p>After installing the new HBase, before starting your cluster, run the
 <code>${HBASE_DIR}/bin/hbase migrate</code> migration script. It will make any
 adjustments to the filesystem data under <code>hbase.rootdir</code> necessary to run
 the hbase version (It does not change your install unless you explicitly ask it to).
@@ -140,8 +141,8 @@ the hbase version (It does not change your install unless you explicitly ask it
 
 <h2><a name="related" >Related Documentation</a></h2>
 <ul>
-<li><a href="http://wiki.apache.org/lucene-hadoop/Hbase">Hbase Home Page</a>
-<li><a href="http://wiki.apache.org/lucene-hadoop/Hbase/HbaseArchitecture">Hbase Architecture</a>
+<li><a href="http://wiki.apache.org/lucene-hadoop/Hbase">HBase Home Page</a>
+<li><a href="http://wiki.apache.org/lucene-hadoop/Hbase/HbaseArchitecture">HBase Architecture</a>
 </ul>
 
 </body>
@@ -22,13 +22,6 @@
  */
 -->
 <configuration>
-  <property>
-    <name>fs.default.name</name>
-    <value></value>
-    <description>Use hdfs as file system by default. Modify this to run on
-    local file system.
-    </description>
-  </property>
   <property>
     <name>hbase.regionserver.msginterval</name>
     <value>1000</value>
@@ -104,11 +97,6 @@
     invoking an optional cache flush. Default 60,000.
     </description>
   </property>
-  <property>
-    <name>hbase.rootdir</name>
-    <value>/hbase</value>
-    <description>location of HBase instance in dfs</description>
-  </property>
   <property>
     <name>hbase.hregion.max.filesize</name>
     <value>67108864</value>
@@ -68,6 +68,9 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
   public void setUp() throws Exception {
     conf.setLong("hbase.hregion.max.filesize", 64L * 1024L * 1024L);
     dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+    // Set the hbase.rootdir to be the home directory in mini dfs.
+    this.conf.set(HConstants.HBASE_DIR,
+      this.dfsCluster.getFileSystem().getHomeDirectory().toString());
 
     // Note: we must call super.setUp after starting the mini cluster or
     // we will end up with a local file system
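The same three added lines recur in every test hunk below: bring up the MiniDFSCluster first, point hbase.rootdir at that cluster's home directory, and only then call super.setUp() so the shared setup binds to HDFS rather than the local filesystem. In outline (a hypothetical test, not one from this commit):

    import org.apache.hadoop.dfs.MiniDFSCluster;
    import org.apache.hadoop.hbase.HBaseTestCase;
    import org.apache.hadoop.hbase.HConstants;

    public class ExampleDfsTest extends HBaseTestCase {
      private MiniDFSCluster cluster;

      @Override
      public void setUp() throws Exception {
        // 1. Start the mini DFS before anything touches the filesystem.
        cluster = new MiniDFSCluster(conf, 2, true, (String[]) null);
        // 2. Point hbase.rootdir at the mini DFS home directory so the
        //    value is hdfs-qualified rather than a bare local path.
        conf.set(HConstants.HBASE_DIR,
          cluster.getFileSystem().getHomeDirectory().toString());
        // 3. Only now run the shared setup, which reads hbase.rootdir.
        super.setUp();
      }
    }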
@@ -22,7 +22,6 @@ package org.apache.hadoop.hbase;
 import java.io.IOException;
-import java.io.UnsupportedEncodingException;
 import java.util.Map;
 import java.util.Random;
 
 import junit.framework.TestCase;
 
@@ -113,9 +112,8 @@ public abstract class HBaseTestCase extends TestCase {
           fs.delete(testDir);
         }
       } else {
-        this.testDir = fs.makeQualified(
-          new Path(conf.get(HConstants.HBASE_DIR, HConstants.DEFAULT_HBASE_DIR))
-        );
+        this.testDir =
+          this.fs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR)));
       }
     } catch (Exception e) {
       LOG.fatal("error during setup", e);
@@ -147,10 +145,10 @@ public abstract class HBaseTestCase extends TestCase {
   protected HRegion createNewHRegion(HTableDescriptor desc, Text startKey,
       Text endKey) throws IOException {
 
-    FileSystem fs = FileSystem.get(conf);
-    Path rootdir = fs.makeQualified(
-      new Path(conf.get(HConstants.HBASE_DIR, HConstants.DEFAULT_HBASE_DIR)));
-    fs.mkdirs(rootdir);
+    FileSystem filesystem = FileSystem.get(conf);
+    Path rootdir = filesystem.makeQualified(
+      new Path(conf.get(HConstants.HBASE_DIR)));
+    filesystem.mkdirs(rootdir);
 
     return HRegion.createHRegion(new HRegionInfo(desc, startKey, endKey),
       rootdir, conf);
@@ -409,7 +407,6 @@ public abstract class HBaseTestCase extends TestCase {
    */
   public static class HRegionIncommon implements Incommon, FlushCache {
     final HRegion region;
-    private final Random rand = new Random();
     private BatchUpdate batch;
 
     private void checkBatch() {
@@ -445,12 +442,13 @@ public abstract class HBaseTestCase extends TestCase {
       }
     }
     /** {@inheritDoc} */
-    public void put(long lockid, Text column, byte[] val) {
+    public void put(@SuppressWarnings("unused") long lockid, Text column,
+        byte[] val) {
       checkBatch();
       this.batch.put(column, val);
     }
     /** {@inheritDoc} */
-    public void delete(long lockid, Text column) {
+    public void delete(@SuppressWarnings("unused") long lockid, Text column) {
       checkBatch();
       this.batch.delete(column);
     }
@@ -139,8 +139,9 @@ public class MiniHBaseCluster implements HConstants {
 
   private void init(final int nRegionNodes) throws IOException {
     try {
-      this.parentdir = new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR));
-      fs.mkdirs(parentdir);
+      this.parentdir = this.fs.getHomeDirectory();
+      this.conf.set(HConstants.HBASE_DIR, this.parentdir.toString());
+      this.fs.mkdirs(parentdir);
       FSUtils.setVersion(fs, parentdir);
       this.hbaseCluster = new LocalHBaseCluster(this.conf, nRegionNodes);
       this.hbaseCluster.startup();
@@ -59,6 +59,9 @@ public class TestCompaction extends HBaseTestCase {
   @Override
   public void setUp() throws Exception {
     this.cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+    // Make the hbase rootdir match the minidfs we just span up
+    this.conf.set(HConstants.HBASE_DIR,
+      this.cluster.getFileSystem().getHomeDirectory().toString());
     super.setUp();
     HTableDescriptor htd = createTableDescriptor(getName());
     this.r = createNewHRegion(htd, null, null);
@@ -38,6 +38,9 @@ public class TestDeleteAll extends HBaseTestCase {
     super.setUp();
     try {
       this.miniHdfs = new MiniDFSCluster(this.conf, 1, true, null);
+      // Set the hbase.rootdir to be the home directory in mini dfs.
+      this.conf.set(HConstants.HBASE_DIR,
+        this.miniHdfs.getFileSystem().getHomeDirectory().toString());
     } catch (Exception e) {
       LOG.fatal("error starting MiniDFSCluster", e);
       throw e;
@@ -38,6 +38,9 @@ public class TestDeleteFamily extends HBaseTestCase {
   protected void setUp() throws Exception {
     super.setUp();
     this.miniHdfs = new MiniDFSCluster(this.conf, 1, true, null);
+    // Set the hbase.rootdir to be the home directory in mini dfs.
+    this.conf.set(HConstants.HBASE_DIR,
+      this.miniHdfs.getFileSystem().getHomeDirectory().toString());
   }
 
   /**
@@ -78,6 +78,9 @@ public class TestGet extends HBaseTestCase {
       // Initialization
 
       cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+      // Set the hbase.rootdir to be the home directory in mini dfs.
+      this.conf.set(HConstants.HBASE_DIR,
+        cluster.getFileSystem().getHomeDirectory().toString());
 
       HTableDescriptor desc = new HTableDescriptor("test");
       desc.addFamily(new HColumnDescriptor(CONTENTS.toString()));
@@ -40,6 +40,9 @@ public class TestGet2 extends HBaseTestCase {
   protected void setUp() throws Exception {
     super.setUp();
     this.miniHdfs = new MiniDFSCluster(this.conf, 1, true, null);
+    // Set the hbase.rootdir to be the home directory in mini dfs.
+    this.conf.set(HConstants.HBASE_DIR,
+      this.miniHdfs.getFileSystem().getHomeDirectory().toString());
   }
 
   /**
@@ -37,6 +37,9 @@ public class TestHLog extends HBaseTestCase implements HConstants {
   @Override
   public void setUp() throws Exception {
     cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+    // Set the hbase.rootdir to be the home directory in mini dfs.
+    this.conf.set(HConstants.HBASE_DIR,
+      this.cluster.getFileSystem().getHomeDirectory().toString());
     super.setUp();
     this.dir = new Path("/hbase", getName());
     if (fs.exists(dir)) {
@@ -96,6 +96,9 @@ implements RegionUnavailableListener {
   private void setup() throws IOException {
 
     cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+    // Set the hbase.rootdir to be the home directory in mini dfs.
+    this.conf.set(HConstants.HBASE_DIR,
+      this.cluster.getFileSystem().getHomeDirectory().toString());
 
     desc = new HTableDescriptor("test");
     desc.addFamily(new HColumnDescriptor("contents:"));
@@ -45,6 +45,9 @@ public class TestHStoreFile extends HBaseTestCase {
   public void setUp() throws Exception {
     try {
       this.cluster = new MiniDFSCluster(this.conf, 2, true, (String[])null);
+      // Set the hbase.rootdir to be the home directory in mini dfs.
+      this.conf.set(HConstants.HBASE_DIR,
+        this.cluster.getFileSystem().getHomeDirectory().toString());
       this.dir = new Path(DIR, getName());
     } catch (IOException e) {
       StaticTestEnvironment.shutdownDfs(cluster);
@@ -95,6 +95,9 @@ public class TestLogRolling extends HBaseTestCase {
     try {
       super.setUp();
       dfs = new MiniDFSCluster(conf, 2, true, (String[]) null);
+      // Set the hbase.rootdir to be the home directory in mini dfs.
+      this.conf.set(HConstants.HBASE_DIR,
+        this.dfs.getFileSystem().getHomeDirectory().toString());
     } catch (Exception e) {
       StaticTestEnvironment.shutdownDfs(dfs);
       LOG.fatal("error during setUp: ", e);
@@ -39,4 +39,4 @@ public class TestMergeMeta extends AbstractMergeTestBase {
     assertNotNull(dfsCluster);
     HMerge.merge(conf, dfsCluster.getFileSystem(), HConstants.META_TABLE_NAME);
   }
 }
@@ -56,6 +56,9 @@ public class TestScanner extends HBaseTestCase {
   @Override
   public void setUp() throws Exception {
     cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+    // Set the hbase.rootdir to be the home directory in mini dfs.
+    this.conf.set(HConstants.HBASE_DIR,
+      this.cluster.getFileSystem().getHomeDirectory().toString());
     super.setUp();
 
   }
@@ -69,6 +69,9 @@ public class TestSplit extends MultiRegionTable {
     HRegion region = null;
     try {
       cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+      // Set the hbase.rootdir to be the home directory in mini dfs.
+      this.conf.set(HConstants.HBASE_DIR,
+        cluster.getFileSystem().getHomeDirectory().toString());
       HTableDescriptor htd = createTableDescriptor(getName());
       region = createNewHRegion(htd, null, null);
       basicSplit(region);
@@ -57,6 +57,9 @@ public class TestTimestamp extends HBaseTestCase {
   @Override
   public void setUp() throws Exception {
     this.cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+    // Set the hbase.rootdir to be the home directory in mini dfs.
+    this.conf.set(HConstants.HBASE_DIR,
+      this.cluster.getFileSystem().getHomeDirectory().toString());
     super.setUp();
   }
 
@@ -101,6 +101,9 @@ public class TestTableIndex extends MultiRegionTable {
     desc.addFamily(new HColumnDescriptor(OUTPUT_COLUMN));
 
     dfsCluster = new MiniDFSCluster(conf, 1, true, (String[]) null);
+    // Set the hbase.rootdir to be the home directory in mini dfs.
+    this.conf.set(HConstants.HBASE_DIR,
+      this.dfsCluster.getFileSystem().getHomeDirectory().toString());
 
     // Must call super.setUp after mini dfs cluster is started or else
     // filesystem ends up being local
@@ -115,6 +115,9 @@ public class TestTableMapReduce extends MultiRegionTable {
   @Override
   public void setUp() throws Exception {
     dfsCluster = new MiniDFSCluster(conf, 1, true, (String[])null);
+    // Set the hbase.rootdir to be the home directory in mini dfs.
+    this.conf.set(HConstants.HBASE_DIR,
+      this.dfsCluster.getFileSystem().getHomeDirectory().toString());
 
     // Must call super.setup() after starting mini dfs cluster. Otherwise
     // we get a local file system instead of hdfs
@@ -75,9 +75,11 @@ public class TestMigrate extends HBaseTestCase {
     MiniDFSCluster dfsCluster = null;
     try {
       dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+      // Set the hbase.rootdir to be the home directory in mini dfs.
+      this.conf.set(HConstants.HBASE_DIR,
+        dfsCluster.getFileSystem().getHomeDirectory().toString());
       FileSystem dfs = dfsCluster.getFileSystem();
-      Path root = dfs.makeQualified(new Path(
-        conf.get(HConstants.HBASE_DIR, HConstants.DEFAULT_HBASE_DIR)));
+      Path root = dfs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR)));
       dfs.mkdirs(root);
 
       /*