HBASE-426 hbase can't find remote filesystem
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@620058 13f79535-47bb-0310-9956-ffa450edef68
commit 8dafaa567a
parent 9dccb4c8b8

CHANGES.txt:
@@ -16,6 +16,7 @@ Hbase Change Log
   HBASE-2 hlog numbers should wrap around when they reach 999
           (Bryan Duxbury via Stack)
   HBASE-421 TestRegionServerExit broken
+  HBASE-426 hbase can't find remote filesystem

 IMPROVEMENTS
   HBASE-415 Rewrite leases to use DelayedBlockingQueue instead of polling

hbase-default.xml:
@@ -32,9 +32,10 @@
   </property>
   <property>
     <name>hbase.rootdir</name>
-    <value>file:///${hadoop.tmp.dir}/hbase</value>
+    <value>file:///tmp/hbase-${user.home}/hbase</value>
     <description>The directory shared by region servers.
-    Should be fully-qualified to include the filesystem.
+    Should be fully-qualified to include the filesystem to use.
+    E.g: hdfs://NAMENODE_SERVER:PORT/HBASE_ROOTDIR
     </description>
   </property>
   <property>
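The new default keeps standalone mode on the local filesystem via the file:/// scheme. For a distributed deployment the property must be overridden in hbase-site.xml with a filesystem-qualified value; an example using the illustrative host and port that this same commit adds to overview.html below:

    <property>
      <name>hbase.rootdir</name>
      <value>hdfs://localhost:9000/hbase</value>
      <description>The directory shared by region servers.</description>
    </property>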

HConnectionManager.java:
@@ -70,7 +70,7 @@ public class HConnectionManager implements HConstants {
   public static HConnection getConnection(HBaseConfiguration conf) {
     TableServers connection;
     synchronized (HBASE_INSTANCES) {
-      String instanceName = conf.get(HBASE_DIR, DEFAULT_HBASE_DIR);
+      String instanceName = conf.get(HBASE_DIR);

       connection = HBASE_INSTANCES.get(instanceName);

@@ -88,9 +88,8 @@ public class HConnectionManager implements HConstants {
    */
   public static void deleteConnection(HBaseConfiguration conf) {
     synchronized (HBASE_INSTANCES) {
-      TableServers instance =
-        HBASE_INSTANCES.remove(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR));
+      HBASE_INSTANCES.remove(conf.get(HBASE_DIR));
     }
   }

   /* Encapsulates finding the servers for an HBase instance */

HConstants.java:
@@ -83,9 +83,6 @@ public interface HConstants {
   /** Parameter name for HBase instance root directory */
   static final String HBASE_DIR = "hbase.rootdir";

-  /** Default HBase instance root directory */
-  static final String DEFAULT_HBASE_DIR = "/hbase";
-
   /** Used to construct the name of the log directory for a region server */
   static final String HREGION_LOGDIR_NAME = "log";


HLog.java:
@@ -598,7 +598,7 @@ public class HLog implements HConstants {
     }
     Configuration conf = new HBaseConfiguration();
     FileSystem fs = FileSystem.get(conf);
-    Path baseDir = new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR));
+    Path baseDir = new Path(conf.get(HBASE_DIR));

     for (int i = 1; i < args.length; i++) {
       Path logPath = new Path(args[i]);

HMerge.java:
@@ -99,7 +99,7 @@ class HMerge implements HConstants {
         conf.getLong("hbase.hregion.max.filesize", DEFAULT_MAX_FILE_SIZE);

       this.tabledir = new Path(
-        fs.makeQualified(new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR))),
+        fs.makeQualified(new Path(conf.get(HBASE_DIR))),
         tableName.toString()
       );
       Path logdir = new Path(tabledir, "merge_" + System.currentTimeMillis() +
@@ -296,7 +296,7 @@ class HMerge implements HConstants {
       super(conf, fs, META_TABLE_NAME);

       Path rootTableDir = HTableDescriptor.getTableDir(
-        fs.makeQualified(new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR))),
+        fs.makeQualified(new Path(conf.get(HBASE_DIR))),
         ROOT_TABLE_NAME);

       // Scan root region to find all the meta regions

HMaster.java:
@@ -879,28 +879,28 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
    * @throws IOException
    */
   public HMaster(HBaseConfiguration conf) throws IOException {
-    this(new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR)),
+    this(new Path(conf.get(HBASE_DIR)),
       new HServerAddress(conf.get(MASTER_ADDRESS, DEFAULT_MASTER_ADDRESS)),
       conf);
   }

   /**
    * Build the HMaster
-   * @param rootdir base directory of this HBase instance
+   * @param rd base directory of this HBase instance. Must be fully
+   * qualified so includes filesystem to use.
    * @param address server address and port number
    * @param conf configuration
    *
    * @throws IOException
    */
-  public HMaster(Path rootdir, HServerAddress address, HBaseConfiguration conf)
+  public HMaster(Path rd, HServerAddress address, HBaseConfiguration conf)
   throws IOException {

     this.conf = conf;
     this.fs = FileSystem.get(conf);
-    this.rootdir = fs.makeQualified(rootdir);
+    this.rootdir = this.fs.makeQualified(rd);
     this.conf.set(HConstants.HBASE_DIR, this.rootdir.toString());
     this.rand = new Random();

     Path rootRegionDir =
       HRegion.getRegionDir(rootdir, HRegionInfo.rootRegionInfo);
     LOG.info("Root region dir: " + rootRegionDir.toString());
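The HMaster constructor qualifies the incoming root path against the master's filesystem and records the result under hbase.rootdir, so every later reader of the configuration sees the fully qualified value. A sketch of just that qualification step, assuming a configured Hadoop FileSystem (class and method names here are illustrative, not from the commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class QualifyRootdirSketch {
      // Turns a scheme-less path such as "/hbase" into a fully qualified
      // one, e.g. "hdfs://localhost:9000/hbase", and writes it back so
      // later readers of the configuration see the qualified value.
      static void qualifyAndPublish(Configuration conf) throws Exception {
        FileSystem fs = FileSystem.get(conf);
        Path rootdir = fs.makeQualified(new Path(conf.get("hbase.rootdir")));
        conf.set("hbase.rootdir", rootdir.toString());
      }
    }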

Migrate.java:
@@ -148,8 +148,8 @@ public class Migrate extends Configured implements Tool {

     LOG.info("Starting upgrade" + (readOnly ? " check" : ""));

-    Path rootdir = fs.makeQualified(new Path( // get HBase root dir
-      conf.get(HConstants.HBASE_DIR, HConstants.DEFAULT_HBASE_DIR)));
+    Path rootdir =
+      fs.makeQualified(new Path(this.conf.get(HConstants.HBASE_DIR)));

     if (!fs.exists(rootdir)) {
       throw new FileNotFoundException("HBase root directory " +

overview.html:
@@ -39,12 +39,12 @@ Start by defining the following directory variables for your convenience:
 </p>
 <p>
 <ul>
-<li><code>${HBASE_HOME}</code>: The Hbase root.
+<li><code>${HBASE_HOME}</code>: The HBase root.
 </li>
 </ul>
 </p>
 <p>Edit <code>${HBASE_HOME}/conf/hbase-env.sh</code>. In this file you can
-set the heapsize for Hbase, etc. At a minimum, set
+set the heapsize for HBase, etc. At a minimum, set
 <code>JAVA_HOME</code> to the root of your Java installation.
 <p>
 If you are running a standalone operation, proceed to <a href=#runandconfirm>Running
@@ -54,11 +54,11 @@ and Confirming Your Installation</a>. If you are running a distributed operatio
 <h2><a name="distributed" >Distributed Operation</a></h2>
 <p>Distributed mode requires an instance of the Hadoop Distributed File System (DFS).
 See the Hadoop <a href="http://lucene.apache.org/hadoop/api/overview-summary.html#overview_description">
-requirements and instructions</a> for running a distributed operation. Configuring Hbase for a
+requirements and instructions</a> for running a distributed operation. Configuring HBase for a
 distributed operation requires modification of the following two files:
 <code>${HBASE_HOME}/conf/hbase-site.xml</code> and <code>${HBASE_HOME}/conf/regionservers</code>.
 The former needs to be pointed at the running Hadoop DFS instance. The latter file lists
-all members of the Hbase cluster.
+all members of the HBase cluster.
 </p>
 <p>
 <code>hbase-site.xml</code> allows the user to override the properties defined in
@@ -67,21 +67,22 @@ should never be modified). At a minimum the <code>hbase.master</code> and the
 <code>hbase.rootdir</code> properties should be redefined
 in <code>hbase-site.xml</code> to define the <code>host:port</code> pair on which to run the
 HMaster (<a href="http://wiki.apache.org/lucene-hadoop/Hbase/HbaseArchitecture">read about the
-Hbase master, regionservers, etc</a>) and to point hbase the Hadoop filesystem to use:
+Hbase master, regionservers, etc</a>) and to point hbase at the Hadoop filesystem to use. For
+example:
 </p>
 <pre>
 <configuration>

   <property>
     <name>hbase.master</name>
-    <value>[YOUR_HOST]:[PORT]</value>
-    <description>The host and port that the Hbase master runs at.
+    <value>http://localhost:60000</value>
+    <description>The host and port that the HBase master runs at.
     </description>
   </property>

   <property>
     <name>hbase.rootdir</name>
-    <value>FULLY_QUALIFIED_PATH</value>
+    <value>hdfs://localhost:9000/hbase</value>
     <description>The directory shared by region servers.
     </description>
   </property>
@@ -98,41 +99,41 @@ host per line (This file is synonymous to the slaves file at
 <p>If you are running in standalone, non-distributed mode, hbase by default uses
 the local filesystem.</p>
 <p>If you are running a distributed cluster you will need to start the Hadoop DFS daemons
-before starting Hbase and stop the daemons after Hbase has shut down. Start and
+before starting HBase and stop the daemons after HBase has shut down. Start and
 stop the Hadoop DFS daemons as per the Hadoop
-<a href="http://lucene.apache.org/hadoop/api/overview-summary.html">instructions</a>. Hbase
+<a href="http://lucene.apache.org/hadoop/api/overview-summary.html">instructions</a>. HBase
 does not normally use the mapreduce daemons. These do not need to be started.</p>

-<p>Start Hbase with the following command:
+<p>Start HBase with the following command:
 </p>
 <pre>
 ${HBASE_HOME}/bin/start-hbase.sh
 </pre>
 <p>
-Once Hbase has started, enter <code>${HBASE_HOME}/bin/hbase shell</code> to obtain a
-shell against Hbase from which you can execute HQL commands (HQL is a severe subset of SQL).
-In the Hbase shell, type
-<code>help;</code> to see a list of supported HQL commands. Note that all commands in the Hbase
+Once HBase has started, enter <code>${HBASE_HOME}/bin/hbase shell</code> to obtain a
+shell against HBase from which you can execute HQL commands (HQL is a severe subset of SQL).
+In the HBase shell, type
+<code>help;</code> to see a list of supported HQL commands. Note that all commands in the HBase
 shell must end with <code>;</code>. Test your installation by creating, viewing, and dropping
 a table, as per the help instructions. Be patient with the <code>create</code> and
 <code>drop</code> operations as they may each take 10 seconds or more. To stop hbase, exit the
-Hbase shell and enter:
+HBase shell and enter:
 </p>
 <pre>
 ${HBASE_HOME}/bin/stop-hbase.sh
 </pre>
 <p>
-If you are running a distributed operation, be sure to wait until Hbase has shut down completely
+If you are running a distributed operation, be sure to wait until HBase has shut down completely
 before stopping the Hadoop daemons.
 </p>
 <p>
 The default location for logs is <code>${HBASE_HOME}/logs</code>.
 </p>
-<p>Hbase also puts up a UI listing vital attributes. By default its deployed on the master host
+<p>HBase also puts up a UI listing vital attributes. By default its deployed on the master host
 at port 60010.</p>

 <h2><a name="upgrading" >Upgrading</a></h2>
-<p>After installing the new Hbase, before starting your cluster, run the
+<p>After installing the new HBase, before starting your cluster, run the
 <code>${HBASE_DIR}/bin/hbase migrate</code> migration script. It will make any
 adjustments to the filesystem data under <code>hbase.rootdir</code> necessary to run
 the hbase version (It does not change your install unless you explicitly ask it to).
@@ -140,8 +141,8 @@ the hbase version (It does not change your install unless you explicitly ask it

 <h2><a name="related" >Related Documentation</a></h2>
 <ul>
-<li><a href="http://wiki.apache.org/lucene-hadoop/Hbase">Hbase Home Page</a>
-<li><a href="http://wiki.apache.org/lucene-hadoop/Hbase/HbaseArchitecture">Hbase Architecture</a>
+<li><a href="http://wiki.apache.org/lucene-hadoop/Hbase">HBase Home Page</a>
+<li><a href="http://wiki.apache.org/lucene-hadoop/Hbase/HbaseArchitecture">HBase Architecture</a>
 </ul>

 </body>

src/test/hbase-site.xml:
@@ -22,13 +22,6 @@
 */
 -->
 <configuration>
-  <property>
-    <name>fs.default.name</name>
-    <value></value>
-    <description>Use hdfs as file system by default. Modify this to run on
-    local file system.
-    </description>
-  </property>
   <property>
     <name>hbase.regionserver.msginterval</name>
     <value>1000</value>
@@ -104,11 +97,6 @@
     invoking an optional cache flush. Default 60,000.
     </description>
   </property>
-  <property>
-    <name>hbase.rootdir</name>
-    <value>/hbase</value>
-    <description>location of HBase instance in dfs</description>
-  </property>
   <property>
     <name>hbase.hregion.max.filesize</name>
     <value>67108864</value>

AbstractMergeTestBase.java:
@@ -68,6 +68,9 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
   public void setUp() throws Exception {
     conf.setLong("hbase.hregion.max.filesize", 64L * 1024L * 1024L);
     dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+    // Set the hbase.rootdir to be the home directory in mini dfs.
+    this.conf.set(HConstants.HBASE_DIR,
+      this.dfsCluster.getFileSystem().getHomeDirectory().toString());

     // Note: we must call super.setUp after starting the mini cluster or
     // we will end up with a local file system
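Every MiniDFSCluster-based test in this commit gains the same three lines: start the mini DFS first, then point hbase.rootdir at its home directory so the tests run against a fully qualified hdfs:// rootdir instead of the local filesystem. A condensed sketch of that recurring pattern (the base-class name is illustrative; MiniDFSCluster lived in org.apache.hadoop.dfs in the Hadoop of this era):

    import org.apache.hadoop.dfs.MiniDFSCluster;

    public abstract class MiniDfsHBaseTestCase extends HBaseTestCase {
      protected MiniDFSCluster cluster;

      @Override
      public void setUp() throws Exception {
        this.cluster = new MiniDFSCluster(conf, 2, true, (String[]) null);
        // getHomeDirectory() is fully qualified (scheme plus authority),
        // so HBASE_DIR now names the filesystem as well as the path.
        this.conf.set(HConstants.HBASE_DIR,
            this.cluster.getFileSystem().getHomeDirectory().toString());
        // Must run after the mini cluster is up, or we end up on the
        // local file system.
        super.setUp();
      }
    }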

HBaseTestCase.java:
@@ -22,7 +22,6 @@ package org.apache.hadoop.hbase;
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.util.Map;
-import java.util.Random;

 import junit.framework.TestCase;

@@ -113,9 +112,8 @@ public abstract class HBaseTestCase extends TestCase {
           fs.delete(testDir);
         }
       } else {
-        this.testDir = fs.makeQualified(
-          new Path(conf.get(HConstants.HBASE_DIR, HConstants.DEFAULT_HBASE_DIR))
-        );
+        this.testDir =
+          this.fs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR)));
       }
     } catch (Exception e) {
       LOG.fatal("error during setup", e);
@@ -147,10 +145,10 @@ public abstract class HBaseTestCase extends TestCase {
   protected HRegion createNewHRegion(HTableDescriptor desc, Text startKey,
       Text endKey) throws IOException {

-    FileSystem fs = FileSystem.get(conf);
-    Path rootdir = fs.makeQualified(
-      new Path(conf.get(HConstants.HBASE_DIR, HConstants.DEFAULT_HBASE_DIR)));
-    fs.mkdirs(rootdir);
+    FileSystem filesystem = FileSystem.get(conf);
+    Path rootdir = filesystem.makeQualified(
+      new Path(conf.get(HConstants.HBASE_DIR)));
+    filesystem.mkdirs(rootdir);

     return HRegion.createHRegion(new HRegionInfo(desc, startKey, endKey),
       rootdir, conf);
@@ -409,7 +407,6 @@ public abstract class HBaseTestCase extends TestCase {
    */
   public static class HRegionIncommon implements Incommon, FlushCache {
     final HRegion region;
-    private final Random rand = new Random();
     private BatchUpdate batch;

     private void checkBatch() {
@@ -445,12 +442,13 @@ public abstract class HBaseTestCase extends TestCase {
       }
     }
     /** {@inheritDoc} */
-    public void put(long lockid, Text column, byte[] val) {
+    public void put(@SuppressWarnings("unused") long lockid, Text column,
+        byte[] val) {
       checkBatch();
       this.batch.put(column, val);
     }
     /** {@inheritDoc} */
-    public void delete(long lockid, Text column) {
+    public void delete(@SuppressWarnings("unused") long lockid, Text column) {
       checkBatch();
       this.batch.delete(column);
     }

MiniHBaseCluster.java:
@@ -139,8 +139,9 @@ public class MiniHBaseCluster implements HConstants {

   private void init(final int nRegionNodes) throws IOException {
     try {
-      this.parentdir = new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR));
-      fs.mkdirs(parentdir);
+      this.parentdir = this.fs.getHomeDirectory();
+      this.conf.set(HConstants.HBASE_DIR, this.parentdir.toString());
+      this.fs.mkdirs(parentdir);
       FSUtils.setVersion(fs, parentdir);
       this.hbaseCluster = new LocalHBaseCluster(this.conf, nRegionNodes);
       this.hbaseCluster.startup();

TestCompaction.java:
@@ -59,6 +59,9 @@ public class TestCompaction extends HBaseTestCase {
   @Override
   public void setUp() throws Exception {
     this.cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+    // Make the hbase rootdir match the minidfs we just span up
+    this.conf.set(HConstants.HBASE_DIR,
+      this.cluster.getFileSystem().getHomeDirectory().toString());
     super.setUp();
     HTableDescriptor htd = createTableDescriptor(getName());
     this.r = createNewHRegion(htd, null, null);

TestDeleteAll.java:
@@ -38,6 +38,9 @@ public class TestDeleteAll extends HBaseTestCase {
     super.setUp();
     try {
       this.miniHdfs = new MiniDFSCluster(this.conf, 1, true, null);
+      // Set the hbase.rootdir to be the home directory in mini dfs.
+      this.conf.set(HConstants.HBASE_DIR,
+        this.miniHdfs.getFileSystem().getHomeDirectory().toString());
     } catch (Exception e) {
       LOG.fatal("error starting MiniDFSCluster", e);
       throw e;

TestDeleteFamily.java:
@@ -38,6 +38,9 @@ public class TestDeleteFamily extends HBaseTestCase {
   protected void setUp() throws Exception {
     super.setUp();
     this.miniHdfs = new MiniDFSCluster(this.conf, 1, true, null);
+    // Set the hbase.rootdir to be the home directory in mini dfs.
+    this.conf.set(HConstants.HBASE_DIR,
+      this.miniHdfs.getFileSystem().getHomeDirectory().toString());
   }

   /**

TestGet.java:
@@ -78,6 +78,9 @@ public class TestGet extends HBaseTestCase {
       // Initialization

       cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+      // Set the hbase.rootdir to be the home directory in mini dfs.
+      this.conf.set(HConstants.HBASE_DIR,
+        cluster.getFileSystem().getHomeDirectory().toString());

       HTableDescriptor desc = new HTableDescriptor("test");
       desc.addFamily(new HColumnDescriptor(CONTENTS.toString()));

TestGet2.java:
@@ -40,6 +40,9 @@ public class TestGet2 extends HBaseTestCase {
   protected void setUp() throws Exception {
     super.setUp();
     this.miniHdfs = new MiniDFSCluster(this.conf, 1, true, null);
+    // Set the hbase.rootdir to be the home directory in mini dfs.
+    this.conf.set(HConstants.HBASE_DIR,
+      this.miniHdfs.getFileSystem().getHomeDirectory().toString());
   }

   /**

TestHLog.java:
@@ -37,6 +37,9 @@ public class TestHLog extends HBaseTestCase implements HConstants {
   @Override
   public void setUp() throws Exception {
     cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+    // Set the hbase.rootdir to be the home directory in mini dfs.
+    this.conf.set(HConstants.HBASE_DIR,
+      this.cluster.getFileSystem().getHomeDirectory().toString());
     super.setUp();
     this.dir = new Path("/hbase", getName());
     if (fs.exists(dir)) {
@@ -96,6 +96,9 @@ implements RegionUnavailableListener {
   private void setup() throws IOException {

     cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+    // Set the hbase.rootdir to be the home directory in mini dfs.
+    this.conf.set(HConstants.HBASE_DIR,
+      this.cluster.getFileSystem().getHomeDirectory().toString());

     desc = new HTableDescriptor("test");
     desc.addFamily(new HColumnDescriptor("contents:"));

TestHStoreFile.java:
@@ -45,6 +45,9 @@ public class TestHStoreFile extends HBaseTestCase {
   public void setUp() throws Exception {
     try {
       this.cluster = new MiniDFSCluster(this.conf, 2, true, (String[])null);
+      // Set the hbase.rootdir to be the home directory in mini dfs.
+      this.conf.set(HConstants.HBASE_DIR,
+        this.cluster.getFileSystem().getHomeDirectory().toString());
       this.dir = new Path(DIR, getName());
     } catch (IOException e) {
       StaticTestEnvironment.shutdownDfs(cluster);

TestLogRolling.java:
@@ -95,6 +95,9 @@ public class TestLogRolling extends HBaseTestCase {
     try {
       super.setUp();
       dfs = new MiniDFSCluster(conf, 2, true, (String[]) null);
+      // Set the hbase.rootdir to be the home directory in mini dfs.
+      this.conf.set(HConstants.HBASE_DIR,
+        this.dfs.getFileSystem().getHomeDirectory().toString());
     } catch (Exception e) {
       StaticTestEnvironment.shutdownDfs(dfs);
       LOG.fatal("error during setUp: ", e);

TestMergeMeta.java:
@@ -39,4 +39,4 @@ public class TestMergeMeta extends AbstractMergeTestBase {
     assertNotNull(dfsCluster);
     HMerge.merge(conf, dfsCluster.getFileSystem(), HConstants.META_TABLE_NAME);
   }
 }

TestScanner.java:
@@ -56,6 +56,9 @@ public class TestScanner extends HBaseTestCase {
   @Override
   public void setUp() throws Exception {
     cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+    // Set the hbase.rootdir to be the home directory in mini dfs.
+    this.conf.set(HConstants.HBASE_DIR,
+      this.cluster.getFileSystem().getHomeDirectory().toString());
     super.setUp();

   }

TestSplit.java:
@@ -69,6 +69,9 @@ public class TestSplit extends MultiRegionTable {
     HRegion region = null;
     try {
       cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+      // Set the hbase.rootdir to be the home directory in mini dfs.
+      this.conf.set(HConstants.HBASE_DIR,
+        cluster.getFileSystem().getHomeDirectory().toString());
       HTableDescriptor htd = createTableDescriptor(getName());
       region = createNewHRegion(htd, null, null);
       basicSplit(region);

TestTimestamp.java:
@@ -57,6 +57,9 @@ public class TestTimestamp extends HBaseTestCase {
   @Override
   public void setUp() throws Exception {
     this.cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+    // Set the hbase.rootdir to be the home directory in mini dfs.
+    this.conf.set(HConstants.HBASE_DIR,
+      this.cluster.getFileSystem().getHomeDirectory().toString());
     super.setUp();
   }


TestTableIndex.java:
@@ -101,6 +101,9 @@ public class TestTableIndex extends MultiRegionTable {
     desc.addFamily(new HColumnDescriptor(OUTPUT_COLUMN));

     dfsCluster = new MiniDFSCluster(conf, 1, true, (String[]) null);
+    // Set the hbase.rootdir to be the home directory in mini dfs.
+    this.conf.set(HConstants.HBASE_DIR,
+      this.dfsCluster.getFileSystem().getHomeDirectory().toString());

     // Must call super.setUp after mini dfs cluster is started or else
     // filesystem ends up being local

TestTableMapReduce.java:
@@ -115,6 +115,9 @@ public class TestTableMapReduce extends MultiRegionTable {
   @Override
   public void setUp() throws Exception {
     dfsCluster = new MiniDFSCluster(conf, 1, true, (String[])null);
+    // Set the hbase.rootdir to be the home directory in mini dfs.
+    this.conf.set(HConstants.HBASE_DIR,
+      this.dfsCluster.getFileSystem().getHomeDirectory().toString());

     // Must call super.setup() after starting mini dfs cluster. Otherwise
     // we get a local file system instead of hdfs

TestMigrate.java:
@@ -75,9 +75,11 @@ public class TestMigrate extends HBaseTestCase {
     MiniDFSCluster dfsCluster = null;
     try {
       dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
+      // Set the hbase.rootdir to be the home directory in mini dfs.
+      this.conf.set(HConstants.HBASE_DIR,
+        dfsCluster.getFileSystem().getHomeDirectory().toString());
       FileSystem dfs = dfsCluster.getFileSystem();
-      Path root = dfs.makeQualified(new Path(
-        conf.get(HConstants.HBASE_DIR, HConstants.DEFAULT_HBASE_DIR)));
+      Path root = dfs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR)));
       dfs.mkdirs(root);

       /*