HBASE-13846 Run MiniCluster on top of other MiniDfsCluster
parent 3ada2345d6
commit 0f93986015
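In short, the change lets an HBase mini cluster be brought up on top of a MiniDFSCluster that was started elsewhere. A minimal sketch of that flow, using the same util1/util2 naming as the new TestHBaseOnOtherDfsCluster added below (which also verifies that both utilities end up on the same filesystem):

    // Start only HDFS with one testing utility.
    HBaseTestingUtility util1 = new HBaseTestingUtility();
    MiniDFSCluster dfs = util1.startMiniDFSCluster(1);

    // Hand the running DFS cluster to a second utility before starting HBase on it.
    // requireDown=false since the cluster being handed over is already up (see setDFSCluster below).
    HBaseTestingUtility util2 = new HBaseTestingUtility();
    util2.setDFSCluster(dfs, false);
    util2.startMiniCluster();

    // Tear down in reverse order.
    util2.shutdownMiniCluster();
    util1.shutdownMiniDFSCluster();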
HBaseTestingUtility.java
@@ -583,8 +583,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
         true, null, null, hosts, null);
 
     // Set this just-started cluster as our filesystem.
-    FileSystem fs = this.dfsCluster.getFileSystem();
-    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));
+    setFs();
 
     // Wait for the cluster to be totally up
     this.dfsCluster.waitClusterUp();
@@ -595,6 +594,14 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     return this.dfsCluster;
   }
 
+  private void setFs() throws IOException {
+    if (this.dfsCluster == null) {
+      LOG.info("Skipping setting fs because dfsCluster is null");
+      return;
+    }
+    FileSystem fs = this.dfsCluster.getFileSystem();
+    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));
+  }
 
   public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
       throws Exception {
@@ -965,7 +972,9 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
 
     // Bring up mini dfs cluster. This spews a bunch of warnings about missing
     // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
-    startMiniDFSCluster(numDataNodes, dataNodeHosts);
+    if (this.dfsCluster == null) {
+      dfsCluster = startMiniDFSCluster(numDataNodes, dataNodeHosts);
+    }
 
     // Start up a zk cluster.
     if (this.zkCluster == null) {
@@ -2966,11 +2975,25 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     return dfsCluster;
   }
 
-  public void setDFSCluster(MiniDFSCluster cluster) throws IOException {
-    if (dfsCluster != null && dfsCluster.isClusterUp()) {
-      throw new IOException("DFSCluster is already running! Shut it down first.");
-    }
+  public void setDFSCluster(MiniDFSCluster cluster) throws IllegalStateException, IOException {
+    setDFSCluster(cluster, true);
+  }
+
+  /**
+   * Set the MiniDFSCluster
+   * @param cluster cluster to use
+   * @param requireDown require that the cluster not be "up" (MiniDFSCluster#isClusterUp) before
+   * it is set.
+   * @throws IllegalStateException if the passed cluster is up when it is required to be down
+   * @throws IOException if the FileSystem could not be set from the passed dfs cluster
+   */
+  public void setDFSCluster(MiniDFSCluster cluster, boolean requireDown)
+      throws IllegalStateException, IOException {
+    if (dfsCluster != null && requireDown && dfsCluster.isClusterUp()) {
+      throw new IllegalStateException("DFSCluster is already running! Shut it down first.");
+    }
     this.dfsCluster = cluster;
+    this.setFs();
   }
 
   public FileSystem getTestFileSystem() throws IOException {
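The existing single-argument setDFSCluster keeps its old behavior by delegating with requireDown=true, so it still refuses to overwrite a DFS cluster that is currently up; callers that pass false explicitly opt out of that check, which is what attaching an externally started, already-running cluster relies on. A short sketch of the two calls, with illustrative cluster variables:

    // same as util.setDFSCluster(freshCluster, true): throws if a running cluster is already set
    util.setDFSCluster(freshCluster);
    // explicitly allow an already-running, externally owned cluster to be set
    util.setDFSCluster(runningExternalCluster, false);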
new file: TestHBaseOnOtherDfsCluster.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.util.UUID;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test that an HBase cluster can run on top of an existing MiniDfsCluster
+ */
+@Category(MediumTests.class)
+public class TestHBaseOnOtherDfsCluster {
+
+  @Test
+  public void testOverlayOnOtherCluster() throws Exception {
+    // just run HDFS
+    HBaseTestingUtility util1 = new HBaseTestingUtility();
+    MiniDFSCluster dfs = util1.startMiniDFSCluster(1);
+
+    // run HBase on that HDFS
+    HBaseTestingUtility util2 = new HBaseTestingUtility();
+    // set the dfs
+    util2.setDFSCluster(dfs, false);
+    util2.startMiniCluster();
+
+    // ensure that they are pointed at the same place
+    FileSystem fs = dfs.getFileSystem();
+    FileSystem targetFs = util2.getDFSCluster().getFileSystem();
+    assertFsSameUri(fs, targetFs);
+
+    fs = FileSystem.get(util1.getConfiguration());
+    targetFs = FileSystem.get(util2.getConfiguration());
+    assertFsSameUri(fs, targetFs);
+
+    Path randomFile = new Path("/" + UUID.randomUUID());
+    assertTrue(targetFs.createNewFile(randomFile));
+    assertTrue(fs.exists(randomFile));
+
+    // do a simple create/write to ensure the cluster works as expected
+    byte[] family = Bytes.toBytes("testfamily");
+    byte[] tablename = Bytes.toBytes("testtable");
+    HTable table = util2.createTable(tablename, family);
+    Put p = new Put(new byte[] { 1, 2, 3 });
+    p.add(family, null, new byte[] { 1 });
+    table.put(p);
+    table.flushCommits();
+
+    // shut down and make sure everything stops cleanly
+    util2.shutdownMiniCluster();
+    util1.shutdownMiniDFSCluster();
+  }
+
+  private void assertFsSameUri(FileSystem sourceFs, FileSystem targetFs) {
+    Path source = new Path(sourceFs.getUri());
+    Path target = new Path(targetFs.getUri());
+    assertEquals(source, target);
+  }
+}