HBASE-7594. TestLocalHBaseCluster failing on ubuntu2
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1437658 13f79535-47bb-0310-9956-ffa450edef68
parent 669626b15b
commit 44b054897b
@@ -30,6 +30,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.protobuf.generated.HFileProtos;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -114,7 +115,7 @@ public class FixedFileTrailer {
   private long lastDataBlockOffset;
 
   /** Raw key comparator class name in version 2 */
-  private String comparatorClassName = RawComparator.class.getName();
+  private String comparatorClassName = KeyValue.KEY_COMPARATOR.getClass().getName();
 
   /** The {@link HFile} format major version. */
   private final int majorVersion;
@@ -335,7 +336,7 @@ public class FixedFileTrailer {
       lastDataBlockOffset = builder.getLastDataBlockOffset();
     }
     if (builder.hasComparatorClassName()) {
-      comparatorClassName = builder.getComparatorClassName();
+      setComparatorClass(getComparatorClass(builder.getComparatorClassName()));
     }
     if (builder.hasCompressionCodec()) {
       compressionCodec = Compression.Algorithm.values()[builder.getCompressionCodec()];
@@ -367,7 +368,8 @@ public class FixedFileTrailer {
       numDataIndexLevels = input.readInt();
       firstDataBlockOffset = input.readLong();
       lastDataBlockOffset = input.readLong();
-      comparatorClassName = Bytes.readStringFixedSize(input, MAX_COMPARATOR_NAME_LENGTH);
+      setComparatorClass(getComparatorClass(Bytes.readStringFixedSize(input,
+          MAX_COMPARATOR_NAME_LENGTH)));
     }
   }
 
@@ -584,7 +586,13 @@ public class FixedFileTrailer {
 
   @SuppressWarnings("rawtypes")
   public void setComparatorClass(Class<? extends RawComparator> klass) {
-    expectAtLeastMajorVersion(2);
+    // Is the comparator instantiable
+    try {
+      klass.newInstance();
+    } catch (Exception e) {
+      throw new RuntimeException("Comparator class " + klass.getName() +
+        " is not instantiable", e);
+    }
     comparatorClassName = klass.getName();
   }
 
@@ -604,9 +612,11 @@ public class FixedFileTrailer {
     try {
      return getComparatorClass(comparatorClassName).newInstance();
     } catch (InstantiationException e) {
-      throw new IOException(e);
+      throw new IOException("Comparator class " + comparatorClassName +
+        " is not instantiable", e);
     } catch (IllegalAccessException e) {
-      throw new IOException(e);
+      throw new IOException("Comparator class " + comparatorClassName +
+        " is not instantiable", e);
     }
   }
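Note on the FixedFileTrailer hunks above: the default comparator recorded in the trailer changes from RawComparator to the class of KeyValue.KEY_COMPARATOR, and both setComparatorClass and the trailer-deserialization paths now route through setComparatorClass/getComparatorClass, so a comparator class that cannot be created reflectively fails fast. A minimal standalone sketch of that fail-fast check follows; the class and method names (ComparatorCheck, instantiate) are illustrative, not part of the patch.

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.io.RawComparator;

public class ComparatorCheck {
  // Mirrors the validation the patch adds to setComparatorClass: the class must have a
  // working no-argument constructor, because the trailer re-creates it via newInstance().
  @SuppressWarnings("rawtypes")
  static RawComparator instantiate(Class<? extends RawComparator> klass) {
    try {
      return klass.newInstance();
    } catch (Exception e) {
      throw new RuntimeException("Comparator class " + klass.getName() +
          " is not instantiable", e);
    }
  }

  public static void main(String[] args) {
    // The class of KeyValue.KEY_COMPARATOR is the new default stored in the trailer.
    System.out.println(instantiate(KeyValue.KEY_COMPARATOR.getClass()).getClass().getName());
  }
}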
@@ -681,7 +681,6 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     return startMiniCluster(numMasters, numSlaves, null);
   }
 
-
   /**
    * Start up a minicluster of hbase, optionally dfs, and zookeeper.
    * Modifies Configuration. Homes the cluster data directory under a random
@@ -707,7 +706,41 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
    * @return Mini hbase cluster instance created.
    */
   public MiniHBaseCluster startMiniCluster(final int numMasters,
-    final int numSlaves, final String[] dataNodeHosts)
+    final int numSlaves, final String[] dataNodeHosts) throws Exception {
+    return startMiniCluster(numMasters, numSlaves, dataNodeHosts, null, null);
+  }
+
+  /**
+   * Start up a minicluster of hbase, optionally dfs, and zookeeper.
+   * Modifies Configuration. Homes the cluster data directory under a random
+   * subdirectory in a directory under System property test.build.data.
+   * Directory is cleaned up on exit.
+   * @param numMasters Number of masters to start up. We'll start this many
+   * hbase masters. If numMasters > 1, you can find the active/primary master
+   * with {@link MiniHBaseCluster#getMaster()}.
+   * @param numSlaves Number of slaves to start up. We'll start this many
+   * regionservers. If dataNodeHosts == null, this also indicates the number of
+   * datanodes to start. If dataNodeHosts != null, the number of datanodes is
+   * based on dataNodeHosts.length.
+   * If numSlaves is > 1, then make sure
+   * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver) otherwise
+   * bind errors.
+   * @param dataNodeHosts hostnames DNs to run on.
+   * This is useful if you want to run datanode on distinct hosts for things
+   * like HDFS block location verification.
+   * If you start MiniDFSCluster without host names,
+   * all instances of the datanodes will have the same host name.
+   * @param masterClass The class to use as HMaster, or null for default
+   * @param regionserverClass The class to use as HRegionServer, or null for
+   * default
+   * @throws Exception
+   * @see {@link #shutdownMiniCluster()}
+   * @return Mini hbase cluster instance created.
+   */
+  public MiniHBaseCluster startMiniCluster(final int numMasters,
+    final int numSlaves, final String[] dataNodeHosts,
+    Class<? extends HMaster> masterClass,
+    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
   throws Exception {
     int numDataNodes = numSlaves;
     if ( dataNodeHosts != null && dataNodeHosts.length != 0) {
@@ -736,7 +769,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     }
 
     // Start the MiniHBaseCluster
-    return startMiniHBaseCluster(numMasters, numSlaves);
+    return startMiniHBaseCluster(numMasters, numSlaves, masterClass, regionserverClass);
   }
 
   public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
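Note on the HBaseTestingUtility hunks above: startMiniCluster gains an overload that threads a custom HMaster class and a custom MiniHBaseCluster.MiniHBaseClusterRegionServer class through to startMiniHBaseCluster, and the existing three-argument overload now delegates to it with nulls, which select the stock implementations. A hedged usage sketch follows; it assumes the MyHMaster/MyHRegionServer classes added in TestLocalHBaseCluster (below, package org.apache.hadoop.hbase) are on the classpath, and the wrapper class name is made up.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TestLocalHBaseCluster;

public class StartMiniClusterExample {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // One master, one regionserver; null dataNodeHosts keeps the default datanode count;
    // passing null for the last two arguments would select the default classes instead.
    MiniHBaseCluster cluster = util.startMiniCluster(1, 1, null,
        TestLocalHBaseCluster.MyHMaster.class, TestLocalHBaseCluster.MyHRegionServer.class);
    try {
      // The running master really is the requested class, so the cast succeeds.
      TestLocalHBaseCluster.MyHMaster master =
          (TestLocalHBaseCluster.MyHMaster) cluster.getMaster(0);
      System.out.println("echo: " + master.echo(42));
    } finally {
      util.shutdownMiniCluster();
    }
  }
}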
@@ -22,12 +22,7 @@ import static org.junit.Assert.*;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.master.HMaster;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
 import org.apache.zookeeper.KeeperException;
 
 import org.junit.Test;
@@ -40,46 +35,28 @@ public class TestLocalHBaseCluster {
   /**
    * Check that we can start a local HBase cluster specifying a custom master
    * and regionserver class and then cast back to those classes; also that
-   * the cluster will launch and terminate cleanly. See HBASE-6011.
+   * the cluster will launch and terminate cleanly. See HBASE-6011. Uses the
+   * HBaseTestingUtility facilities for creating a LocalHBaseCluster with
+   * custom master and regionserver classes.
    */
   @Test
   public void testLocalHBaseCluster() throws Exception {
-    Configuration conf = TEST_UTIL.getConfiguration();
-    MiniZooKeeperCluster zkCluster = TEST_UTIL.startMiniZKCluster();
-    conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(zkCluster.getClientPort()));
-    LocalHBaseCluster cluster = new LocalHBaseCluster(conf, 1, 1, MyHMaster.class,
-      MyHRegionServer.class);
+    TEST_UTIL.startMiniCluster(1, 1, null, MyHMaster.class, MyHRegionServer.class);
     // Can we cast back to our master class?
     try {
-      ((MyHMaster)cluster.getMaster(0)).setZKCluster(zkCluster);
+      int val = ((MyHMaster)TEST_UTIL.getHBaseCluster().getMaster(0)).echo(42);
+      assertEquals(42, val);
     } catch (ClassCastException e) {
       fail("Could not cast master to our class");
     }
     // Can we cast back to our regionserver class?
     try {
-      ((MyHRegionServer)cluster.getRegionServer(0)).echo(42);
+      int val = ((MyHRegionServer)TEST_UTIL.getHBaseCluster().getRegionServer(0)).echo(42);
+      assertEquals(42, val);
     } catch (ClassCastException e) {
       fail("Could not cast regionserver to our class");
     }
-    // Does the cluster start successfully?
-    try {
-      cluster.startup();
-      waitForClusterUp(conf);
-    } catch (IOException e) {
-      fail("LocalHBaseCluster did not start successfully");
-    } finally {
-      cluster.shutdown();
-    }
-  }
-
-  private void waitForClusterUp(Configuration conf) throws IOException {
-    HTable t = new HTable(conf, HConstants.META_TABLE_NAME);
-    ResultScanner s = t.getScanner(new Scan());
-    while (s.next() != null) {
-      continue;
-    }
-    s.close();
-    t.close();
+    TEST_UTIL.shutdownMiniCluster();
   }
 
   /**
@@ -87,34 +64,20 @@ public class TestLocalHBaseCluster {
    * running in local mode.
    */
   public static class MyHMaster extends HMaster {
-    private MiniZooKeeperCluster zkcluster = null;
-
     public MyHMaster(Configuration conf) throws IOException, KeeperException,
         InterruptedException {
       super(conf);
     }
 
-    @Override
-    public void run() {
-      super.run();
-      if (this.zkcluster != null) {
-        try {
-          this.zkcluster.shutdown();
-        } catch (IOException e) {
-          e.printStackTrace();
-        }
-      }
-    }
-
-    void setZKCluster(final MiniZooKeeperCluster zkcluster) {
-      this.zkcluster = zkcluster;
+    public int echo(int val) {
+      return val;
     }
   }
 
   /**
    * A private regionserver class with a dummy method for testing casts
    */
-  public static class MyHRegionServer extends HRegionServer {
+  public static class MyHRegionServer extends MiniHBaseCluster.MiniHBaseClusterRegionServer {
 
     public MyHRegionServer(Configuration conf) throws IOException,
         InterruptedException {
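Note on the TestLocalHBaseCluster hunks above: the test now drives the cluster through TEST_UTIL.startMiniCluster(...) instead of hand-managing a LocalHBaseCluster plus MiniZooKeeperCluster, and MyHRegionServer extends MiniHBaseCluster.MiniHBaseClusterRegionServer rather than HRegionServer so the mini cluster can manage it. A consolidated sketch of what such a custom regionserver looks like follows; the class name is made up, and it assumes MiniHBaseClusterRegionServer exposes the (Configuration) constructor that MyHRegionServer's signature above implies.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.MiniHBaseCluster;

public class EchoRegionServer extends MiniHBaseCluster.MiniHBaseClusterRegionServer {
  public EchoRegionServer(Configuration conf) throws IOException, InterruptedException {
    super(conf);
  }

  // Dummy method for verifying a cast back from the running cluster, as MyHRegionServer does.
  public int echo(int val) {
    return val;
  }
}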
@@ -347,11 +347,9 @@ public class TestHFile extends HBaseTestCase {
     assertTrue(Compression.Algorithm.LZ4.ordinal() == 4);
   }
 
-  public void testComparator() throws IOException {
-    if (cacheConf == null) cacheConf = new CacheConfig(conf);
-    Path mFile = new Path(ROOT_DIR, "meta.tfile");
-    FSDataOutputStream fout = createFSOutput(mFile);
-    KeyComparator comparator = new KeyComparator() {
+  // This can't be an anonymous class because the compiler will not generate
+  // a nullary constructor for it.
+  static class CustomKeyComparator extends KeyComparator {
       @Override
       public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2,
           int l2) {
@@ -361,7 +359,13 @@ public class TestHFile extends HBaseTestCase {
       public int compare(byte[] o1, byte[] o2) {
         return compare(o1, 0, o1.length, o2, 0, o2.length);
       }
-    };
+  }
+
+  public void testComparator() throws IOException {
+    if (cacheConf == null) cacheConf = new CacheConfig(conf);
+    Path mFile = new Path(ROOT_DIR, "meta.tfile");
+    FSDataOutputStream fout = createFSOutput(mFile);
+    KeyComparator comparator = new CustomKeyComparator();
     Writer writer = HFile.getWriterFactory(conf, cacheConf)
         .withOutputStream(fout)
         .withBlockSize(minBlockSize)
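Note on the TestHFile hunks above: the anonymous KeyComparator becomes a named static nested CustomKeyComparator because, as the added comment says, the compiler does not generate a nullary constructor for an anonymous class, and FixedFileTrailer now instantiates the comparator class reflectively. A standalone illustration of that constraint follows; the class names are made up and RawComparator stands in for the test's KeyComparator.

import org.apache.hadoop.io.RawComparator;

public class NullaryConstructorDemo {
  private int bias = 0;  // referenced below to force capture of the enclosing instance

  // A static nested class gets a default no-argument constructor, so newInstance() works.
  static class NamedComparator implements RawComparator<byte[]> {
    public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { return 0; }
    public int compare(byte[] o1, byte[] o2) { return 0; }
  }

  RawComparator<byte[]> anonymous() {
    // An anonymous class declared in an instance context is compiled with a constructor
    // that takes the enclosing instance, so it has no nullary constructor.
    return new RawComparator<byte[]>() {
      public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { return bias; }
      public int compare(byte[] o1, byte[] o2) { return bias; }
    };
  }

  public static void main(String[] args) throws Exception {
    System.out.println(NamedComparator.class.newInstance());       // succeeds
    RawComparator<byte[]> anon = new NullaryConstructorDemo().anonymous();
    anon.getClass().newInstance();  // throws InstantiationException: no nullary constructor
  }
}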