HBASE-19410 Move zookeeper related UTs to hbase-zookeeper and mark them as ZKTests

zhangduo 2017-12-06 16:38:34 +08:00
parent 6da52052ee
commit 75cdbb5700
21 changed files with 428 additions and 383 deletions

View File

@ -17,5 +17,9 @@
*/
package org.apache.hadoop.hbase.testclassification;
/**
* For tests that exercise the general logic of ZooKeeper-related tools, such as
* {@code RecoverableZooKeeper}, not for tests that merely depend on ZooKeeper.
*/
public interface ZKTests {
}
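As context for the new category: JUnit test classes opt into it via the @Category annotation, usually alongside a size category such as SmallTests or MediumTests, so the build can group or filter ZooKeeper tool tests. A minimal sketch, assuming a hypothetical test class TestSomeZKTool:

import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.ZKTests;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ ZKTests.class, MediumTests.class })
public class TestSomeZKTool {
  @Test
  public void testToolLogic() throws Exception {
    // exercise the ZooKeeper-related tool under test here
  }
}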

View File

@ -29,8 +29,9 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.yetus.audience.InterfaceAudience;
/**
* Common helpers for testing HBase that do not depend on specific server/etc. things.
@ -223,4 +224,28 @@ public class HBaseCommonTestingUtility {
} while (ntries < 30);
return ntries < 30;
}
/**
* Wrapper method for {@link Waiter#waitFor(Configuration, long, Predicate)}.
*/
public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
throws E {
return Waiter.waitFor(this.conf, timeout, predicate);
}
/**
* Wrapper method for {@link Waiter#waitFor(Configuration, long, long, Predicate)}.
*/
public <E extends Exception> long waitFor(long timeout, long interval, Predicate<E> predicate)
throws E {
return Waiter.waitFor(this.conf, timeout, interval, predicate);
}
/**
* Wrapper method for {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)}.
*/
public <E extends Exception> long waitFor(long timeout, long interval,
boolean failIfTimeout, Predicate<E> predicate) throws E {
return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
}
}
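These wrappers only supply this utility's Configuration to Waiter.waitFor; the predicate is polled until it returns true or the timeout expires. A minimal usage sketch, where resourceIsReady() is a hypothetical condition helper:

import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
import org.apache.hadoop.hbase.Waiter;

public class WaitForExample {
  public static void main(String[] args) throws Exception {
    HBaseCommonTestingUtility util = new HBaseCommonTestingUtility();
    // Check the condition every 100 ms, for at most 30 seconds.
    util.waitFor(30000, 100, new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return resourceIsReady(); // hypothetical condition under test
      }
    });
  }

  private static boolean resourceIsReady() {
    return true; // placeholder
  }
}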

View File

@ -220,10 +220,10 @@ public final class Waiter {
}
}
public static String getExplanation(Predicate explain) {
public static String getExplanation(Predicate<?> explain) {
if (explain instanceof ExplainingPredicate) {
try {
return " " + ((ExplainingPredicate) explain).explainFailure();
return " " + ((ExplainingPredicate<?>) explain).explainFailure();
} catch (Exception e) {
LOG.error("Failed to get explanation, ", e);
return e.getMessage();

View File

@ -162,8 +162,17 @@
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
</dependency>
<!--Some of the CPEPs use hbase server-side internals; they shouldn't!
-->
<!--Some of the CPEPs use hbase server-side internals; they shouldn't!-->
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-zookeeper</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-zookeeper</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>

View File

@ -147,6 +147,12 @@
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-zookeeper</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-zookeeper</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<!--Needed by ExportSnapshot. It is reading
Snapshot protos. TODO: Move to internal types.-->
@ -191,6 +197,7 @@
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-common</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
@ -226,6 +233,7 @@
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>

View File

@ -132,20 +132,6 @@
</excludes>
</configuration>
</plugin>
<!-- Make a jar and put the sources in the jar -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>jar</goal>
<goal>test-jar</goal>
</goals>
</execution>
</executions>
</plugin>
<!-- General ant tasks, bound to different build phases -->
<plugin>
<artifactId>maven-antrun-plugin</artifactId>
@ -414,6 +400,12 @@
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-zookeeper</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-metrics-api</artifactId>

View File

@ -21,6 +21,8 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
@ -51,7 +53,6 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import edu.umd.cs.findbugs.annotations.Nullable;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.logging.Log;
@ -64,16 +65,10 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ClusterStatus.Option;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.client.ImmutableHRegionInfo;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.assignment.RegionStateStore;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Consistency;
@ -81,14 +76,18 @@ import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.ImmutableHRegionInfo;
import org.apache.hadoop.hbase.client.ImmutableHTableDescriptor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
@ -100,8 +99,10 @@ import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.assignment.RegionStateStore;
import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.ChunkCreator;
@ -119,7 +120,7 @@ import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.visibility.VisibilityLabelsCache;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
@ -134,8 +135,8 @@ import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@ -144,13 +145,12 @@ import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooKeeper.States;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
/**
* Facility for testing HBase. Replacement for
@ -171,8 +171,16 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
*/
@InterfaceAudience.Public
@SuppressWarnings("deprecation")
public class HBaseTestingUtility extends HBaseCommonTestingUtility {
private MiniZooKeeperCluster zkCluster = null;
public class HBaseTestingUtility extends HBaseZKTestingUtility {
/**
* System property key to get test directory value. Name is as it is because mini dfs has
* hard-codings to put test data here. It should NOT be used directly in HBase, as it's a property
* used in mini dfs.
* @deprecated can be used only with mini dfs
*/
@Deprecated
private static final String TEST_DIRECTORY_KEY = "test.build.data";
public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";
/**
@ -184,11 +192,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
public static final String PRESPLIT_TEST_TABLE_KEY = "hbase.test.pre-split-table";
public static final boolean PRESPLIT_TEST_TABLE = true;
/**
* Set if we were passed a zkCluster. If so, we won't shutdown zk as
* part of general shutdown.
*/
private boolean passedZkCluster = false;
private MiniDFSCluster dfsCluster = null;
private volatile HBaseCluster hbaseCluster = null;
@ -199,9 +203,6 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
private String hadoopLogDir;
/** Directory (a subdirectory of dataTestDir) used by the dfs cluster if any */
private File clusterTestDir = null;
/** Directory on test filesystem where we put the data for this instance of
* HBaseTestingUtility*/
private Path dataTestDirOnTestFS = null;
@ -213,16 +214,6 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
private boolean localMode = false;
/**
* System property key to get test directory value.
* Name is as it is because mini dfs has hard-codings to put test data here.
* It should NOT be used directly in HBase, as it's a property used in
* mini dfs.
* @deprecated can be used only with mini dfs
*/
@Deprecated
private static final String TEST_DIRECTORY_KEY = "test.build.data";
/** Filesystem URI used for map-reduce mini-cluster setup */
private static String FS_URI;
@ -495,37 +486,6 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
}
}
/**
* @return Where the DFS cluster will write data on the local subsystem.
* Creates it if it does not exist already. A subdir of {@link #getBaseTestDir()}
* @see #getTestFileSystem()
*/
Path getClusterTestDir() {
if (clusterTestDir == null){
setupClusterTestDir();
}
return new Path(clusterTestDir.getAbsolutePath());
}
/**
* Creates a directory for the DFS cluster, under the test data
*/
private void setupClusterTestDir() {
if (clusterTestDir != null) {
return;
}
// Using randomUUID ensures that multiple clusters can be launched by
// the same test if it stops and starts them
Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
// Have it cleaned up on exit
boolean b = deleteOnExit();
if (b) clusterTestDir.deleteOnExit();
conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
LOG.info("Created new mini-cluster data directory: " + clusterTestDir + ", deleteOnExit=" + b);
}
/**
* Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
* to write temporary test data. Call this method after setting up the mini dfs cluster
@ -715,6 +675,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
/** This is used before starting HDFS and map-reduce mini-clusters */
private void createDirsAndSetProperties() throws IOException {
setupClusterTestDir();
conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
createDirAndSetProperty("cache_data", "test.cache.data");
createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
@ -792,83 +753,6 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
}
}
/**
* Call this if you only want a zk cluster.
* @see #startMiniZKCluster() if you want zk + dfs + hbase mini cluster.
* @throws Exception
* @see #shutdownMiniZKCluster()
* @return zk cluster started.
*/
public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
return startMiniZKCluster(1);
}
/**
* Call this if you only want a zk cluster.
* @param zooKeeperServerNum
* @see #startMiniZKCluster() if you want zk + dfs + hbase mini cluster.
* @throws Exception
* @see #shutdownMiniZKCluster()
* @return zk cluster started.
*/
public MiniZooKeeperCluster startMiniZKCluster(
final int zooKeeperServerNum,
final int ... clientPortList)
throws Exception {
setupClusterTestDir();
return startMiniZKCluster(clusterTestDir, zooKeeperServerNum, clientPortList);
}
private MiniZooKeeperCluster startMiniZKCluster(final File dir)
throws Exception {
return startMiniZKCluster(dir, 1, null);
}
/**
* Start a mini ZK cluster. If the property "test.hbase.zookeeper.property.clientPort" is set,
* the port mentioned is used as the default port for ZooKeeper.
*/
private MiniZooKeeperCluster startMiniZKCluster(final File dir,
final int zooKeeperServerNum,
final int [] clientPortList)
throws Exception {
if (this.zkCluster != null) {
throw new IOException("Cluster already running at " + dir);
}
this.passedZkCluster = false;
this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
final int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
if (defPort > 0){
// If there is a port in the config file, we use it.
this.zkCluster.setDefaultClientPort(defPort);
}
if (clientPortList != null) {
// Ignore extra client ports
int clientPortListSize = (clientPortList.length <= zooKeeperServerNum) ?
clientPortList.length : zooKeeperServerNum;
for (int i=0; i < clientPortListSize; i++) {
this.zkCluster.addClientPort(clientPortList[i]);
}
}
int clientPort = this.zkCluster.startup(dir,zooKeeperServerNum);
this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
Integer.toString(clientPort));
return this.zkCluster;
}
/**
* Shuts down zk cluster created by call to {@link #startMiniZKCluster(File)}
* or does nothing.
* @throws IOException
* @see #startMiniZKCluster()
*/
public void shutdownMiniZKCluster() throws IOException {
if (this.zkCluster != null) {
this.zkCluster.shutdown();
this.zkCluster = null;
}
}
/**
* Start up a minicluster of hbase, dfs, and zookeeper.
@ -1078,8 +962,8 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
} else LOG.info("NOT STARTING DFS");
// Start up a zk cluster.
if (this.zkCluster == null) {
startMiniZKCluster(clusterTestDir);
if (getZkCluster() == null) {
startMiniZKCluster();
}
// Start the MiniHBaseCluster
@ -1197,30 +1081,14 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
this.connection = null;
}
shutdownMiniHBaseCluster();
if (!this.passedZkCluster){
shutdownMiniZKCluster();
}
shutdownMiniDFSCluster();
shutdownMiniZKCluster();
cleanupTestDir();
miniClusterRunning = false;
LOG.info("Minicluster is down");
}
/**
* @return True if we removed the test dirs
* @throws IOException
*/
@Override
public boolean cleanupTestDir() throws IOException {
boolean ret = super.cleanupTestDir();
if (deleteDir(this.clusterTestDir)) {
this.clusterTestDir = null;
return ret & true;
}
return false;
}
/**
* Shutdown HBase mini cluster. Does not shutdown zk or dfs if running.
* @throws IOException
@ -2902,30 +2770,6 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
private HBaseAdmin hbaseAdmin = null;
/**
* Returns a ZKWatcher instance.
* This instance is shared between HBaseTestingUtility instance users.
* Don't close it; it will be closed automatically when the
* cluster shuts down.
*
* @return The ZKWatcher instance.
* @throws IOException
*/
public synchronized ZKWatcher getZooKeeperWatcher()
throws IOException {
if (zooKeeperWatcher == null) {
zooKeeperWatcher = new ZKWatcher(conf, "testing utility",
new Abortable() {
@Override public void abort(String why, Throwable e) {
throw new RuntimeException("Unexpected abort in HBaseTestingUtility:"+why, e);
}
@Override public boolean isAborted() {return false;}
});
}
return zooKeeperWatcher;
}
private ZKWatcher zooKeeperWatcher;
/**
@ -3008,16 +2852,6 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
return null;
}
public MiniZooKeeperCluster getZkCluster() {
return zkCluster;
}
public void setZkCluster(MiniZooKeeperCluster zkCluster) {
this.passedZkCluster = true;
this.zkCluster = zkCluster;
conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
}
public MiniDFSCluster getDFSCluster() {
return dfsCluster;
}
@ -3417,7 +3251,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
throws IOException {
final Table meta = getConnection().getTable(TableName.META_TABLE_NAME);
try {
long l = waitFor(timeout, 200, true, new ExplainingPredicate<IOException>() {
waitFor(timeout, 200, true, new ExplainingPredicate<IOException>() {
@Override
public String explainFailure() throws IOException {
return explainTableAvailability(tableName);
@ -3547,31 +3381,6 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
return getFromStoreFile(store,get);
}
/**
* Gets a ZKWatcher.
* @param TEST_UTIL
*/
public static ZKWatcher getZooKeeperWatcher(
HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException,
IOException {
ZKWatcher zkw = new ZKWatcher(TEST_UTIL.getConfiguration(),
"unittest", new Abortable() {
boolean aborted = false;
@Override
public void abort(String why, Throwable e) {
aborted = true;
throw new RuntimeException("Fatal ZK error, why=" + why, e);
}
@Override
public boolean isAborted() {
return aborted;
}
});
return zkw;
}
public static void assertKVListsEqual(String additionalMsg,
final List<? extends Cell> expected,
final List<? extends Cell> actual) {
@ -3773,13 +3582,10 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
}
}
public static String randomMultiCastAddress() {
return "226.1.1." + random.nextInt(254);
}
public static void waitForHostPort(String host, int port)
throws IOException {
final int maxTimeMs = 10000;
@ -4021,30 +3827,6 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
FS_URI = fsURI;
}
/**
* Wrapper method for {@link Waiter#waitFor(Configuration, long, Predicate)}.
*/
public <E extends Exception> long waitFor(long timeout, Predicate<E> predicate)
throws E {
return Waiter.waitFor(this.conf, timeout, predicate);
}
/**
* Wrapper method for {@link Waiter#waitFor(Configuration, long, long, Predicate)}.
*/
public <E extends Exception> long waitFor(long timeout, long interval, Predicate<E> predicate)
throws E {
return Waiter.waitFor(this.conf, timeout, interval, predicate);
}
/**
* Wrapper method for {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)}.
*/
public <E extends Exception> long waitFor(long timeout, long interval,
boolean failIfTimeout, Predicate<E> predicate) throws E {
return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
}
/**
* Returns a {@link Predicate} for checking that there are no regions in transition in master
*/

View File

@ -36,15 +36,15 @@ import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.ZKTests;
import org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@Category({ MediumTests.class, ZKTests.class })
@Category({ MediumTests.class, ClientTests.class })
public class TestZKAsyncRegistry {
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

View File

@ -31,9 +31,13 @@ import javax.security.auth.login.AppConfigurationEntry;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TestZooKeeper;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.ZKTests;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Stat;
@ -43,7 +47,7 @@ import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@Category({MiscTests.class, MediumTests.class})
@Category({ ZKTests.class, MediumTests.class })
public class TestZooKeeperACL {
private final static Log LOG = LogFactory.getLog(TestZooKeeperACL.class);
private final static HBaseTestingUtility TEST_UTIL =
@ -89,9 +93,6 @@ public class TestZooKeeperACL {
TestZooKeeper.class.getName(), null);
}
/**
* @throws java.lang.Exception
*/
@AfterClass
public static void tearDownAfterClass() throws Exception {
if (!secureZKAvailable) {
@ -100,9 +101,6 @@ public class TestZooKeeperACL {
TEST_UTIL.shutdownMiniCluster();
}
/**
* @throws java.lang.Exception
*/
@Before
public void setUp() throws Exception {
if (!secureZKAvailable) {
@ -270,7 +268,8 @@ public class TestZooKeeperACL {
*/
@Test
public void testIsZooKeeperSecure() throws Exception {
boolean testJaasConfig = ZKUtil.isSecureZooKeeper(new Configuration(TEST_UTIL.getConfiguration()));
boolean testJaasConfig =
ZKUtil.isSecureZooKeeper(new Configuration(TEST_UTIL.getConfiguration()));
assertEquals(testJaasConfig, secureZKAvailable);
// Define Jaas configuration without ZooKeeper Jaas config
File saslConfFile = File.createTempFile("tmp", "fakeJaas.conf");

View File

@ -523,13 +523,21 @@
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>
<version>${project.version}</version>
<artifactId>hbase-zookeeper</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-zookeeper</artifactId>
<scope>test</scope>
<type>test-jar</type>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>
<version>${project.version}</version>
<scope>test</scope>
<type>test-jar</type>
</dependency>

View File

@ -78,6 +78,18 @@
<type>jar</type>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-zookeeper</artifactId>
<type>jar</type>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-zookeeper</artifactId>
<type>test-jar</type>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>

View File

@ -62,20 +62,6 @@
<skipAssembly>true</skipAssembly>
</configuration>
</plugin>
<!-- Make a jar and put the sources in the jar -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>jar</goal>
<goal>test-jar</goal>
</goals>
</execution>
</executions>
</plugin>
<!-- General plugins -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>

View File

@ -0,0 +1,216 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import java.io.File;
import java.io.IOException;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.yetus.audience.InterfaceAudience;
/**
* Helpers for testing HBase that do not depend on specific server/etc. things. The main difference
* from {@link HBaseCommonTestingUtility} is that we can start a zookeeper cluster.
*/
@InterfaceAudience.Public
public class HBaseZKTestingUtility extends HBaseCommonTestingUtility {
private MiniZooKeeperCluster zkCluster;
/**
* Set if we were passed a zkCluster. If so, we won't shutdown zk as part of general shutdown.
*/
private boolean passedZkCluster;
protected ZKWatcher zooKeeperWatcher;
/** Directory (a subdirectory of dataTestDir) used by the dfs cluster if any */
protected File clusterTestDir;
public HBaseZKTestingUtility() {
this(HBaseConfiguration.create());
}
public HBaseZKTestingUtility(Configuration conf) {
super(conf);
}
/**
* @return Where the cluster will write data on the local subsystem. Creates it if it does not
* exist already. A subdir of {@link #getBaseTestDir()}
* @see #getTestFileSystem()
*/
Path getClusterTestDir() {
if (clusterTestDir == null) {
setupClusterTestDir();
}
return new Path(clusterTestDir.getAbsolutePath());
}
/**
* Creates a directory for the cluster, under the test data
*/
protected void setupClusterTestDir() {
if (clusterTestDir != null) {
return;
}
// Using randomUUID ensures that multiple clusters can be launched by
// the same test if it stops and starts them
Path testDir = getDataTestDir("cluster_" + UUID.randomUUID().toString());
clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
// Have it cleaned up on exit
boolean b = deleteOnExit();
if (b) {
clusterTestDir.deleteOnExit();
}
LOG.info("Created new mini-cluster data directory: " + clusterTestDir + ", deleteOnExit=" + b);
}
/**
* Call this if you only want a zk cluster.
* @see #shutdownMiniZKCluster()
* @return zk cluster started.
*/
public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
return startMiniZKCluster(1);
}
/**
* Call this if you only want a zk cluster.
* @see #shutdownMiniZKCluster()
* @return zk cluster started.
*/
public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum, int... clientPortList)
throws Exception {
setupClusterTestDir();
return startMiniZKCluster(clusterTestDir, zooKeeperServerNum, clientPortList);
}
/**
* Start a mini ZK cluster. If the property "test.hbase.zookeeper.property.clientPort" is set, the
* port mentioned is used as the default port for ZooKeeper.
*/
private MiniZooKeeperCluster startMiniZKCluster(File dir, int zooKeeperServerNum,
int[] clientPortList) throws Exception {
if (this.zkCluster != null) {
throw new IOException("Cluster already running at " + dir);
}
this.passedZkCluster = false;
this.zkCluster = new MiniZooKeeperCluster(this.getConfiguration());
int defPort = this.conf.getInt("test.hbase.zookeeper.property.clientPort", 0);
if (defPort > 0) {
// If there is a port in the config file, we use it.
this.zkCluster.setDefaultClientPort(defPort);
}
if (clientPortList != null) {
// Ignore extra client ports
int clientPortListSize = (clientPortList.length <= zooKeeperServerNum) ? clientPortList.length
: zooKeeperServerNum;
for (int i = 0; i < clientPortListSize; i++) {
this.zkCluster.addClientPort(clientPortList[i]);
}
}
int clientPort = this.zkCluster.startup(dir, zooKeeperServerNum);
this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(clientPort));
return this.zkCluster;
}
public MiniZooKeeperCluster getZkCluster() {
return zkCluster;
}
public void setZkCluster(MiniZooKeeperCluster zkCluster) {
this.passedZkCluster = true;
this.zkCluster = zkCluster;
conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
}
/**
* Shuts down zk cluster created by call to {@link #startMiniZKCluster(File)} or does nothing.
* @see #startMiniZKCluster()
*/
public void shutdownMiniZKCluster() throws IOException {
if (!passedZkCluster && this.zkCluster != null) {
this.zkCluster.shutdown();
this.zkCluster = null;
}
}
/**
* Returns a ZKWatcher instance. This instance is shared between HBaseTestingUtility instance
* users. Don't close it; it will be closed automatically when the cluster shuts down.
* @return The ZKWatcher instance.
*/
public synchronized ZKWatcher getZooKeeperWatcher() throws IOException {
if (zooKeeperWatcher == null) {
zooKeeperWatcher = new ZKWatcher(conf, "testing utility", new Abortable() {
@Override
public void abort(String why, Throwable e) {
throw new RuntimeException("Unexpected abort in HBaseZKTestingUtility:" + why, e);
}
@Override
public boolean isAborted() {
return false;
}
});
}
return zooKeeperWatcher;
}
/**
* Gets a ZKWatcher.
*/
public static ZKWatcher getZooKeeperWatcher(HBaseZKTestingUtility testUtil)
throws ZooKeeperConnectionException, IOException {
ZKWatcher zkw = new ZKWatcher(testUtil.getConfiguration(), "unittest", new Abortable() {
boolean aborted = false;
@Override
public void abort(String why, Throwable e) {
aborted = true;
throw new RuntimeException("Fatal ZK error, why=" + why, e);
}
@Override
public boolean isAborted() {
return aborted;
}
});
return zkw;
}
/**
* @return True if we removed the test dirs
*/
@Override
public boolean cleanupTestDir() throws IOException {
boolean ret = super.cleanupTestDir();
if (deleteDir(this.clusterTestDir)) {
this.clusterTestDir = null;
return ret & true;
}
return false;
}
}
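To show how the relocated ZooKeeper tests are expected to drive this utility (the same pattern appears in the test changes later in this commit), here is a minimal sketch with a hypothetical test class TestSomeZKFeature:

import org.apache.hadoop.hbase.HBaseZKTestingUtility;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.ZKTests;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ ZKTests.class, MediumTests.class })
public class TestSomeZKFeature {
  private static final HBaseZKTestingUtility TEST_UTIL = new HBaseZKTestingUtility();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.startMiniZKCluster(); // single-node mini ZooKeeper cluster
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniZKCluster(); // no-op if the cluster was handed in via setZkCluster
    TEST_UTIL.cleanupTestDir();
  }

  @Test
  public void testAgainstZooKeeper() throws Exception {
    // Shared watcher; do not close it, the utility tears it down with the cluster.
    ZKWatcher watcher = TEST_UTIL.getZooKeeperWatcher();
    // ... exercise ZooKeeper-backed logic against the mini cluster ...
  }
}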

View File

@ -1,5 +1,4 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -18,6 +17,9 @@
*/
package org.apache.hadoop.hbase.zookeeper;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import java.io.IOException;
import java.util.Properties;
@ -25,23 +27,20 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HBaseZKTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.ZKTests;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
/**
* Test for HQuorumPeer.
*/
@Category({MiscTests.class, MediumTests.class})
@Category({ ZKTests.class, MediumTests.class })
public class TestHQuorumPeer {
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static final HBaseZKTestingUtility TEST_UTIL = new HBaseZKTestingUtility();
private static int PORT_NO = 21818;
private Path dataDir;
@ -103,4 +102,3 @@ public class TestHQuorumPeer {
assertEquals("foo:1234,bar:5678,baz:8888,qux:9012", s);
}
}

View File

@ -28,15 +28,14 @@ import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
import org.apache.hadoop.hbase.HBaseZKTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.ZKTests;
import org.apache.zookeeper.CreateMode;
@ -52,9 +51,7 @@ import org.junit.experimental.categories.Category;
@Category({ ZKTests.class, MediumTests.class })
public class TestReadOnlyZKClient {
private static HBaseCommonTestingUtility UTIL = new HBaseCommonTestingUtility();
private static MiniZooKeeperCluster CLUSTER;
private static HBaseZKTestingUtility UTIL = new HBaseZKTestingUtility();
private static int PORT;
@ -67,11 +64,9 @@ public class TestReadOnlyZKClient {
private static ReadOnlyZKClient RO_ZK;
@BeforeClass
public static void setUp() throws IOException, InterruptedException, KeeperException {
File file =
new File(UTIL.getDataTestDir("zkcluster_" + UUID.randomUUID().toString()).toString());
CLUSTER = new MiniZooKeeperCluster(UTIL.getConfiguration());
PORT = CLUSTER.startup(file);
public static void setUp() throws Exception {
PORT = UTIL.startMiniZKCluster().getClientPort();
ZooKeeper zk = new ZooKeeper("localhost:" + PORT, 10000, e -> {
});
DATA = new byte[10];
@ -94,18 +89,28 @@ public class TestReadOnlyZKClient {
@AfterClass
public static void tearDown() throws IOException {
RO_ZK.close();
CLUSTER.shutdown();
UTIL.shutdownMiniZKCluster();
UTIL.cleanupTestDir();
}
@Test
public void testGetAndExists() throws InterruptedException, ExecutionException {
public void testGetAndExists() throws Exception {
assertArrayEquals(DATA, RO_ZK.get(PATH).get());
assertEquals(CHILDREN, RO_ZK.exists(PATH).get().getNumChildren());
assertNotNull(RO_ZK.getZooKeeper());
// a little longer than keep alive millis
Thread.sleep(5000);
assertNull(RO_ZK.getZooKeeper());
// The zookeeper client should eventually be closed once the keep alive time has elapsed
UTIL.waitFor(10000, new ExplainingPredicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return RO_ZK.getZooKeeper() == null;
}
@Override
public String explainFailure() throws Exception {
return "Connection to zookeeper is still alive";
}
});
}
@Test
@ -129,7 +134,7 @@ public class TestReadOnlyZKClient {
assertArrayEquals(DATA, RO_ZK.get(PATH).get());
ZooKeeper zk = RO_ZK.getZooKeeper();
long sessionId = zk.getSessionId();
CLUSTER.getZooKeeperServers().get(0).closeSession(sessionId);
UTIL.getZkCluster().getZooKeeperServers().get(0).closeSession(sessionId);
// should not reach keep alive so still the same instance
assertSame(zk, RO_ZK.getZooKeeper());

View File

@ -1,4 +1,4 @@
/*
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -24,10 +24,10 @@ import java.lang.reflect.Field;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HBaseZKTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.ZKTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
@ -40,10 +40,10 @@ import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@Category({MiscTests.class, MediumTests.class})
@Category({ ZKTests.class, MediumTests.class })
public class TestRecoverableZooKeeper {
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final static HBaseZKTestingUtility TEST_UTIL = new HBaseZKTestingUtility();
Abortable abortable = new Abortable() {
@Override
@ -107,8 +107,9 @@ public class TestRecoverableZooKeeper {
throwExceptionInNumOperations = 0;
throw new KeeperException.ConnectionLossException();
}
if (throwExceptionInNumOperations > 0)
if (throwExceptionInNumOperations > 0) {
throwExceptionInNumOperations--;
}
}
@Override

View File

@ -1,4 +1,4 @@
/*
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,28 +15,32 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.zookeeper;
import static org.junit.Assert.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseZKTestingUtility;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.ZKTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
*/
@Category({MiscTests.class, MediumTests.class})
@Category({ ZKTests.class, MediumTests.class })
public class TestZKLeaderManager {
private static final Log LOG = LogFactory.getLog(TestZKLeaderManager.class);
@ -119,12 +123,12 @@ public class TestZKLeaderManager {
}
}
private static HBaseTestingUtility TEST_UTIL;
private static HBaseZKTestingUtility TEST_UTIL;
private static MockLeader[] CANDIDATES;
@BeforeClass
public static void setupBeforeClass() throws Exception {
TEST_UTIL = new HBaseTestingUtility();
TEST_UTIL = new HBaseZKTestingUtility();
TEST_UTIL.startMiniZKCluster();
Configuration conf = TEST_UTIL.getConfiguration();
@ -233,4 +237,3 @@ public class TestZKLeaderManager {
}
}

View File

@ -24,13 +24,15 @@ import static org.junit.Assert.assertTrue;
import java.security.Permission;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseZKTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.testclassification.ZKTests;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@Category({MiscTests.class, SmallTests.class})
@Category({ ZKTests.class, SmallTests.class })
public class TestZKMainServer {
// ZKMS calls System.exit. Catch the call and prevent exit using trick described up in
// http://stackoverflow.com/questions/309396/java-how-to-test-methods-that-call-system-exit
@ -63,12 +65,11 @@ public class TestZKMainServer {
/**
* We need delete of a znode to work at least.
* @throws Exception
*/
@Test
public void testCommandLineWorks() throws Exception {
System.setSecurityManager(new NoExitSecurityManager());
HBaseTestingUtility htu = new HBaseTestingUtility();
HBaseZKTestingUtility htu = new HBaseZKTestingUtility();
htu.getConfiguration().setInt(HConstants.ZK_SESSION_TIMEOUT, 1000);
htu.startMiniZKCluster();
try {

View File

@ -17,10 +17,8 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.zookeeper;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
@ -32,9 +30,9 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HBaseZKTestingUtility;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.ZKTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp;
import org.apache.zookeeper.CreateMode;
@ -49,10 +47,10 @@ import org.junit.experimental.categories.Category;
/**
* Test ZooKeeper multi-update functionality
*/
@Category({MiscTests.class, MediumTests.class})
@Category({ ZKTests.class, MediumTests.class })
public class TestZKMulti {
private static final Log LOG = LogFactory.getLog(TestZKMulti.class);
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final static HBaseZKTestingUtility TEST_UTIL = new HBaseZKTestingUtility();
private static ZKWatcher zkw = null;
@BeforeClass

View File

@ -25,21 +25,20 @@ import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.Random;
import java.util.concurrent.Semaphore;
import java.util.concurrent.ThreadLocalRandom;
import junit.framework.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.master.TestActiveMasterManager.NodeDeletionListener;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HBaseZKTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.ZKTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.ZooKeeper;
import org.junit.AfterClass;
@ -47,12 +46,10 @@ import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@Category({MiscTests.class, MediumTests.class})
@Category({ ZKTests.class, MediumTests.class })
public class TestZKNodeTracker {
private static final Log LOG = LogFactory.getLog(TestZKNodeTracker.class);
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final static Random rand = new Random();
private final static HBaseZKTestingUtility TEST_UTIL = new HBaseZKTestingUtility();
@BeforeClass
public static void setUpBeforeClass() throws Exception {
@ -66,13 +63,11 @@ public class TestZKNodeTracker {
/**
* Test that we can interrupt a node that is blocked on a wait.
* @throws IOException
* @throws InterruptedException
*/
@Test public void testInterruptible() throws IOException, InterruptedException {
@Test
public void testInterruptible() throws IOException, InterruptedException {
Abortable abortable = new StubAbortable();
ZKWatcher zk = new ZKWatcher(TEST_UTIL.getConfiguration(),
"testInterruptible", abortable);
ZKWatcher zk = new ZKWatcher(TEST_UTIL.getConfiguration(), "testInterruptible", abortable);
final TestTracker tracker = new TestTracker(zk, "/xyz", abortable);
tracker.start();
Thread t = new Thread() {
@ -86,7 +81,9 @@ public class TestZKNodeTracker {
}
};
t.start();
while (!t.isAlive()) Threads.sleep(1);
while (!t.isAlive()) {
Threads.sleep(1);
}
tracker.stop();
t.join();
// If it wasn't interruptible, we'd never get to here.
@ -99,8 +96,8 @@ public class TestZKNodeTracker {
"testNodeTracker", abortable);
ZKUtil.createAndFailSilent(zk, zk.znodePaths.baseZNode);
final String node =
ZNodePaths.joinZNode(zk.znodePaths.baseZNode, new Long(rand.nextLong()).toString());
final String node = ZNodePaths.joinZNode(zk.znodePaths.baseZNode,
Long.toString(ThreadLocalRandom.current().nextLong()));
final byte [] dataOne = Bytes.toBytes("dataOne");
final byte [] dataTwo = Bytes.toBytes("dataTwo");
@ -132,9 +129,9 @@ public class TestZKNodeTracker {
// Create a completely separate zk connection for test triggers and avoid
// any weird watcher interactions from the test
final ZooKeeper zkconn = new ZooKeeper(
ZKConfig.getZKQuorumServersString(TEST_UTIL.getConfiguration()), 60000,
new StubWatcher());
final ZooKeeper zkconn =
new ZooKeeper(ZKConfig.getZKQuorumServersString(TEST_UTIL.getConfiguration()), 60000, e -> {
});
// Add the node with data one
zkconn.create(node, dataOne, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
@ -247,7 +244,7 @@ public class TestZKNodeTracker {
}
public static class TestingZKListener extends ZKListener {
private static final Log LOG = LogFactory.getLog(NodeDeletionListener.class);
private static final Log LOG = LogFactory.getLog(TestingZKListener.class);
private Semaphore deletedLock;
private Semaphore createdLock;
@ -307,12 +304,6 @@ public class TestZKNodeTracker {
public boolean isAborted() {
return false;
}
}
public static class StubWatcher implements Watcher {
@Override
public void process(WatchedEvent event) {}
}
@Test
@ -331,21 +322,20 @@ public class TestZKNodeTracker {
// Check that we manage the case when there is no data
ZKUtil.createAndFailSilent(zkw, nodeName);
MasterAddressTracker.deleteIfEquals(zkw, sn.toString());
Assert.assertFalse(ZKUtil.getData(zkw, nodeName) == null);
assertNotNull(ZKUtil.getData(zkw, nodeName));
// Check that we don't delete if we're not supposed to
ZKUtil.setData(zkw, nodeName, MasterAddressTracker.toByteArray(sn, 0));
MasterAddressTracker.deleteIfEquals(zkw, ServerName.valueOf("127.0.0.2:52", 45L).toString());
Assert.assertFalse(ZKUtil.getData(zkw, nodeName) == null);
assertNotNull(ZKUtil.getData(zkw, nodeName));
// Check that we delete when we're supposed to
ZKUtil.setData(zkw, nodeName,MasterAddressTracker.toByteArray(sn, 0));
MasterAddressTracker.deleteIfEquals(zkw, sn.toString());
Assert.assertTrue( ZKUtil.getData(zkw, nodeName)== null );
assertNull(ZKUtil.getData(zkw, nodeName));
// Check that we support the case when the znode does not exist
MasterAddressTracker.deleteIfEquals(zkw, sn.toString()); // must not throw an exception
}
}

View File

@ -759,6 +759,7 @@
<phase>prepare-package</phase>
<goals>
<goal>jar-no-fork</goal>
<goal>test-jar-no-fork</goal>
</goals>
</execution>
</executions>
@ -1800,6 +1801,13 @@
<groupId>org.apache.hbase</groupId>
<version>${project.version}</version>
</dependency>
<dependency>
<artifactId>hbase-zookeeper</artifactId>
<groupId>org.apache.hbase</groupId>
<version>${project.version}</version>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<!-- General dependencies -->
<dependency>
<groupId>com.github.stephenc.findbugs</groupId>