HBASE-23993 Use loopback for zk standalone server in minizkcluster (#1291)

hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java
 Have client and server use loopback instead of 'localhost'

Signed-off-by: Duo Zhang <zhangduo@apache.org>
Signed-off-by: Jan Hentschel <janh@apache.org>
Michael Stack 2020-03-17 20:14:24 -07:00 committed by stack
parent 9190a6b59a
commit 3e1cf00c71
10 changed files with 47 additions and 40 deletions
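
The change repeats one pattern across the client-side tests below: instead of concatenating the hard-coded name "localhost" with the mini cluster's client port, callers take the host:port pair from the new MiniZooKeeperCluster#getAddress() (or its HOST constant), so client and server agree on the loopback address. A minimal sketch of the new usage, assuming the usual HBaseTestingUtility field the touched tests already have (here called UTIL, which is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.HConstants;

    public class LoopbackQuorumSketch {
      // Illustrative stand-in for the testing utility each touched test already owns.
      private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

      static void configureQuorum(Configuration conf) throws Exception {
        UTIL.startMiniZKCluster();
        // Before this commit: "localhost:" + UTIL.getZkCluster().getClientPort()
        // After: the loopback host:port the mini cluster itself publishes.
        conf.set(HConstants.ZOOKEEPER_QUORUM, UTIL.getZkCluster().getAddress().toString());
      }
    }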

View File

@@ -66,7 +66,7 @@ public class TestZooKeeperScanPolicyObserver {
       .createTable(TableDescriptorBuilder.newBuilder(NAME)
         .setCoprocessor(ZooKeeperScanPolicyObserver.class.getName())
         .setValue(ZooKeeperScanPolicyObserver.ZK_ENSEMBLE_KEY,
-          "localhost:" + UTIL.getZkCluster().getClientPort())
+          UTIL.getZkCluster().getAddress().toString())
         .setValue(ZooKeeperScanPolicyObserver.ZK_SESSION_TIMEOUT_KEY, "2000")
         .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).build()).build());
     TABLE = UTIL.getConnection().getTable(NAME);

View File

@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
 import org.apache.hadoop.hbase.zookeeper.ZKConfig;
 import org.apache.zookeeper.KeeperException;
 import org.junit.Test;
@@ -373,7 +374,8 @@ public abstract class TestReplicationStateBasic {
       }
       // Add peers for the corresponding queues so they are not orphans
       rp.getPeerStorage().addPeer("qId" + i,
-        ReplicationPeerConfig.newBuilder().setClusterKey("localhost:2818:/bogus" + i).build(),
+        ReplicationPeerConfig.newBuilder().
+          setClusterKey(MiniZooKeeperCluster.HOST + ":2818:/bogus" + i).build(),
         true, SyncReplicationState.NONE);
     }
   }

View File

@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
 import org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -101,7 +102,7 @@ public class TestZKConnectionRegistry {
   public void testIndependentZKConnections() throws IOException {
     try (ReadOnlyZKClient zk1 = REGISTRY.getZKClient()) {
       Configuration otherConf = new Configuration(TEST_UTIL.getConfiguration());
-      otherConf.set(HConstants.ZOOKEEPER_QUORUM, "localhost");
+      otherConf.set(HConstants.ZOOKEEPER_QUORUM, MiniZooKeeperCluster.HOST);
       try (ZKConnectionRegistry otherRegistry = new ZKConnectionRegistry(otherConf)) {
         ReadOnlyZKClient zk2 = otherRegistry.getZKClient();
         assertNotSame("Using a different configuration / quorum should result in different " +

View File

@@ -25,7 +25,6 @@ import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.ArgumentMatchers.anyString;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.spy;
-
 import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -78,7 +77,7 @@ public class TestReplicationProcedureRetry {
     Admin admin = UTIL.getAdmin();
     String peerId = "1";
     ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
-      .setClusterKey("localhost:" + UTIL.getZkCluster().getClientPort() + ":/hbase2").build();
+      .setClusterKey(UTIL.getZkCluster().getAddress().toString() + ":/hbase2").build();
     admin.addReplicationPeer(peerId, peerConfig, true);
     assertEquals(peerConfig.getClusterKey(),

View File

@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -24,7 +24,6 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
-
 import java.io.IOException;
 import java.lang.reflect.Field;
 import java.net.URLEncoder;
@@ -101,10 +100,8 @@ import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
 import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
 import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
-
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor;
@@ -502,7 +499,7 @@ public abstract class TestReplicationSourceManager {
     String replicationSourceImplName = conf.get("replication.replicationsource.implementation");
     final String peerId = "FakePeer";
     final ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
-      .setClusterKey("localhost:" + utility.getZkCluster().getClientPort() + ":/hbase").build();
+      .setClusterKey(utility.getZkCluster().getAddress().toString() + ":/hbase").build();
     try {
       DummyServer server = new DummyServer();
       ReplicationQueueStorage rq = ReplicationStorageFactory
@@ -557,7 +554,7 @@ public abstract class TestReplicationSourceManager {
   public void testRemovePeerMetricsCleanup() throws Exception {
     final String peerId = "DummyPeer";
     final ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
-      .setClusterKey("localhost:" + utility.getZkCluster().getClientPort() + ":/hbase").build();
+      .setClusterKey(utility.getZkCluster().getAddress().toString() + ":/hbase").build();
     try {
       MetricsReplicationSourceSource globalSource = getGlobalSource();
       final int globalLogQueueSizeInitial = globalSource.getSizeOfLogQueue();
@@ -700,8 +697,6 @@ public abstract class TestReplicationSourceManager {
   /**
    * Remove a peer and wait for it to get cleaned up
-   * @param peerId
-   * @throws Exception
    */
   private void removePeerAndWait(final String peerId) throws Exception {
     final ReplicationPeers rp = manager.getReplicationPeers();

View File

@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -31,7 +31,6 @@ import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
-
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ExecutorService;
@@ -62,7 +61,6 @@ import org.junit.runner.RunWith;
 import org.mockito.ArgumentMatcher;
 import org.mockito.Mock;
 import org.mockito.runners.MockitoJUnitRunner;
-
 import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;

 @RunWith(MockitoJUnitRunner.class)
@@ -104,7 +102,8 @@ public class TestCanaryTool {
   @Test
   public void testZookeeperCanaryPermittedFailuresArgumentWorks() throws Exception {
-    final String[] args = { "-t", "10000", "-zookeeper", "-treatFailureAsError", "-permittedZookeeperFailures", "1" };
+    final String[] args = { "-t", "10000", "-zookeeper", "-treatFailureAsError",
+      "-permittedZookeeperFailures", "1" };
     testZookeeperCanaryWithArgs(args);
   }
@@ -211,8 +210,10 @@ public class TestCanaryTool {
     assertEquals(0, ToolRunner.run(testingUtility.getConfiguration(), canary, args));
     verify(sink, times(tableNames.length)).initializeAndGetReadLatencyForTable(isA(String.class));
     for (int i=0; i<2; i++) {
-      assertNotEquals("verify non-null read latency", null, sink.getReadLatencyMap().get(tableNames[i].getNameAsString()));
-      assertNotEquals("verify non-zero read latency", 0L, sink.getReadLatencyMap().get(tableNames[i].getNameAsString()));
+      assertNotEquals("verify non-null read latency", null,
+        sink.getReadLatencyMap().get(tableNames[i].getNameAsString()));
+      assertNotEquals("verify non-zero read latency", 0L,
+        sink.getReadLatencyMap().get(tableNames[i].getNameAsString()));
     }
     // One table's timeout is set for 0 ms and thus, should lead to an error.
     verify(mockAppender, times(1)).doAppend(argThat(new ArgumentMatcher<LoggingEvent>() {
@@ -309,7 +310,8 @@ public class TestCanaryTool {
   private void testZookeeperCanaryWithArgs(String[] args) throws Exception {
     Integer port =
       Iterables.getOnlyElement(testingUtility.getZkCluster().getClientPortList(), null);
-    testingUtility.getConfiguration().set(HConstants.ZOOKEEPER_QUORUM, "localhost:" + port);
+    String hostPort = testingUtility.getZkCluster().getAddress().toString();
+    testingUtility.getConfiguration().set(HConstants.ZOOKEEPER_QUORUM, hostPort);
     ExecutorService executor = new ScheduledThreadPoolExecutor(2);
     CanaryTool.ZookeeperStdOutSink sink = spy(new CanaryTool.ZookeeperStdOutSink());
     CanaryTool canary = new CanaryTool(executor, sink);
@@ -317,7 +319,6 @@ public class TestCanaryTool {
     String baseZnode = testingUtility.getConfiguration()
       .get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
-    verify(sink, atLeastOnce())
-      .publishReadTiming(eq(baseZnode), eq("localhost:" + port), anyLong());
+    verify(sink, atLeastOnce()).publishReadTiming(eq(baseZnode), eq(hostPort), anyLong());
   }
 }

View File

@@ -598,7 +598,7 @@ module Hbase
     end

     define_test "set_peer_bandwidth: works with peer bandwidth upper limit" do
-      cluster_key = "localhost:2181:/hbase-test"
+      cluster_key = org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster::HOST + ":2181:/hbase-test"
       args = { CLUSTER_KEY => cluster_key }
       command(:add_peer, @peer_id, args)
@@ -644,7 +644,7 @@ module Hbase
     end

     define_test "get_peer_config: works with simple clusterKey peer" do
-      cluster_key = "localhost:2181:/hbase-test"
+      cluster_key = org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster::HOST + ":2181:/hbase-test"
       args = { CLUSTER_KEY => cluster_key }
       command(:add_peer, @peer_id, args)
       peer_config = command(:get_peer_config, @peer_id)
@@ -654,7 +654,7 @@ module Hbase
     end

     define_test "get_peer_config: works with replicationendpointimpl peer and config params" do
-      cluster_key = 'localhost:2181:/hbase-test'
+      cluster_key = org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster::HOST + ":2181:/hbase-test"
       repl_impl = 'org.apache.hadoop.hbase.replication.DummyReplicationEndpoint'
       config_params = { "config1" => "value1", "config2" => "value2" }
       args = { CLUSTER_KEY => cluster_key, ENDPOINT_CLASSNAME => repl_impl,
@@ -670,7 +670,7 @@ module Hbase
     end

     define_test "list_peer_configs: returns all peers' ReplicationPeerConfig objects" do
-      cluster_key = "localhost:2181:/hbase-test"
+      cluster_key = org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster::HOST + ":2181:/hbase-test"
       args = { CLUSTER_KEY => cluster_key }
       peer_id_second = '2'
       command(:add_peer, @peer_id, args)

View File

@@ -25,12 +25,14 @@ import java.io.PrintWriter;
 import java.io.StringWriter;
 import java.net.BindException;
 import java.net.ConnectException;
+import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Random;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.net.Address;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -49,12 +51,13 @@ import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesti
 @InterfaceAudience.Public
 public class MiniZooKeeperCluster {
   private static final Logger LOG = LoggerFactory.getLogger(MiniZooKeeperCluster.class);
   private static final int TICK_TIME = 2000;
   private static final int TIMEOUT = 1000;
   private static final int DEFAULT_CONNECTION_TIMEOUT = 30000;
   private static final byte[] STATIC_BYTES = Bytes.toBytes("stat");
   private final int connectionTimeout;
+  public static final String LOOPBACK_HOST = InetAddress.getLoopbackAddress().getHostName();
+  public static final String HOST = LOOPBACK_HOST;
   private boolean started;
@@ -236,7 +239,7 @@ public class MiniZooKeeperCluster {
     while (true) {
       try {
         standaloneServerFactory = new NIOServerCnxnFactory();
-        standaloneServerFactory.configure(new InetSocketAddress(currentClientPort),
+        standaloneServerFactory.configure(new InetSocketAddress(LOOPBACK_HOST, currentClientPort),
           configuration.getInt(HConstants.ZOOKEEPER_MAX_CLIENT_CNXNS,
             HConstants.DEFAULT_ZOOKEEPER_MAX_CLIENT_CNXNS));
       } catch (BindException e) {
@@ -418,7 +421,7 @@ public class MiniZooKeeperCluster {
     long start = System.currentTimeMillis();
     while (true) {
       try {
-        send4LetterWord("localhost", port, "stat", (int)timeout);
+        send4LetterWord(HOST, port, "stat", (int)timeout);
       } catch (IOException e) {
         return true;
       }
@@ -441,7 +444,7 @@ public class MiniZooKeeperCluster {
     long start = System.currentTimeMillis();
     while (true) {
       try {
-        String result = send4LetterWord("localhost", port, "stat", (int)timeout);
+        String result = send4LetterWord(HOST, port, "stat", (int)timeout);
         if (result.startsWith("Zookeeper version:") && !result.contains("READ-ONLY")) {
           return true;
         } else {
@@ -449,10 +452,10 @@ public class MiniZooKeeperCluster {
         }
       } catch (ConnectException e) {
         // ignore as this is expected, do not log stacktrace
-        LOG.info("localhost:{} not up: {}", port, e.toString());
+        LOG.info("{}:{} not up: {}", HOST, port, e.toString());
       } catch (IOException e) {
         // ignore as this is expected
-        LOG.info("localhost:{} not up", port, e);
+        LOG.info("{}:{} not up", HOST, port, e);
       }
       if (System.currentTimeMillis() > start + timeout) {
@@ -472,6 +475,13 @@ public class MiniZooKeeperCluster {
       : clientPortList.get(activeZKServerIndex);
   }

+  /**
+   * @return Address for this cluster instance.
+   */
+  public Address getAddress() {
+    return Address.fromParts(HOST, getClientPort());
+  }
+
   List<ZooKeeperServer> getZooKeeperServers() {
     return zooKeeperServers;
   }
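
The server half of the change is the InetSocketAddress handed to the NIOServerCnxnFactory above: the single-argument form denotes the wildcard address, so the standalone ZooKeeper listens on every local interface, while the two-argument form with LOOPBACK_HOST limits it to the loopback interface. A small self-contained sketch of the difference (the port 2181 is only an example value):

    import java.net.InetAddress;
    import java.net.InetSocketAddress;

    public class BindAddressSketch {
      public static void main(String[] args) {
        String loopbackHost = InetAddress.getLoopbackAddress().getHostName();

        // Single-argument form: the wildcard address; a server socket bound to it
        // accepts connections on all interfaces.
        InetSocketAddress anyInterface = new InetSocketAddress(2181);
        // Host + port form: a server socket bound to it accepts loopback connections only.
        InetSocketAddress loopbackOnly = new InetSocketAddress(loopbackHost, 2181);

        System.out.println(anyInterface);  // e.g. 0.0.0.0/0.0.0.0:2181
        System.out.println(loopbackOnly);  // e.g. localhost/127.0.0.1:2181
      }
    }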

View File

@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -36,7 +36,6 @@ import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
-
 import java.io.IOException;
 import java.util.Collections;
 import java.util.List;
@@ -83,8 +82,9 @@ public class TestReadOnlyZKClient {
   @BeforeClass
   public static void setUp() throws Exception {
     final int port = UTIL.startMiniZKCluster().getClientPort();
-    ZooKeeper zk = ZooKeeperHelper.getConnectedZooKeeper("localhost:" + port, 10000);
+    String hostPort = UTIL.getZkCluster().getAddress().toString();
+    ZooKeeper zk = ZooKeeperHelper.getConnectedZooKeeper(hostPort, 10000);
     DATA = new byte[10];
     ThreadLocalRandom.current().nextBytes(DATA);
     zk.create(PATH, DATA, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
@@ -93,7 +93,7 @@ public class TestReadOnlyZKClient {
     }
     zk.close();
     Configuration conf = UTIL.getConfiguration();
-    conf.set(HConstants.ZOOKEEPER_QUORUM, "localhost:" + port);
+    conf.set(HConstants.ZOOKEEPER_QUORUM, hostPort);
     conf.setInt(ReadOnlyZKClient.RECOVERY_RETRY, 3);
     conf.setInt(ReadOnlyZKClient.RECOVERY_RETRY_INTERVAL_MILLIS, 100);
     conf.setInt(ReadOnlyZKClient.KEEPALIVE_MILLIS, 3000);

View File

@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.zookeeper;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
-
 import java.security.Permission;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -84,8 +83,8 @@ public class TestZKMainServer {
     ZKUtil.checkExists(zkw, znode);
     boolean exception = false;
     try {
-      ZKMainServer.main(new String [] {"-server",
-        "localhost:" + htu.getZkCluster().getClientPort(), "delete", znode});
+      ZKMainServer.main(new String [] {"-server", htu.getZkCluster().getAddress().toString(),
+        "delete", znode});
     } catch (ExitException ee) {
       // ZKMS calls System.exit which should trigger this exception.
       exception = true;