HBASE-4746 Use a random ZK client port in unit tests so we can run them in parallel

(Mikhail Bautin)


git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1198856 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Zhihong Yu 2011-11-07 18:22:57 +00:00
parent 02f6104dc2
commit f7e925c8d3
28 changed files with 220 additions and 218 deletions

View File

@ -32,6 +32,8 @@ Release 0.93.0 - Unreleased
(Jieshan Bean) (Jieshan Bean)
HBASE-4696 HRegionThriftServer' might have to indefinitely do redirects (jgray) HBASE-4696 HRegionThriftServer' might have to indefinitely do redirects (jgray)
HBASE-1744 Thrift server to match the new java api (Tim Sell) HBASE-1744 Thrift server to match the new java api (Tim Sell)
HBASE-4746 Use a random ZK client port in unit tests so we can run them in parallel
(Mikhail Bautin)
BUG FIXES BUG FIXES
HBASE-4488 Store could miss rows during flush (Lars H via jgray) HBASE-4488 Store could miss rows during flush (Lars H via jgray)

View File

@ -99,8 +99,22 @@ public final class HConstants {
/** Name of ZooKeeper config file in conf/ directory. */ /** Name of ZooKeeper config file in conf/ directory. */
public static final String ZOOKEEPER_CONFIG_NAME = "zoo.cfg"; public static final String ZOOKEEPER_CONFIG_NAME = "zoo.cfg";
/** Common prefix of ZooKeeper configuration properties */
public static final String ZK_CFG_PROPERTY_PREFIX =
"hbase.zookeeper.property.";
public static final int ZK_CFG_PROPERTY_PREFIX_LEN =
ZK_CFG_PROPERTY_PREFIX.length();
/**
* The ZK client port key in the ZK properties map. The name reflects the
* fact that this is not an HBase configuration key.
*/
public static final String CLIENT_PORT_STR = "clientPort";
/** Parameter name for the client port that the zookeeper listens on */ /** Parameter name for the client port that the zookeeper listens on */
public static final String ZOOKEEPER_CLIENT_PORT = "hbase.zookeeper.property.clientPort"; public static final String ZOOKEEPER_CLIENT_PORT =
ZK_CFG_PROPERTY_PREFIX + CLIENT_PORT_STR;
/** Default client port that the zookeeper listens on */ /** Default client port that the zookeeper listens on */
public static final int DEFAULT_ZOOKEPER_CLIENT_PORT = 2181; public static final int DEFAULT_ZOOKEPER_CLIENT_PORT = 2181;
@ -116,8 +130,16 @@ public final class HConstants {
public static final String DEFAULT_ZOOKEEPER_ZNODE_PARENT = "/hbase"; public static final String DEFAULT_ZOOKEEPER_ZNODE_PARENT = "/hbase";
/** Parameter name for the limit on concurrent client-side zookeeper connections */ /**
public static final String ZOOKEEPER_MAX_CLIENT_CNXNS = "hbase.zookeeper.property.maxClientCnxns"; * Parameter name for the limit on concurrent client-side zookeeper
* connections
*/
public static final String ZOOKEEPER_MAX_CLIENT_CNXNS =
ZK_CFG_PROPERTY_PREFIX + "maxClientCnxns";
/** Parameter name for the ZK data directory */
public static final String ZOOKEEPER_DATA_DIR =
ZK_CFG_PROPERTY_PREFIX + "dataDir";
/** Default limit on concurrent client-side zookeeper connections */ /** Default limit on concurrent client-side zookeeper connections */
public static final int DEFAULT_ZOOKEPER_MAX_CLIENT_CNXNS = 30; public static final int DEFAULT_ZOOKEPER_MAX_CLIENT_CNXNS = 30;

View File

@ -126,37 +126,6 @@ public class HTable implements HTableInterface, Closeable {
private int operationTimeout; private int operationTimeout;
private static final int DOPUT_WB_CHECK = 10; // i.e., doPut checks the writebuffer every X Puts. private static final int DOPUT_WB_CHECK = 10; // i.e., doPut checks the writebuffer every X Puts.
/**
* Creates an object to access a HBase table.
* Internally it creates a new instance of {@link Configuration} and a new
* client to zookeeper as well as other resources. It also comes up with
* a fresh view of the cluster and must do discovery from scratch of region
* locations; i.e. it will not make use of already-cached region locations if
* available. Use only when being quick and dirty.
* @throws IOException if a remote or network exception occurs
* @see #HTable(Configuration, String)
*/
public HTable(final String tableName)
throws IOException {
this(HBaseConfiguration.create(), Bytes.toBytes(tableName));
}
/**
* Creates an object to access a HBase table.
* Internally it creates a new instance of {@link Configuration} and a new
* client to zookeeper as well as other resources. It also comes up with
* a fresh view of the cluster and must do discovery from scratch of region
* locations; i.e. it will not make use of already-cached region locations if
* available. Use only when being quick and dirty.
* @param tableName Name of the table.
* @throws IOException if a remote or network exception occurs
* @see #HTable(Configuration, String)
*/
public HTable(final byte [] tableName)
throws IOException {
this(HBaseConfiguration.create(), tableName);
}
/** /**
* Creates an object to access a HBase table. * Creates an object to access a HBase table.
* Shares zookeeper connection and other resources with other HTable instances * Shares zookeeper connection and other resources with other HTable instances
@ -237,7 +206,9 @@ public class HTable implements HTableInterface, Closeable {
} }
/** /**
* Tells whether or not a table is enabled or not. * Tells whether or not a table is enabled or not. This method creates a
* new HBase configuration, so it might make your unit tests fail due to
* incorrect ZK client port.
* @param tableName Name of table to check. * @param tableName Name of table to check.
* @return {@code true} if table is online. * @return {@code true} if table is online.
* @throws IOException if a remote or network exception occurs * @throws IOException if a remote or network exception occurs
@ -249,11 +220,13 @@ public class HTable implements HTableInterface, Closeable {
} }
/** /**
* Tells whether or not a table is enabled or not. * Tells whether or not a table is enabled or not. This method creates a
* new HBase configuration, so it might make your unit tests fail due to
* incorrect ZK client port.
* @param tableName Name of table to check. * @param tableName Name of table to check.
* @return {@code true} if table is online. * @return {@code true} if table is online.
* @throws IOException if a remote or network exception occurs * @throws IOException if a remote or network exception occurs
* @deprecated use {@link HBaseAdmin#isTableEnabled(byte[])} * @deprecated use {@link HBaseAdmin#isTableEnabled(byte[])}
*/ */
@Deprecated @Deprecated
public static boolean isTableEnabled(byte[] tableName) throws IOException { public static boolean isTableEnabled(byte[] tableName) throws IOException {

View File

@ -345,7 +345,7 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
public HTableWrapper(byte[] tableName) throws IOException { public HTableWrapper(byte[] tableName) throws IOException {
this.tableName = tableName; this.tableName = tableName;
this.table = new HTable(tableName); this.table = new HTable(conf, tableName);
openTables.add(this); openTables.add(this);
} }

View File

@ -28,7 +28,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@ -62,6 +61,9 @@ implements Configurable {
*/ */
public static final String QUORUM_ADDRESS = "hbase.mapred.output.quorum"; public static final String QUORUM_ADDRESS = "hbase.mapred.output.quorum";
/** Optional job parameter to specify peer cluster's ZK client port */
public static final String QUORUM_PORT = "hbase.mapred.output.quorum.port";
/** Optional specification of the rs class name of the peer cluster */ /** Optional specification of the rs class name of the peer cluster */
public static final String public static final String
REGION_SERVER_CLASS = "hbase.mapred.output.rs.class"; REGION_SERVER_CLASS = "hbase.mapred.output.rs.class";
@ -182,6 +184,7 @@ implements Configurable {
throw new IllegalArgumentException("Must specify table name"); throw new IllegalArgumentException("Must specify table name");
} }
String address = this.conf.get(QUORUM_ADDRESS); String address = this.conf.get(QUORUM_ADDRESS);
int zkClientPort = conf.getInt(QUORUM_PORT, 0);
String serverClass = this.conf.get(REGION_SERVER_CLASS); String serverClass = this.conf.get(REGION_SERVER_CLASS);
String serverImpl = this.conf.get(REGION_SERVER_IMPL); String serverImpl = this.conf.get(REGION_SERVER_IMPL);
try { try {
@ -192,6 +195,9 @@ implements Configurable {
this.conf.set(HConstants.REGION_SERVER_CLASS, serverClass); this.conf.set(HConstants.REGION_SERVER_CLASS, serverClass);
this.conf.set(HConstants.REGION_SERVER_IMPL, serverImpl); this.conf.set(HConstants.REGION_SERVER_IMPL, serverImpl);
} }
if (zkClientPort != 0) {
conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkClientPort);
}
this.table = new HTable(this.conf, tableName); this.table = new HTable(this.conf, tableName);
this.table.setAutoFlush(false); this.table.setAutoFlush(false);
LOG.info("Created table instance for " + tableName); LOG.info("Created table instance for " + tableName);

View File

@ -117,10 +117,11 @@ public class HMasterCommandLine extends ServerCommandLine {
if (LocalHBaseCluster.isLocal(conf)) { if (LocalHBaseCluster.isLocal(conf)) {
final MiniZooKeeperCluster zooKeeperCluster = final MiniZooKeeperCluster zooKeeperCluster =
new MiniZooKeeperCluster(); new MiniZooKeeperCluster();
File zkDataPath = new File(conf.get("hbase.zookeeper.property.dataDir")); File zkDataPath = new File(conf.get(HConstants.ZOOKEEPER_DATA_DIR));
int zkClientPort = conf.getInt("hbase.zookeeper.property.clientPort", 0); int zkClientPort = conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, 0);
if (zkClientPort == 0) { if (zkClientPort == 0) {
throw new IOException("No config value for hbase.zookeeper.property.clientPort"); throw new IOException("No config value for "
+ HConstants.ZOOKEEPER_CLIENT_PORT);
} }
zooKeeperCluster.setDefaultClientPort(zkClientPort); zooKeeperCluster.setDefaultClientPort(zkClientPort);
int clientPort = zooKeeperCluster.startup(zkDataPath); int clientPort = zooKeeperCluster.startup(zkDataPath);
@ -131,7 +132,7 @@ public class HMasterCommandLine extends ServerCommandLine {
System.err.println(errorMsg); System.err.println(errorMsg);
throw new IOException(errorMsg); throw new IOException(errorMsg);
} }
conf.set("hbase.zookeeper.property.clientPort", conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
Integer.toString(clientPort)); Integer.toString(clientPort));
// Need to have the zk cluster shutdown when master is shutdown. // Need to have the zk cluster shutdown when master is shutdown.
// Run a subclass that does the zk cluster shutdown on its way out. // Run a subclass that does the zk cluster shutdown on its way out.

View File

@ -23,6 +23,7 @@ import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.*;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.HTablePool; import org.apache.hadoop.hbase.client.HTablePool;
@ -52,7 +53,7 @@ import java.util.concurrent.atomic.AtomicInteger;
public class ThriftHBaseServiceHandler implements THBaseService.Iface { public class ThriftHBaseServiceHandler implements THBaseService.Iface {
// TODO: Size of pool configurable // TODO: Size of pool configurable
private final HTablePool htablePool = new HTablePool(); private final HTablePool htablePool;
private static final Log LOG = LogFactory.getLog(ThriftHBaseServiceHandler.class); private static final Log LOG = LogFactory.getLog(ThriftHBaseServiceHandler.class);
// nextScannerId and scannerMap are used to manage scanner state // nextScannerId and scannerMap are used to manage scanner state
@ -60,6 +61,10 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface {
private final AtomicInteger nextScannerId = new AtomicInteger(0); private final AtomicInteger nextScannerId = new AtomicInteger(0);
private final Map<Integer, ResultScanner> scannerMap = new ConcurrentHashMap<Integer, ResultScanner>(); private final Map<Integer, ResultScanner> scannerMap = new ConcurrentHashMap<Integer, ResultScanner>();
public ThriftHBaseServiceHandler(Configuration conf) {
htablePool = new HTablePool(conf, Integer.MAX_VALUE);
}
private HTableInterface getTable(byte[] tableName) { private HTableInterface getTable(byte[] tableName) {
return htablePool.getTable(tableName); return htablePool.getTable(tableName);
} }

View File

@ -29,6 +29,7 @@ import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser; import org.apache.commons.cli.PosixParser;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.thrift2.generated.THBaseService; import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
import org.apache.thrift.protocol.TBinaryProtocol; import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.protocol.TCompactProtocol; import org.apache.thrift.protocol.TCompactProtocol;
@ -196,7 +197,8 @@ public class ThriftServer {
// Construct correct ProtocolFactory // Construct correct ProtocolFactory
TProtocolFactory protocolFactory = getTProtocolFactory(cmd.hasOption("compact")); TProtocolFactory protocolFactory = getTProtocolFactory(cmd.hasOption("compact"));
THBaseService.Iface handler = new ThriftHBaseServiceHandler(); THBaseService.Iface handler = new ThriftHBaseServiceHandler(
HBaseConfiguration.create());
THBaseService.Processor processor = new THBaseService.Processor(handler); THBaseService.Processor processor = new THBaseService.Processor(handler);
boolean framed = cmd.hasOption("framed") || nonblocking || hsha; boolean framed = cmd.hasOption("framed") || nonblocking || hsha;

View File

@ -430,7 +430,7 @@ public class FSTableDescriptors implements TableDescriptors {
FileSystem fs = FSUtils.getCurrentFileSystem(conf); FileSystem fs = FSUtils.getCurrentFileSystem(conf);
FileStatus status = getTableInfoPath(fs, FSUtils.getRootDir(conf), tableName); FileStatus status = getTableInfoPath(fs, FSUtils.getRootDir(conf), tableName);
// The below deleteDirectory works for either file or directory. // The below deleteDirectory works for either file or directory.
if (status != null && fs.exists(status.getPath())) { if (status != null && fs.exists(status.getPath())) {
FSUtils.deleteDirectory(fs, status.getPath()); FSUtils.deleteDirectory(fs, status.getPath());
} }
} }

View File

@ -373,7 +373,7 @@ public class RegionSplitter {
if (!conf.getBoolean("split.verify", true)) { if (!conf.getBoolean("split.verify", true)) {
// NOTE: createTable is synchronous on the table, but not on the regions // NOTE: createTable is synchronous on the table, but not on the regions
HTable table = new HTable(tableName); HTable table = new HTable(conf, tableName);
int onlineRegions = 0; int onlineRegions = 0;
while (onlineRegions < splitCount) { while (onlineRegions < splitCount) {
onlineRegions = table.getRegionsInfo().size(); onlineRegions = table.getRegionsInfo().size();

View File

@ -30,6 +30,7 @@ import java.net.InetSocketAddress;
import java.net.Socket; import java.net.Socket;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
@ -51,8 +52,10 @@ public class MiniZooKeeperCluster {
private boolean started; private boolean started;
private int defaultClientPort = 21818; // use non-standard port /** The default port. If zero, we use a random port. */
private int clientPort = defaultClientPort; private int defaultClientPort = 0;
private int clientPort;
private List<NIOServerCnxn.Factory> standaloneServerFactoryList; private List<NIOServerCnxn.Factory> standaloneServerFactoryList;
private List<ZooKeeperServer> zooKeeperServers; private List<ZooKeeperServer> zooKeeperServers;
@ -71,11 +74,24 @@ public class MiniZooKeeperCluster {
} }
public void setDefaultClientPort(int clientPort) { public void setDefaultClientPort(int clientPort) {
if (clientPort <= 0) {
throw new IllegalArgumentException("Invalid default ZK client port: "
+ clientPort);
}
this.defaultClientPort = clientPort; this.defaultClientPort = clientPort;
} }
public int getDefaultClientPort() { /**
return defaultClientPort; * Selects a ZK client port. Returns the default port if specified.
* Otherwise, returns a random port. The random port is selected from the
* range between 49152 to 65535. These ports cannot be registered with IANA
* and are intended for dynamic allocation (see http://bit.ly/dynports).
*/
private int selectClientPort() {
if (defaultClientPort > 0) {
return defaultClientPort;
}
return 0xc000 + new Random().nextInt(0x3f00);
} }
public void setTickTime(int tickTime) { public void setTickTime(int tickTime) {
@ -100,8 +116,7 @@ public class MiniZooKeeperCluster {
FileTxnLog.setPreallocSize(100); FileTxnLog.setPreallocSize(100);
} }
public int startup(File baseDir) throws IOException, public int startup(File baseDir) throws IOException, InterruptedException {
InterruptedException {
return startup(baseDir,1); return startup(baseDir,1);
} }
@ -120,11 +135,12 @@ public class MiniZooKeeperCluster {
setupTestEnv(); setupTestEnv();
shutdown(); shutdown();
int tentativePort = selectClientPort();
// running all the ZK servers // running all the ZK servers
for (int i = 0; i < numZooKeeperServers; i++) { for (int i = 0; i < numZooKeeperServers; i++) {
File dir = new File(baseDir, "zookeeper_"+i).getAbsoluteFile(); File dir = new File(baseDir, "zookeeper_"+i).getAbsoluteFile();
recreateDir(dir); recreateDir(dir);
clientPort = defaultClientPort;
int tickTimeToUse; int tickTimeToUse;
if (this.tickTime > 0) { if (this.tickTime > 0) {
tickTimeToUse = this.tickTime; tickTimeToUse = this.tickTime;
@ -135,12 +151,13 @@ public class MiniZooKeeperCluster {
NIOServerCnxn.Factory standaloneServerFactory; NIOServerCnxn.Factory standaloneServerFactory;
while (true) { while (true) {
try { try {
standaloneServerFactory = standaloneServerFactory = new NIOServerCnxn.Factory(
new NIOServerCnxn.Factory(new InetSocketAddress(clientPort)); new InetSocketAddress(tentativePort));
} catch (BindException e) { } catch (BindException e) {
LOG.info("Failed binding ZK Server to client port: " + clientPort); LOG.debug("Failed binding ZK Server to client port: " +
//this port is already in use. try to use another tentativePort);
clientPort++; // This port is already in use, try to use another.
tentativePort++;
continue; continue;
} }
break; break;
@ -148,11 +165,12 @@ public class MiniZooKeeperCluster {
// Start up this ZK server // Start up this ZK server
standaloneServerFactory.startup(server); standaloneServerFactory.startup(server);
if (!waitForServerUp(clientPort, CONNECTION_TIMEOUT)) { if (!waitForServerUp(tentativePort, CONNECTION_TIMEOUT)) {
throw new IOException("Waiting for startup of standalone server"); throw new IOException("Waiting for startup of standalone server");
} }
clientPortList.add(clientPort); // We have selected this port as a client port.
clientPortList.add(tentativePort);
standaloneServerFactoryList.add(standaloneServerFactory); standaloneServerFactoryList.add(standaloneServerFactory);
zooKeeperServers.add(server); zooKeeperServers.add(server);
} }
@ -162,7 +180,7 @@ public class MiniZooKeeperCluster {
started = true; started = true;
clientPort = clientPortList.get(activeZKServerIndex); clientPort = clientPortList.get(activeZKServerIndex);
LOG.info("Started MiniZK Cluster and connect 1 ZK server " + LOG.info("Started MiniZK Cluster and connect 1 ZK server " +
"on client port: " + clientPort); "on client port: " + clientPort);
return clientPort; return clientPort;
} }
@ -345,4 +363,8 @@ public class MiniZooKeeperCluster {
} }
return false; return false;
} }
public int getClientPort() {
return clientPort;
}
} }

View File

@ -45,10 +45,6 @@ public class ZKConfig {
private static final String VARIABLE_END = "}"; private static final String VARIABLE_END = "}";
private static final int VARIABLE_END_LENGTH = VARIABLE_END.length(); private static final int VARIABLE_END_LENGTH = VARIABLE_END.length();
private static final String ZK_CFG_PROPERTY = "hbase.zookeeper.property.";
private static final int ZK_CFG_PROPERTY_SIZE = ZK_CFG_PROPERTY.length();
private static final String ZK_CLIENT_PORT_KEY = "clientPort";
/** /**
* Make a Properties object holding ZooKeeper config equivalent to zoo.cfg. * Make a Properties object holding ZooKeeper config equivalent to zoo.cfg.
* If there is a zoo.cfg in the classpath, simply read it in. Otherwise parse * If there is a zoo.cfg in the classpath, simply read it in. Otherwise parse
@ -78,8 +74,8 @@ public class ZKConfig {
// Directly map all of the hbase.zookeeper.property.KEY properties. // Directly map all of the hbase.zookeeper.property.KEY properties.
for (Entry<String, String> entry : conf) { for (Entry<String, String> entry : conf) {
String key = entry.getKey(); String key = entry.getKey();
if (key.startsWith(ZK_CFG_PROPERTY)) { if (key.startsWith(HConstants.ZK_CFG_PROPERTY_PREFIX)) {
String zkKey = key.substring(ZK_CFG_PROPERTY_SIZE); String zkKey = key.substring(HConstants.ZK_CFG_PROPERTY_PREFIX_LEN);
String value = entry.getValue(); String value = entry.getValue();
// If the value has variables substitutions, need to do a get. // If the value has variables substitutions, need to do a get.
if (value.contains(VARIABLE_START)) { if (value.contains(VARIABLE_START)) {
@ -89,10 +85,10 @@ public class ZKConfig {
} }
} }
// If clientPort is not set, assign the default // If clientPort is not set, assign the default.
if (zkProperties.getProperty(ZK_CLIENT_PORT_KEY) == null) { if (zkProperties.getProperty(HConstants.CLIENT_PORT_STR) == null) {
zkProperties.put(ZK_CLIENT_PORT_KEY, zkProperties.put(HConstants.CLIENT_PORT_STR,
HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT); HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT);
} }
// Create the server.X properties. // Create the server.X properties.

View File

@ -183,7 +183,7 @@ public class ZKUtil {
throws IOException{ throws IOException{
String[] parts = transformClusterKey(key); String[] parts = transformClusterKey(key);
conf.set(HConstants.ZOOKEEPER_QUORUM, parts[0]); conf.set(HConstants.ZOOKEEPER_QUORUM, parts[0]);
conf.set("hbase.zookeeper.property.clientPort", parts[1]); conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, parts[1]);
conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, parts[2]); conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, parts[2]);
} }

View File

@ -413,7 +413,7 @@ public class HBaseTestingUtility {
this.passedZkCluster = false; this.passedZkCluster = false;
this.zkCluster = new MiniZooKeeperCluster(); this.zkCluster = new MiniZooKeeperCluster();
int clientPort = this.zkCluster.startup(dir,zooKeeperServerNum); int clientPort = this.zkCluster.startup(dir,zooKeeperServerNum);
this.conf.set("hbase.zookeeper.property.clientPort", this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
Integer.toString(clientPort)); Integer.toString(clientPort));
return this.zkCluster; return this.zkCluster;
} }
@ -1308,6 +1308,7 @@ public class HBaseTestingUtility {
public void setZkCluster(MiniZooKeeperCluster zkCluster) { public void setZkCluster(MiniZooKeeperCluster zkCluster) {
this.passedZkCluster = true; this.passedZkCluster = true;
this.zkCluster = zkCluster; this.zkCluster = zkCluster;
conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkCluster.getClientPort());
} }
public MiniDFSCluster getDFSCluster() { public MiniDFSCluster getDFSCluster() {
@ -1599,4 +1600,12 @@ public class HBaseTestingUtility {
return "<out_of_range>"; return "<out_of_range>";
} }
} }
public String getClusterKey() {
return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
+ conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":"
+ conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
}
} }

View File

@ -1137,7 +1137,7 @@ public class PerformanceEvaluation {
// just started up // just started up
FileSystem fs = dfsCluster.getFileSystem(); FileSystem fs = dfsCluster.getFileSystem();
conf.set("fs.default.name", fs.getUri().toString()); conf.set("fs.default.name", fs.getUri().toString());
conf.set("hbase.zookeeper.property.clientPort", Integer.toString(zooKeeperPort)); conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(zooKeeperPort));
Path parentdir = fs.getHomeDirectory(); Path parentdir = fs.getHomeDirectory();
conf.set(HConstants.HBASE_DIR, parentdir.toString()); conf.set(HConstants.HBASE_DIR, parentdir.toString());
fs.mkdirs(parentdir); fs.mkdirs(parentdir);

View File

@ -69,8 +69,8 @@ public class TestHBaseTestingUtility {
// Cluster 2 // Cluster 2
HBaseTestingUtility htu2 = new HBaseTestingUtility(); HBaseTestingUtility htu2 = new HBaseTestingUtility();
htu2.getConfiguration().set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2"); htu2.getConfiguration().set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
htu2.getConfiguration().set("hbase.zookeeper.property.clientPort", htu2.getConfiguration().set(HConstants.ZOOKEEPER_CLIENT_PORT,
htu1.getConfiguration().get("hbase.zookeeper.property.clientPort", "-1")); htu1.getConfiguration().get(HConstants.ZOOKEEPER_CLIENT_PORT, "-1"));
htu2.setZkCluster(htu1.getZkCluster()); htu2.setZkCluster(htu1.getZkCluster());
// Cluster 3; seed it with the conf from htu1 so we pickup the 'right' // Cluster 3; seed it with the conf from htu1 so we pickup the 'right'
@ -78,8 +78,8 @@ public class TestHBaseTestingUtility {
// start of minizkcluster. // start of minizkcluster.
HBaseTestingUtility htu3 = new HBaseTestingUtility(); HBaseTestingUtility htu3 = new HBaseTestingUtility();
htu3.getConfiguration().set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/3"); htu3.getConfiguration().set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/3");
htu3.getConfiguration().set("hbase.zookeeper.property.clientPort", htu3.getConfiguration().set(HConstants.ZOOKEEPER_CLIENT_PORT,
htu1.getConfiguration().get("hbase.zookeeper.property.clientPort", "-1")); htu1.getConfiguration().get(HConstants.ZOOKEEPER_CLIENT_PORT, "-1"));
htu3.setZkCluster(htu1.getZkCluster()); htu3.setZkCluster(htu1.getZkCluster());
try { try {

View File

@ -235,7 +235,7 @@ public class TestZooKeeper {
assertEquals(znode, parts[2]); assertEquals(znode, parts[2]);
ZKUtil.applyClusterKeyToConf(conf, key); ZKUtil.applyClusterKeyToConf(conf, key);
assertEquals(parts[0], conf.get(HConstants.ZOOKEEPER_QUORUM)); assertEquals(parts[0], conf.get(HConstants.ZOOKEEPER_QUORUM));
assertEquals(parts[1], conf.get("hbase.zookeeper.property.clientPort")); assertEquals(parts[1], conf.get(HConstants.ZOOKEEPER_CLIENT_PORT));
assertEquals(parts[2], conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT)); assertEquals(parts[2], conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT));
String reconstructedKey = ZKUtil.getZooKeeperClusterKey(conf); String reconstructedKey = ZKUtil.getZooKeeperClusterKey(conf);
assertEquals(key, reconstructedKey); assertEquals(key, reconstructedKey);

View File

@ -21,9 +21,6 @@
package org.apache.hadoop.hbase.coprocessor; package org.apache.hadoop.hbase.coprocessor;
import java.io.IOException; import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.*;
@ -31,10 +28,7 @@ import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.AfterClass; import org.junit.AfterClass;
import org.junit.BeforeClass; import org.junit.BeforeClass;
import org.junit.Test; import org.junit.Test;
@ -75,7 +69,8 @@ public class TestRegionServerCoprocessorExceptionWithAbort {
byte[] TEST_FAMILY = Bytes.toBytes("aaa"); byte[] TEST_FAMILY = Bytes.toBytes("aaa");
HTable table = TEST_UTIL.createTable(TEST_TABLE, TEST_FAMILY); HTable table = TEST_UTIL.createTable(TEST_TABLE, TEST_FAMILY);
TEST_UTIL.createMultiRegions(table, TEST_FAMILY); TEST_UTIL.waitUntilAllRegionsAssigned(
TEST_UTIL.createMultiRegions(table, TEST_FAMILY));
// Note which regionServer will abort (after put is attempted). // Note which regionServer will abort (after put is attempted).
HRegionServer regionServer = HRegionServer regionServer =

View File

@ -21,9 +21,6 @@
package org.apache.hadoop.hbase.coprocessor; package org.apache.hadoop.hbase.coprocessor;
import java.io.IOException; import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.*;
@ -32,10 +29,7 @@ import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.AfterClass; import org.junit.AfterClass;
import org.junit.BeforeClass; import org.junit.BeforeClass;
import org.junit.Test; import org.junit.Test;
@ -51,6 +45,7 @@ import static org.junit.Assert.*;
*/ */
public class TestRegionServerCoprocessorExceptionWithRemove { public class TestRegionServerCoprocessorExceptionWithRemove {
public static class BuggyRegionObserver extends SimpleRegionObserver { public static class BuggyRegionObserver extends SimpleRegionObserver {
@SuppressWarnings("null")
@Override @Override
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> c, public void prePut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Put put, final WALEdit edit,
@ -66,8 +61,6 @@ public class TestRegionServerCoprocessorExceptionWithRemove {
private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static ZooKeeperWatcher zkw = null;
@BeforeClass @BeforeClass
public static void setupBeforeClass() throws Exception { public static void setupBeforeClass() throws Exception {
// set configure to indicate which cp should be loaded // set configure to indicate which cp should be loaded
@ -97,7 +90,8 @@ public class TestRegionServerCoprocessorExceptionWithRemove {
byte[] TEST_FAMILY = Bytes.toBytes("aaa"); byte[] TEST_FAMILY = Bytes.toBytes("aaa");
HTable table = TEST_UTIL.createTable(TEST_TABLE, TEST_FAMILY); HTable table = TEST_UTIL.createTable(TEST_TABLE, TEST_FAMILY);
TEST_UTIL.createMultiRegions(table, TEST_FAMILY); TEST_UTIL.waitUntilAllRegionsAssigned(
TEST_UTIL.createMultiRegions(table, TEST_FAMILY));
// Note which regionServer that should survive the buggy coprocessor's // Note which regionServer that should survive the buggy coprocessor's
// prePut(). // prePut().
HRegionServer regionServer = HRegionServer regionServer =

View File

@ -25,7 +25,6 @@ import static org.junit.Assert.fail;
import java.io.IOException; import java.io.IOException;
import java.util.List; import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
@ -48,30 +47,27 @@ import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.After; import org.junit.After;
import org.junit.AfterClass; import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test; import org.junit.Test;
public class TestMasterReplication { public class TestMasterReplication {
private static final Log LOG = LogFactory.getLog(TestReplication.class); private static final Log LOG = LogFactory.getLog(TestReplication.class);
private static Configuration conf1; private Configuration conf1;
private static Configuration conf2; private Configuration conf2;
private static Configuration conf3; private Configuration conf3;
private static String clusterKey1; private HBaseTestingUtility utility1;
private static String clusterKey2; private HBaseTestingUtility utility2;
private static String clusterKey3; private HBaseTestingUtility utility3;
private MiniZooKeeperCluster miniZK;
private static HBaseTestingUtility utility1;
private static HBaseTestingUtility utility2;
private static HBaseTestingUtility utility3;
private static final long SLEEP_TIME = 500; private static final long SLEEP_TIME = 500;
private static final int NB_RETRIES = 10; private static final int NB_RETRIES = 10;
@ -86,10 +82,10 @@ public class TestMasterReplication {
private static final byte[] put = Bytes.toBytes("put"); private static final byte[] put = Bytes.toBytes("put");
private static final byte[] delete = Bytes.toBytes("delete"); private static final byte[] delete = Bytes.toBytes("delete");
private static HTableDescriptor table; private HTableDescriptor table;
@BeforeClass @Before
public static void setUpBeforeClass() throws Exception { public void setUp() throws Exception {
conf1 = HBaseConfiguration.create(); conf1 = HBaseConfiguration.create();
conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1"); conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
// smaller block size and capacity to trigger more operations // smaller block size and capacity to trigger more operations
@ -103,36 +99,31 @@ public class TestMasterReplication {
conf1.setBoolean("dfs.support.append", true); conf1.setBoolean("dfs.support.append", true);
conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100); conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
"org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter"); CoprocessorCounter.class.getName());
utility1 = new HBaseTestingUtility(conf1); utility1 = new HBaseTestingUtility(conf1);
utility1.startMiniZKCluster(); utility1.startMiniZKCluster();
MiniZooKeeperCluster miniZK = utility1.getZkCluster(); miniZK = utility1.getZkCluster();
// By setting the mini ZK cluster through this method, even though this is
// already utility1's mini ZK cluster, we are telling utility1 not to shut
// the mini ZK cluster when we shut down the HBase cluster.
utility1.setZkCluster(miniZK);
new ZooKeeperWatcher(conf1, "cluster1", null, true); new ZooKeeperWatcher(conf1, "cluster1", null, true);
conf2 = new Configuration(conf1); conf2 = new Configuration(conf1);
conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2"); conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
conf3 = new Configuration(conf1);
conf3.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/3");
utility2 = new HBaseTestingUtility(conf2); utility2 = new HBaseTestingUtility(conf2);
utility2.setZkCluster(miniZK); utility2.setZkCluster(miniZK);
new ZooKeeperWatcher(conf2, "cluster3", null, true); new ZooKeeperWatcher(conf2, "cluster2", null, true);
conf3 = new Configuration(conf1);
conf3.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/3");
utility3 = new HBaseTestingUtility(conf3); utility3 = new HBaseTestingUtility(conf3);
utility3.setZkCluster(miniZK); utility3.setZkCluster(miniZK);
new ZooKeeperWatcher(conf3, "cluster3", null, true); new ZooKeeperWatcher(conf3, "cluster3", null, true);
clusterKey1 = conf1.get(HConstants.ZOOKEEPER_QUORUM)+":" +
conf1.get("hbase.zookeeper.property.clientPort")+":/1";
clusterKey2 = conf2.get(HConstants.ZOOKEEPER_QUORUM)+":" +
conf2.get("hbase.zookeeper.property.clientPort")+":/2";
clusterKey3 = conf3.get(HConstants.ZOOKEEPER_QUORUM)+":" +
conf3.get("hbase.zookeeper.property.clientPort")+":/3";
table = new HTableDescriptor(tableName); table = new HTableDescriptor(tableName);
HColumnDescriptor fam = new HColumnDescriptor(famName); HColumnDescriptor fam = new HColumnDescriptor(famName);
fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL); fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
@ -141,6 +132,11 @@ public class TestMasterReplication {
table.addFamily(fam); table.addFamily(fam);
} }
@After
public void tearDown() throws IOException {
miniZK.shutdown();
}
@Test(timeout=300000) @Test(timeout=300000)
public void testCyclicReplication() throws Exception { public void testCyclicReplication() throws Exception {
LOG.info("testCyclicReplication"); LOG.info("testCyclicReplication");
@ -161,9 +157,9 @@ public class TestMasterReplication {
HTable htable3 = new HTable(conf3, tableName); HTable htable3 = new HTable(conf3, tableName);
htable3.setWriteBufferSize(1024); htable3.setWriteBufferSize(1024);
admin1.addPeer("1", clusterKey2); admin1.addPeer("1", utility2.getClusterKey());
admin2.addPeer("1", clusterKey3); admin2.addPeer("1", utility3.getClusterKey());
admin3.addPeer("1", clusterKey1); admin3.addPeer("1", utility1.getClusterKey());
// put "row" and wait 'til it got around // put "row" and wait 'til it got around
putAndWait(row, famName, htable1, htable3); putAndWait(row, famName, htable1, htable3);
@ -213,8 +209,8 @@ public class TestMasterReplication {
htable2.setWriteBufferSize(1024); htable2.setWriteBufferSize(1024);
// set M-M // set M-M
admin1.addPeer("1", clusterKey2); admin1.addPeer("1", utility2.getClusterKey());
admin2.addPeer("1", clusterKey1); admin2.addPeer("1", utility1.getClusterKey());
// add rows to both clusters, // add rows to both clusters,
// make sure they are both replication // make sure they are both replication

View File

@ -56,9 +56,6 @@ public class TestMultiSlaveReplication {
private static Configuration conf2; private static Configuration conf2;
private static Configuration conf3; private static Configuration conf3;
private static String clusterKey2;
private static String clusterKey3;
private static HBaseTestingUtility utility1; private static HBaseTestingUtility utility1;
private static HBaseTestingUtility utility2; private static HBaseTestingUtility utility2;
private static HBaseTestingUtility utility3; private static HBaseTestingUtility utility3;
@ -111,12 +108,6 @@ public class TestMultiSlaveReplication {
utility3.setZkCluster(miniZK); utility3.setZkCluster(miniZK);
new ZooKeeperWatcher(conf3, "cluster3", null, true); new ZooKeeperWatcher(conf3, "cluster3", null, true);
clusterKey2 = conf2.get(HConstants.ZOOKEEPER_QUORUM)+":" +
conf2.get("hbase.zookeeper.property.clientPort")+":/2";
clusterKey3 = conf3.get(HConstants.ZOOKEEPER_QUORUM)+":" +
conf3.get("hbase.zookeeper.property.clientPort")+":/3";
table = new HTableDescriptor(tableName); table = new HTableDescriptor(tableName);
HColumnDescriptor fam = new HColumnDescriptor(famName); HColumnDescriptor fam = new HColumnDescriptor(famName);
fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL); fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
@ -143,7 +134,7 @@ public class TestMultiSlaveReplication {
HTable htable3 = new HTable(conf3, tableName); HTable htable3 = new HTable(conf3, tableName);
htable3.setWriteBufferSize(1024); htable3.setWriteBufferSize(1024);
admin1.addPeer("1", clusterKey2); admin1.addPeer("1", utility2.getClusterKey());
// put "row" and wait 'til it got around, then delete // put "row" and wait 'til it got around, then delete
putAndWait(row, famName, htable1, htable2); putAndWait(row, famName, htable1, htable2);
@ -158,7 +149,7 @@ public class TestMultiSlaveReplication {
// after the log was rolled put a new row // after the log was rolled put a new row
putAndWait(row3, famName, htable1, htable2); putAndWait(row3, famName, htable1, htable2);
admin1.addPeer("2", clusterKey3); admin1.addPeer("2", utility3.getClusterKey());
// put a row, check it was replicated to all clusters // put a row, check it was replicated to all clusters
putAndWait(row1, famName, htable1, htable2, htable3); putAndWait(row1, famName, htable1, htable2, htable3);

View File

@ -65,7 +65,6 @@ public class TestReplication {
private static ZooKeeperWatcher zkw2; private static ZooKeeperWatcher zkw2;
private static ReplicationAdmin admin; private static ReplicationAdmin admin;
private static String slaveClusterKey;
private static HTable htable1; private static HTable htable1;
private static HTable htable2; private static HTable htable2;
@ -122,9 +121,7 @@ public class TestReplication {
utility2.setZkCluster(miniZK); utility2.setZkCluster(miniZK);
zkw2 = new ZooKeeperWatcher(conf2, "cluster2", null, true); zkw2 = new ZooKeeperWatcher(conf2, "cluster2", null, true);
slaveClusterKey = conf2.get(HConstants.ZOOKEEPER_QUORUM)+":" + admin.addPeer("2", utility2.getClusterKey());
conf2.get("hbase.zookeeper.property.clientPort")+":/2";
admin.addPeer("2", slaveClusterKey);
setIsReplication(true); setIsReplication(true);
LOG.info("Setup second Zk"); LOG.info("Setup second Zk");
@ -389,7 +386,7 @@ public class TestReplication {
} }
} }
admin.addPeer("2", slaveClusterKey); admin.addPeer("2", utility2.getClusterKey());
Thread.sleep(SLEEP_TIME); Thread.sleep(SLEEP_TIME);
rowKey = Bytes.toBytes("do rep"); rowKey = Bytes.toBytes("do rep");
put = new Put(rowKey); put = new Put(rowKey);

View File

@ -103,9 +103,9 @@ public class TestReplicationSourceManager {
zkw = new ZooKeeperWatcher(conf, "test", null); zkw = new ZooKeeperWatcher(conf, "test", null);
ZKUtil.createWithParents(zkw, "/hbase/replication"); ZKUtil.createWithParents(zkw, "/hbase/replication");
ZKUtil.createWithParents(zkw, "/hbase/replication/peers/1"); ZKUtil.createWithParents(zkw, "/hbase/replication/peers/1");
ZKUtil.setData(zkw, "/hbase/replication/peers/1",Bytes.toBytes( ZKUtil.setData(zkw, "/hbase/replication/peers/1",
conf.get(HConstants.ZOOKEEPER_QUORUM)+":" + Bytes.toBytes(conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
conf.get("hbase.zookeeper.property.clientPort")+":/1")); + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":/1"));
ZKUtil.createWithParents(zkw, "/hbase/replication/state"); ZKUtil.createWithParents(zkw, "/hbase/replication/state");
ZKUtil.setData(zkw, "/hbase/replication/state", Bytes.toBytes("true")); ZKUtil.setData(zkw, "/hbase/replication/state", Bytes.toBytes("true"));

View File

@ -113,9 +113,13 @@ public class TestThriftHBaseServiceHandler {
} }
private ThriftHBaseServiceHandler createHandler() {
return new ThriftHBaseServiceHandler(UTIL.getConfiguration());
}
@Test @Test
public void testExists() throws TIOError, TException { public void testExists() throws TIOError, TException {
ThriftHBaseServiceHandler handler = new ThriftHBaseServiceHandler(); ThriftHBaseServiceHandler handler = createHandler();
byte[] rowName = "testExists".getBytes(); byte[] rowName = "testExists".getBytes();
ByteBuffer table = ByteBuffer.wrap(tableAname); ByteBuffer table = ByteBuffer.wrap(tableAname);
@ -137,7 +141,7 @@ public class TestThriftHBaseServiceHandler {
@Test @Test
public void testPutGet() throws Exception { public void testPutGet() throws Exception {
ThriftHBaseServiceHandler handler = new ThriftHBaseServiceHandler(); ThriftHBaseServiceHandler handler = createHandler();
byte[] rowName = "testPutGet".getBytes(); byte[] rowName = "testPutGet".getBytes();
ByteBuffer table = ByteBuffer.wrap(tableAname); ByteBuffer table = ByteBuffer.wrap(tableAname);
@ -162,7 +166,7 @@ public class TestThriftHBaseServiceHandler {
@Test @Test
public void testPutGetMultiple() throws Exception { public void testPutGetMultiple() throws Exception {
ThriftHBaseServiceHandler handler = new ThriftHBaseServiceHandler(); ThriftHBaseServiceHandler handler = createHandler();
ByteBuffer table = ByteBuffer.wrap(tableAname); ByteBuffer table = ByteBuffer.wrap(tableAname);
byte[] rowName1 = "testPutGetMultiple1".getBytes(); byte[] rowName1 = "testPutGetMultiple1".getBytes();
byte[] rowName2 = "testPutGetMultiple2".getBytes(); byte[] rowName2 = "testPutGetMultiple2".getBytes();
@ -194,7 +198,7 @@ public class TestThriftHBaseServiceHandler {
@Test @Test
public void testDeleteMultiple() throws Exception { public void testDeleteMultiple() throws Exception {
ThriftHBaseServiceHandler handler = new ThriftHBaseServiceHandler(); ThriftHBaseServiceHandler handler = createHandler();
ByteBuffer table = ByteBuffer.wrap(tableAname); ByteBuffer table = ByteBuffer.wrap(tableAname);
byte[] rowName1 = "testDeleteMultiple1".getBytes(); byte[] rowName1 = "testDeleteMultiple1".getBytes();
byte[] rowName2 = "testDeleteMultiple2".getBytes(); byte[] rowName2 = "testDeleteMultiple2".getBytes();
@ -224,7 +228,7 @@ public class TestThriftHBaseServiceHandler {
@Test @Test
public void testDelete() throws Exception { public void testDelete() throws Exception {
ThriftHBaseServiceHandler handler = new ThriftHBaseServiceHandler(); ThriftHBaseServiceHandler handler = createHandler();
byte[] rowName = "testDelete".getBytes(); byte[] rowName = "testDelete".getBytes();
ByteBuffer table = ByteBuffer.wrap(tableAname); ByteBuffer table = ByteBuffer.wrap(tableAname);
@ -261,7 +265,7 @@ public class TestThriftHBaseServiceHandler {
@Test @Test
public void testDeleteAllTimestamps() throws Exception { public void testDeleteAllTimestamps() throws Exception {
ThriftHBaseServiceHandler handler = new ThriftHBaseServiceHandler(); ThriftHBaseServiceHandler handler = createHandler();
byte[] rowName = "testDeleteAllTimestamps".getBytes(); byte[] rowName = "testDeleteAllTimestamps".getBytes();
ByteBuffer table = ByteBuffer.wrap(tableAname); ByteBuffer table = ByteBuffer.wrap(tableAname);
@ -301,7 +305,7 @@ public class TestThriftHBaseServiceHandler {
@Test @Test
public void testDeleteSingleTimestamp() throws Exception { public void testDeleteSingleTimestamp() throws Exception {
ThriftHBaseServiceHandler handler = new ThriftHBaseServiceHandler(); ThriftHBaseServiceHandler handler = createHandler();
byte[] rowName = "testDeleteSingleTimestamp".getBytes(); byte[] rowName = "testDeleteSingleTimestamp".getBytes();
ByteBuffer table = ByteBuffer.wrap(tableAname); ByteBuffer table = ByteBuffer.wrap(tableAname);
@ -346,7 +350,7 @@ public class TestThriftHBaseServiceHandler {
@Test @Test
public void testIncrement() throws Exception { public void testIncrement() throws Exception {
ThriftHBaseServiceHandler handler = new ThriftHBaseServiceHandler(); ThriftHBaseServiceHandler handler = createHandler();
byte[] rowName = "testIncrement".getBytes(); byte[] rowName = "testIncrement".getBytes();
ByteBuffer table = ByteBuffer.wrap(tableAname); ByteBuffer table = ByteBuffer.wrap(tableAname);
@ -379,7 +383,7 @@ public class TestThriftHBaseServiceHandler {
*/ */
@Test @Test
public void testCheckAndPut() throws Exception { public void testCheckAndPut() throws Exception {
ThriftHBaseServiceHandler handler = new ThriftHBaseServiceHandler(); ThriftHBaseServiceHandler handler = createHandler();
byte[] rowName = "testCheckAndPut".getBytes(); byte[] rowName = "testCheckAndPut".getBytes();
ByteBuffer table = ByteBuffer.wrap(tableAname); ByteBuffer table = ByteBuffer.wrap(tableAname);
@ -426,7 +430,7 @@ public class TestThriftHBaseServiceHandler {
*/ */
@Test @Test
public void testCheckAndDelete() throws Exception { public void testCheckAndDelete() throws Exception {
ThriftHBaseServiceHandler handler = new ThriftHBaseServiceHandler(); ThriftHBaseServiceHandler handler = createHandler();
byte[] rowName = "testCheckAndDelete".getBytes(); byte[] rowName = "testCheckAndDelete".getBytes();
ByteBuffer table = ByteBuffer.wrap(tableAname); ByteBuffer table = ByteBuffer.wrap(tableAname);
@ -469,7 +473,7 @@ public class TestThriftHBaseServiceHandler {
@Test @Test
public void testScan() throws Exception { public void testScan() throws Exception {
ThriftHBaseServiceHandler handler = new ThriftHBaseServiceHandler(); ThriftHBaseServiceHandler handler = createHandler();
ByteBuffer table = ByteBuffer.wrap(tableAname); ByteBuffer table = ByteBuffer.wrap(tableAname);
TScan scan = new TScan(); TScan scan = new TScan();

View File

@ -60,7 +60,7 @@ public class TestMergeTool extends HBaseTestCase {
// find a zk ensemble put up by another concurrent test and this will // find a zk ensemble put up by another concurrent test and this will
// mess up this test. Choose unlikely port. Default test port is 21818. // mess up this test. Choose unlikely port. Default test port is 21818.
// Default zk port is 2181. // Default zk port is 2181.
this.conf.setInt("hbase.zookeeper.property.clientPort", 10001); this.conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 10001);
this.conf.set("hbase.hstore.compactionThreshold", "2"); this.conf.set("hbase.hstore.compactionThreshold", "2");

View File

@ -336,18 +336,4 @@ public class TestRegionSplitter {
} }
return -1; return -1;
} }
/**
* Inserts some meaningless data into a CF so the regions can be split.
*/
static void insertSomeData(String table) throws IOException {
HTable hTable = new HTable(table);
for(byte b=Byte.MIN_VALUE; b<Byte.MAX_VALUE; b++) {
byte[] whateverBytes = new byte[] {b};
Put p = new Put(whateverBytes);
p.setWriteToWAL(false);
p.add(CF_NAME.getBytes(), whateverBytes, whateverBytes);
hTable.put(p);
}
}
} }

View File

@ -50,8 +50,8 @@ public class TestHQuorumPeer {
@Before public void setup() throws IOException { @Before public void setup() throws IOException {
// Set it to a non-standard port. // Set it to a non-standard port.
TEST_UTIL.getConfiguration().setInt("hbase.zookeeper.property.clientPort", TEST_UTIL.getConfiguration().setInt(HConstants.ZOOKEEPER_CLIENT_PORT,
PORT_NO); PORT_NO);
this.dataDir = TEST_UTIL.getDataTestDir(this.getClass().getName()); this.dataDir = TEST_UTIL.getDataTestDir(this.getClass().getName());
FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration()); FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
if (fs.exists(this.dataDir)) { if (fs.exists(this.dataDir)) {
@ -66,7 +66,7 @@ public class TestHQuorumPeer {
@Test public void testMakeZKProps() { @Test public void testMakeZKProps() {
Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
conf.set("hbase.zookeeper.property.dataDir", this.dataDir.toString()); conf.set(HConstants.ZOOKEEPER_DATA_DIR, this.dataDir.toString());
Properties properties = ZKConfig.makeZKProps(conf); Properties properties = ZKConfig.makeZKProps(conf);
assertEquals(dataDir.toString(), (String)properties.get("dataDir")); assertEquals(dataDir.toString(), (String)properties.get("dataDir"));
assertEquals(Integer.valueOf(PORT_NO), assertEquals(Integer.valueOf(PORT_NO),

View File

@ -24,6 +24,7 @@ import static org.junit.Assert.assertTrue;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.junit.Test; import org.junit.Test;
@ -32,10 +33,10 @@ public class TestZooKeeperMainServerArg {
@Test public void test() { @Test public void test() {
Configuration c = HBaseConfiguration.create(); Configuration c = HBaseConfiguration.create();
assertEquals("localhost:" + c.get("hbase.zookeeper.property.clientPort"), assertEquals("localhost:" + c.get(HConstants.ZOOKEEPER_CLIENT_PORT),
parser.parse(c)); parser.parse(c));
final String port = "1234"; final String port = "1234";
c.set("hbase.zookeeper.property.clientPort", port); c.set(HConstants.ZOOKEEPER_CLIENT_PORT, port);
c.set("hbase.zookeeper.quorum", "example.com"); c.set("hbase.zookeeper.quorum", "example.com");
assertEquals("example.com:" + port, parser.parse(c)); assertEquals("example.com:" + port, parser.parse(c));
c.set("hbase.zookeeper.quorum", "example1.com,example2.com,example3.com"); c.set("hbase.zookeeper.quorum", "example1.com,example2.com,example3.com");