diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 55b927f7eee..d935a08a533 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagUtil;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Consistency;
@@ -79,6 +78,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.ByteStringer;
import org.apache.hadoop.hbase.util.Bytes;
@@ -1839,4 +1839,4 @@ public final class ProtobufUtil {
int port = Addressing.parsePort(str);
return ServerName.valueOf(hostname, port, -1L);
}
-}
\ No newline at end of file
+}
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
index f83df33df8e..75199a61dc3 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
@@ -93,9 +93,9 @@ public class TestAsyncProcess {
private static final byte[] FAILS = "FAILS".getBytes();
private static final Configuration CONF = new Configuration();
private static final ConnectionConfiguration CONNECTION_CONFIG = new ConnectionConfiguration(CONF);
- private static final ServerName sn = ServerName.valueOf("s1:1,1");
- private static final ServerName sn2 = ServerName.valueOf("s2:2,2");
- private static final ServerName sn3 = ServerName.valueOf("s3:3,3");
+ private static final ServerName sn = ServerName.valueOf("s1,1,1");
+ private static final ServerName sn2 = ServerName.valueOf("s2,2,2");
+ private static final ServerName sn3 = ServerName.valueOf("s3,3,3");
private static final HRegionInfo hri1 =
new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1);
private static final HRegionInfo hri2 =
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java
index c0ece3ca15a..531fd0fe151 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java
@@ -54,8 +54,8 @@ public class TestSimpleRequestController {
private static final byte[] DUMMY_BYTES_1 = "DUMMY_BYTES_1".getBytes();
private static final byte[] DUMMY_BYTES_2 = "DUMMY_BYTES_2".getBytes();
private static final byte[] DUMMY_BYTES_3 = "DUMMY_BYTES_3".getBytes();
- private static final ServerName SN = ServerName.valueOf("s1:1,1");
- private static final ServerName SN2 = ServerName.valueOf("s2:2,2");
+ private static final ServerName SN = ServerName.valueOf("s1,1,1");
+ private static final ServerName SN2 = ServerName.valueOf("s2,2,2");
private static final HRegionInfo HRI1
= new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1);
private static final HRegionInfo HRI2
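Note on the two test diffs above: ServerName.valueOf(String) expects the comma-separated hostname,port,startcode form (see the ServerName javadoc later in this patch), and the new Address-backed constructor validates the hostname, so the old "s1:1,1" spelling (a colon inside the host part) presumably no longer parses; the literals become "s1,1,1". A minimal hedged sketch of the format (class name and printed values are illustrative only):

    import org.apache.hadoop.hbase.ServerName;

    public class ServerNameFormatExample {
      public static void main(String[] args) {
        // hostname, port and startcode are comma-separated; "host:port,startcode" is not accepted.
        ServerName sn = ServerName.valueOf("s1,1,1");
        System.out.println(sn.getHostname());   // s1
        System.out.println(sn.getPort());       // 1
        System.out.println(sn.getStartcode());  // 1
      }
    }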
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java
index 4ae500a2df6..3166f6d9fd1 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java
@@ -29,13 +29,15 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
-import com.google.common.net.HostAndPort;
import com.google.common.net.InetAddresses;
+import org.apache.hadoop.hbase.util.Address;
+
+
/**
- * Instance of an HBase ServerName.
- * A server name is used uniquely identifying a server instance in a cluster and is made
- * of the combination of hostname, port, and startcode. The startcode distingushes restarted
+ * Name of a particular incarnation of an HBase Server.
+ * A {@link ServerName} is used to uniquely identify a server instance in a cluster and is made
+ * of the combination of hostname, port, and startcode. The startcode distinguishes restarted
* servers on same hostname and port (startcode is usually timestamp of server startup). The
* {@link #toString()} format of ServerName is safe to use in the filesystem and as znode name
* up in ZooKeeper. Its format is:
@@ -44,15 +46,19 @@ import com.google.common.net.InetAddresses;
* For example, if hostname is www.example.org, port is 1234,
* and the startcode for the regionserver is 1212121212, then
* the {@link #toString()} would be www.example.org,1234,1212121212.
- *
+ *
* <p>You can obtain a versioned serialized form of this class by calling
- * {@link #getVersionedBytes()}. To deserialize, call {@link #parseVersionedServerName(byte[])}
- *
+ * {@link #getVersionedBytes()}. To deserialize, call
+ * {@link #parseVersionedServerName(byte[])}.
+ *
+ * <p>Use {@link #getAddress()} to obtain the Server hostname + port
+ * (Endpoint/Socket Address).
+ *
* <p>Immutable.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
- public class ServerName implements Comparable<ServerName>, Serializable {
+public class ServerName implements Comparable<ServerName>, Serializable {
private static final long serialVersionUID = 1367463982557264981L;
/**
@@ -86,10 +92,8 @@ import com.google.common.net.InetAddresses;
public static final String UNKNOWN_SERVERNAME = "#unknown#";
private final String servername;
- private final String hostnameOnly;
- private final int port;
private final long startcode;
- private transient HostAndPort hostAndPort;
+ private transient Address address;
/**
* Cached versioned bytes of this ServerName instance.
@@ -99,23 +103,15 @@ import com.google.common.net.InetAddresses;
public static final List<ServerName> EMPTY_SERVER_LIST = new ArrayList<ServerName>(0);
protected ServerName(final String hostname, final int port, final long startcode) {
- // Drop the domain is there is one; no need of it in a local cluster. With it, we get long
- // unwieldy names.
- this.hostnameOnly = hostname;
- this.port = port;
- this.startcode = startcode;
- this.servername = getServerName(hostname, port, startcode);
+ this(Address.fromParts(hostname, port), startcode);
}
- /**
- * @param hostname
- * @return hostname minus the domain, if there is one (will do pass-through on ip addresses)
- */
- static String getHostNameMinusDomain(final String hostname) {
- if (InetAddresses.isInetAddress(hostname)) return hostname;
- String [] parts = hostname.split("\\.");
- if (parts == null || parts.length == 0) return hostname;
- return parts[0];
+ private ServerName(final Address address, final long startcode) {
+ // Use Address to hold the port and hostname. It does validation and can handle ipv6.
+ this.address = address;
+ this.startcode = startcode;
+ this.servername = getServerName(this.address.getHostname(),
+ this.address.getPort(), startcode);
}
private ServerName(final String serverName) {
@@ -124,10 +120,28 @@ import com.google.common.net.InetAddresses;
}
private ServerName(final String hostAndPort, final long startCode) {
- this(Addressing.parseHostname(hostAndPort),
- Addressing.parsePort(hostAndPort), startCode);
+ this(Address.fromString(hostAndPort), startCode);
}
+ /**
+ * @param hostname
+ * @return hostname minus the domain, if there is one (will do pass-through on ip addresses)
+ * @deprecated Since 2.0. This is for internal use only.
+ */
+ @Deprecated
+ // Make this private in hbase-3.0.
+ static String getHostNameMinusDomain(final String hostname) {
+ if (InetAddresses.isInetAddress(hostname)) return hostname;
+ String [] parts = hostname.split("\\.");
+ if (parts == null || parts.length == 0) return hostname;
+ return parts[0];
+ }
+
+ /**
+ * @deprecated Since 2.0. Use {@link #valueOf(String)}
+ */
+ @Deprecated
+ // This is unused. Get rid of it.
public static String parseHostname(final String serverName) {
if (serverName == null || serverName.length() <= 0) {
throw new IllegalArgumentException("Passed hostname is null or empty");
@@ -139,11 +153,21 @@ import com.google.common.net.InetAddresses;
return serverName.substring(0, index);
}
+ /**
+ * @deprecated Since 2.0. Use {@link #valueOf(String)}
+ */
+ @Deprecated
+ // This is unused. Get rid of it.
public static int parsePort(final String serverName) {
String [] split = serverName.split(SERVERNAME_SEPARATOR);
return Integer.parseInt(split[1]);
}
+ /**
+ * @deprecated Since 2.0. Use {@link #valueOf(String)}
+ */
+ @Deprecated
+ // This is unused. Get rid of it.
public static long parseStartcode(final String serverName) {
int index = serverName.lastIndexOf(SERVERNAME_SEPARATOR);
return Long.parseLong(serverName.substring(index + 1));
@@ -189,7 +213,8 @@ import com.google.common.net.InetAddresses;
*/
public String toShortString() {
return Addressing.createHostAndPortStr(
- getHostNameMinusDomain(hostnameOnly), port);
+ getHostNameMinusDomain(this.address.getHostname()),
+ this.address.getPort());
}
/**
@@ -208,11 +233,11 @@ import com.google.common.net.InetAddresses;
}
public String getHostname() {
- return hostnameOnly;
+ return this.address.getHostname();
}
public int getPort() {
- return port;
+ return this.address.getPort();
}
public long getStartcode() {
@@ -226,7 +251,10 @@ import com.google.common.net.InetAddresses;
* @param startcode
* @return Server name made of the concatenation of hostname, port and
* startcode formatted as <hostname> ',' <port> ',' <startcode>
+ * @deprecated Since 2.0. Use {@link ServerName#valueOf(String, int, long)} instead.
*/
+ @Deprecated
+ // TODO: Make this private in hbase-3.0.
static String getServerName(String hostName, int port, long startcode) {
final StringBuilder name = new StringBuilder(hostName.length() + 1 + 5 + 1 + 13);
name.append(hostName.toLowerCase(Locale.ROOT));
@@ -242,7 +270,9 @@ import com.google.common.net.InetAddresses;
* @param startcode
* @return Server name made of the concatenation of hostname, port and
* startcode formatted as <hostname> ',' <port> ',' <startcode>
+ * @deprecated Since 2.0. Use {@link ServerName#valueOf(String, long)} instead.
*/
+ @Deprecated
public static String getServerName(final String hostAndPort,
final long startcode) {
int index = hostAndPort.indexOf(":");
@@ -254,22 +284,23 @@ import com.google.common.net.InetAddresses;
/**
* @return Hostname and port formatted as described at
* {@link Addressing#createHostAndPortStr(String, int)}
+ * @deprecated Since 2.0. Use {@link #getAddress()} instead.
*/
+ @Deprecated
public String getHostAndPort() {
- return Addressing.createHostAndPortStr(hostnameOnly, port);
+ return this.address.toString();
}
- public HostAndPort getHostPort() {
- if (hostAndPort == null) {
- hostAndPort = HostAndPort.fromParts(hostnameOnly, port);
- }
- return hostAndPort;
+ public Address getAddress() {
+ return this.address;
}
/**
* @param serverName ServerName in form specified by {@link #getServerName()}
* @return The server start code parsed from servername
+ * @deprecated Since 2.0. Use instance of ServerName to pull out start code.
*/
+ @Deprecated
public static long getServerStartcodeFromServerName(final String serverName) {
int index = serverName.lastIndexOf(SERVERNAME_SEPARATOR);
return Long.parseLong(serverName.substring(index + 1));
@@ -279,7 +310,9 @@ import com.google.common.net.InetAddresses;
* Utility method to excise the start code from a server name
* @param inServerName full server name
* @return server name less its start code
+ * @deprecated Since 2.0. Use {@link #getAddress()}
*/
+ @Deprecated
public static String getServerNameLessStartCode(String inServerName) {
if (inServerName != null && inServerName.length() > 0) {
int index = inServerName.lastIndexOf(SERVERNAME_SEPARATOR);
@@ -296,7 +329,6 @@ import com.google.common.net.InetAddresses;
if (compare != 0) return compare;
compare = this.getPort() - other.getPort();
if (compare != 0) return compare;
-
return Long.compare(this.getStartcode(), other.getStartcode());
}
@@ -320,6 +352,7 @@ import com.google.common.net.InetAddresses;
*/
public static boolean isSameHostnameAndPort(final ServerName left,
final ServerName right) {
+ // TODO: Make this left.getAddress().equals(right.getAddress())
if (left == null) return false;
if (right == null) return false;
return left.getHostname().compareToIgnoreCase(right.getHostname()) == 0 &&
@@ -365,4 +398,4 @@ import com.google.common.net.InetAddresses;
if (str == null ||str.isEmpty()) return false;
return SERVERNAME_PATTERN.matcher(str).matches();
}
-}
\ No newline at end of file
+}
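A hedged sketch of the reworked ServerName accessors above -- getAddress() replaces the deprecated getHostAndPort()/getHostPort() -- using only methods visible in this diff (class name and values are illustrative):

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.util.Address;

    public class ServerNameAddressExample {
      public static void main(String[] args) {
        ServerName sn = ServerName.valueOf("www.example.org", 1234, 1212121212L);
        // toString() keeps the filesystem/znode-safe hostname,port,startcode form.
        System.out.println(sn);                  // www.example.org,1234,1212121212
        // The hostname + port pair is now exposed as an Address.
        Address addr = sn.getAddress();
        System.out.println(addr.getHostname());  // www.example.org
        System.out.println(addr.getPort());      // 1234
      }
    }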
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
index c4bb157c082..c1e9db7e5ab 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
@@ -1,6 +1,4 @@
/**
- * Copyright The Apache Software Foundation
- *
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -21,7 +19,6 @@
package org.apache.hadoop.hbase.rsgroup;
import java.util.Collection;
-import java.util.NavigableSet;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
@@ -29,10 +26,7 @@ import java.util.TreeSet;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.util.Addressing;
-
-import com.google.common.collect.Sets;
-import com.google.common.net.HostAndPort;
+import org.apache.hadoop.hbase.util.Address;
/**
* Stores the group information of region server groups.
@@ -45,18 +39,18 @@ public class RSGroupInfo {
public static final String NAMESPACEDESC_PROP_GROUP = "hbase.rsgroup.name";
private String name;
- private SortedSet<HostAndPort> servers;
- private NavigableSet<TableName> tables;
+ // Keep servers in a sorted set so they have an expected ordering when displayed.
+ private SortedSet<Address> servers;
+ // Keep tables sorted too.
+ private SortedSet<TableName> tables;
public RSGroupInfo(String name) {
- this(name, Sets.newHashSet(), Sets.newTreeSet());
+ this(name, new TreeSet<Address>(), new TreeSet<TableName>());
}
- RSGroupInfo(String name,
- Set<HostAndPort> servers,
- NavigableSet<TableName> tables) {
+ RSGroupInfo(String name, SortedSet<Address> servers, SortedSet<TableName> tables) {
this.name = name;
- this.servers = new TreeSet<>(new Addressing.HostAndPortComparable());
+ this.servers = servers == null? new TreeSet<Address>(): servers;
this.servers.addAll(servers);
this.tables = new TreeSet<>(tables);
}
@@ -79,7 +73,7 @@ public class RSGroupInfo {
*
* @param hostPort the server
*/
- public void addServer(HostAndPort hostPort){
+ public void addServer(Address hostPort){
servers.add(hostPort);
}
@@ -88,7 +82,7 @@ public class RSGroupInfo {
*
* @param hostPort the servers
*/
- public void addAllServers(Collection<HostAndPort> hostPort){
+ public void addAllServers(Collection<Address> hostPort){
servers.addAll(hostPort);
}
@@ -96,7 +90,7 @@ public class RSGroupInfo {
* @param hostPort hostPort of the server
* @return true, if a server with hostPort is found
*/
- public boolean containsServer(HostAndPort hostPort) {
+ public boolean containsServer(Address hostPort) {
return servers.contains(hostPort);
}
@@ -105,7 +99,7 @@ public class RSGroupInfo {
*
* @return set of servers
*/
- public Set<HostAndPort> getServers() {
+ public Set<Address> getServers() {
return servers;
}
@@ -114,7 +108,7 @@ public class RSGroupInfo {
*
* @param hostPort HostPort of the server to remove
*/
- public boolean removeServer(HostAndPort hostPort) {
+ public boolean removeServer(Address hostPort) {
return servers.remove(hostPort);
}
@@ -122,7 +116,7 @@ public class RSGroupInfo {
* Set of tables that are members of this group
* @return set of tables
*/
- public NavigableSet<TableName> getTables() {
+ public SortedSet<TableName> getTables() {
return tables;
}
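RSGroupInfo now keys server membership on the new Address type and keeps both servers and tables in sorted sets, giving a stable display order. A hedged sketch restricted to methods visible in this hunk (group name and hosts are illustrative):

    import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
    import org.apache.hadoop.hbase.util.Address;

    public class RSGroupInfoExample {
      public static void main(String[] args) {
        RSGroupInfo group = new RSGroupInfo("my_group");
        group.addServer(Address.fromParts("rs2.example.org", 16020));
        group.addServer(Address.fromParts("rs1.example.org", 16020));
        // Membership checks take Address; equality is hostname + port.
        System.out.println(group.containsServer(Address.fromParts("rs1.example.org", 16020))); // true
        // The backing set is sorted, so rs1 is listed before rs2.
        System.out.println(group.getServers());
      }
    }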
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Address.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Address.java
new file mode 100644
index 00000000000..5ea5fbf3b9d
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Address.java
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+import com.google.common.net.HostAndPort;
+
+/**
+ * An immutable type to hold a hostname and port combo, like an Endpoint
+ * or java.net.InetSocketAddress (but without danger of our calling
+ * resolve -- we do NOT want a resolve happening every time we want
+ * to hold a hostname and port combo). This class is also Comparable.
+ * In implementation this class is a facade over Guava's {@link HostAndPort}.
+ * We cannot have Guava classes in our API hence this Type.
+ */
+@InterfaceStability.Evolving
+@InterfaceAudience.Public
+public class Address implements Comparable<Address> {
+ private HostAndPort hostAndPort;
+
+ private Address(HostAndPort hostAndPort) {
+ this.hostAndPort = hostAndPort;
+ }
+
+ public static Address fromParts(String hostname, int port) {
+ return new Address(HostAndPort.fromParts(hostname, port));
+ }
+
+ public static Address fromString(String hostnameAndPort) {
+ return new Address(HostAndPort.fromString(hostnameAndPort));
+ }
+
+ public String getHostname() {
+ return this.hostAndPort.getHostText();
+ }
+
+ public int getPort() {
+ return this.hostAndPort.getPort();
+ }
+
+ @Override
+ public String toString() {
+ return this.hostAndPort.toString();
+ }
+
+ @Override
+ // Don't use HostAndPort equals... It is wonky including
+ // ipv6 brackets
+ public boolean equals(Object other) {
+ if (this == other) {
+ return true;
+ }
+ if (other instanceof Address) {
+ Address that = (Address)other;
+ return this.getHostname().equals(that.getHostname()) &&
+ this.getPort() == that.getPort();
+ }
+ return false;
+ }
+
+ @Override
+ public int hashCode() {
+ return this.getHostname().hashCode() ^ getPort();
+ }
+
+ @Override
+ public int compareTo(Address that) {
+ int compare = this.getHostname().compareTo(that.getHostname());
+ if (compare != 0) return compare;
+ return this.getPort() - that.getPort();
+ }
+}
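Since Address is brand new in this patch, a short hedged sketch of its intended use (host and port values are illustrative):

    import org.apache.hadoop.hbase.util.Address;

    public class AddressExample {
      public static void main(String[] args) {
        Address a = Address.fromParts("rs1.example.org", 16020);
        Address b = Address.fromString("rs1.example.org:16020");
        // equals()/hashCode() compare hostname and port directly instead of
        // delegating to Guava's HostAndPort, so both spellings are equal.
        System.out.println(a.equals(b));      // true
        System.out.println(a.compareTo(b));   // 0
        System.out.println(a);                // rs1.example.org:16020
        System.out.println(a.getHostname());  // rs1.example.org
        System.out.println(a.getPort());      // 16020
      }
    }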
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java
index 71f6127b8f5..31fb1f557ea 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java
@@ -24,13 +24,10 @@ import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.NetworkInterface;
import java.net.SocketException;
-import java.util.Comparator;
import java.util.Enumeration;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import com.google.common.net.HostAndPort;
-
/**
* Utility for network addresses, resolving and naming.
*/
@@ -39,25 +36,6 @@ public class Addressing {
public static final String VALID_PORT_REGEX = "[\\d]+";
public static final String HOSTNAME_PORT_SEPARATOR = ":";
- /**
- * HostAndPort Comparator.
- * Does compare on HostAndPort instances. This comparator says that instances that have same
- * host and port are the same. This is a little different than HostAndPort#equals. It does
- * NOT consider two ipv6 HostAndPort instances the same if they have the same hostname
- * and port and they differ only in the fact that one provided brackets around the ipv6
- * hostname while the other did not: i.e. HostAndPort does NOT equate
- * {@code HostAndPort.fromParts("[2001:db8::1]", 888);} and
- * {@code HostAndPort.fromParts("2001:db8::1", 888);}.
- */
- public static class HostAndPortComparable implements Comparator<HostAndPort> {
- @Override
- public int compare(HostAndPort left, HostAndPort right) {
- int compare = left.getHostText().compareTo(right.getHostText());
- if (compare != 0) return compare;
- return left.getPort() - right.getPort();
- }
- }
-
/**
* @param hostAndPort Formatted as <hostname> ':' <port>
* @return An InetSocketInstance
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestAddressing.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestAddressing.java
deleted file mode 100644
index 97aaa1bc533..00000000000
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestAddressing.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hadoop.hbase.util;
-
-import static org.junit.Assert.*;
-
-import org.apache.hadoop.hbase.testclassification.MiscTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.hbase.util.Addressing.HostAndPortComparable;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.google.common.net.HostAndPort;
-
-@Category({MiscTests.class, SmallTests.class})
-public class TestAddressing {
-
- @Test
- public void testHostAndPortComparable() {
- HostAndPortComparable c = new HostAndPortComparable();
- HostAndPort left = HostAndPort.fromParts("[2001:db8::1]", 888);
- HostAndPort right = HostAndPort.fromParts("2001:db8::1", 888);
- assertTrue(left.toString() + " " + right.toString(), c.compare(left, right) == 0);
- }
-}
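With HostAndPortComparable and its test gone, sorted collections of endpoints lean on Address implementing Comparable directly (hostname first, then port), which is how RSGroupInfo's server set is now built. A small hedged sketch (hosts and ports are illustrative):

    import java.util.TreeSet;
    import org.apache.hadoop.hbase.util.Address;

    public class SortedAddressesExample {
      public static void main(String[] args) {
        // No custom comparator needed; Address has a natural ordering.
        TreeSet<Address> servers = new TreeSet<>();
        servers.add(Address.fromParts("rs2.example.org", 16020));
        servers.add(Address.fromParts("rs1.example.org", 16030));
        servers.add(Address.fromParts("rs1.example.org", 16020));
        // Iterates rs1:16020, rs1:16030, rs2:16020.
        System.out.println(servers);
      }
    }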
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/rsgroup/IntegrationTestRSGroup.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/rsgroup/IntegrationTestRSGroup.java
index f72b5f79eb3..da94bd7661e 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/rsgroup/IntegrationTestRSGroup.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/rsgroup/IntegrationTestRSGroup.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.testclassification.IntegrationTests;
import org.junit.After;
-import org.junit.Before;
+import org.junit.BeforeClass;
import org.junit.experimental.categories.Category;
/**
@@ -39,7 +39,7 @@ public class IntegrationTestRSGroup extends TestRSGroupsBase {
private final static Log LOG = LogFactory.getLog(IntegrationTestRSGroup.class);
private static boolean initialized = false;
- @Before
+ @BeforeClass
public void beforeMethod() throws Exception {
if(!initialized) {
LOG.info("Setting up IntegrationTestGroup");
@@ -49,7 +49,7 @@ public class IntegrationTestRSGroup extends TestRSGroupsBase {
//set shared configs
admin = TEST_UTIL.getAdmin();
cluster = TEST_UTIL.getHBaseClusterInterface();
- rsGroupAdmin = new VerifyingRSGroupAdminClient(rsGroupAdmin.newClient(TEST_UTIL.getConnection()),
+ rsGroupAdmin = new VerifyingRSGroupAdminClient(new RSGroupAdminClient(TEST_UTIL.getConnection()),
TEST_UTIL.getConfiguration());
LOG.info("Done initializing cluster");
initialized = true;
diff --git a/hbase-rsgroup/pom.xml b/hbase-rsgroup/pom.xml
index 86d87f8fa7c..c9d60955caa 100644
--- a/hbase-rsgroup/pom.xml
+++ b/hbase-rsgroup/pom.xml
@@ -118,11 +118,6 @@
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-common</artifactId>
</dependency>
- <dependency>
- <groupId>org.apache.hbase</groupId>
- <artifactId>hbase-common</artifactId>
- <type>test-jar</type>
- </dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-protocol</artifactId>
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java
index f94d0f65729..df8129dcd2b 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java
@@ -1,6 +1,4 @@
/**
- * Copyright The Apache Software Foundation
- *
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -19,8 +17,6 @@
*/
package org.apache.hadoop.hbase.rsgroup;
-import com.google.common.net.HostAndPort;
-
import java.io.Closeable;
import java.io.IOException;
import java.util.List;
@@ -28,31 +24,20 @@ import java.util.Set;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.util.Address;
/**
* Group user API interface used between client and server.
*/
@InterfaceAudience.Private
-public abstract class RSGroupAdmin implements Closeable {
-
- /**
- * Create a new RSGroupAdmin client
- * @param conn connection RSGroupAdmin instance will use
- * @return a new RSGroupAdmin client
- * @throws IOException on failure to create new client
- */
- public static RSGroupAdmin newClient(Connection conn) throws IOException {
- return new RSGroupAdminClient(conn);
- }
-
+public interface RSGroupAdmin {
/**
* Gets the regionserver group information.
*
* @param groupName the group name
* @return An instance of RSGroupInfo
*/
- public abstract RSGroupInfo getRSGroupInfo(String groupName) throws IOException;
+ RSGroupInfo getRSGroupInfo(String groupName) throws IOException;
/**
* Gets the regionserver group info of table.
@@ -60,7 +45,7 @@ public abstract class RSGroupAdmin implements Closeable {
* @param tableName the table name
* @return An instance of RSGroupInfo.
*/
- public abstract RSGroupInfo getRSGroupInfoOfTable(TableName tableName) throws IOException;
+ RSGroupInfo getRSGroupInfoOfTable(TableName tableName) throws IOException;
/**
* Move a set of serves to another group
@@ -70,7 +55,8 @@ public abstract class RSGroupAdmin implements Closeable {
* @param targetGroup the target group
* @throws java.io.IOException Signals that an I/O exception has occurred.
*/
- public abstract void moveServers(Set<HostAndPort> servers, String targetGroup) throws IOException;
+ void moveServers(Set<Address> servers, String targetGroup)
+ throws IOException;
/**
* Move tables to a new group.
@@ -79,21 +65,21 @@ public abstract class RSGroupAdmin implements Closeable {
* @param targetGroup target group
* @throws java.io.IOException on failure to move tables
*/
- public abstract void moveTables(Set<TableName> tables, String targetGroup) throws IOException;
+ void moveTables(Set<TableName> tables, String targetGroup) throws IOException;
/**
* Add a new group
* @param name name of the group
* @throws java.io.IOException on failure to add group
*/
- public abstract void addRSGroup(String name) throws IOException;
+ void addRSGroup(String name) throws IOException;
/**
* Remove a regionserver group
* @param name name of the group
* @throws java.io.IOException on failure to remove group
*/
- public abstract void removeRSGroup(String name) throws IOException;
+ void removeRSGroup(String name) throws IOException;
/**
* Balance the regions in a group
@@ -102,14 +88,14 @@ public abstract class RSGroupAdmin implements Closeable {
* @return boolean whether balance ran or not
* @throws java.io.IOException on unexpected failure to balance group
*/
- public abstract boolean balanceRSGroup(String name) throws IOException;
+ boolean balanceRSGroup(String name) throws IOException;
/**
* Lists the existing groups.
*
* @return Collection of RSGroupInfo.
*/
- public abstract List<RSGroupInfo> listRSGroups() throws IOException;
+ List<RSGroupInfo> listRSGroups() throws IOException;
/**
* Retrieve the RSGroupInfo a server is affiliated to
@@ -117,5 +103,5 @@ public abstract class RSGroupAdmin implements Closeable {
* @return RSGroupInfo associated with the server
* @throws java.io.IOException on unexpected failure to retrieve GroupInfo
*/
- public abstract RSGroupInfo getRSGroupOfServer(HostAndPort hostPort) throws IOException;
-}
+ RSGroupInfo getRSGroupOfServer(Address hostPort) throws IOException;
+}
\ No newline at end of file
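With RSGroupAdmin reduced to a plain interface and the newClient() factory removed, callers construct the coprocessor-backed client directly, as the IntegrationTestRSGroup change below does. A hedged sketch; RSGroupAdminClient is package-private, so this assumes the caller sits in the same org.apache.hadoop.hbase.rsgroup package, and the server address is illustrative. Note the interface no longer extends Closeable, so only the Connection needs closing:

    package org.apache.hadoop.hbase.rsgroup;

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Address;

    public class RSGroupAdminUsageExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          // Direct construction replaces the removed RSGroupAdmin.newClient(conn).
          RSGroupAdmin admin = new RSGroupAdminClient(conn);
          for (RSGroupInfo info : admin.listRSGroups()) {
            System.out.println(info.getName() + " -> " + info.getServers());
          }
          // Per-server lookup now takes the Address type rather than Guava's HostAndPort.
          System.out.println(admin.getRSGroupOfServer(Address.fromParts("rs1.example.org", 16020)));
        }
      }
    }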
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
index 81a45e41afa..c199b2fd79c 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
@@ -1,6 +1,4 @@
/**
- * Copyright The Apache Software Foundation
- *
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -19,20 +17,13 @@
*/
package org.apache.hadoop.hbase.rsgroup;
-import com.google.common.collect.Sets;
-import com.google.common.net.HostAndPort;
-import com.google.protobuf.ServiceException;
-
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
@@ -40,19 +31,21 @@ import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
+import org.apache.hadoop.hbase.util.Address;
+
+import com.google.common.collect.Sets;
+import com.google.protobuf.ServiceException;
/**
* Client used for managing region server group information.
*/
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-class RSGroupAdminClient extends RSGroupAdmin {
- private RSGroupAdminProtos.RSGroupAdminService.BlockingInterface proxy;
- private static final Log LOG = LogFactory.getLog(RSGroupAdminClient.class);
+@InterfaceAudience.Private
+class RSGroupAdminClient implements RSGroupAdmin {
+ private RSGroupAdminProtos.RSGroupAdminService.BlockingInterface stub;
public RSGroupAdminClient(Connection conn) throws IOException {
- proxy = RSGroupAdminProtos.RSGroupAdminService.newBlockingStub(
+ stub = RSGroupAdminProtos.RSGroupAdminService.newBlockingStub(
conn.getAdmin().coprocessorService());
}
@@ -60,7 +53,7 @@ class RSGroupAdminClient extends RSGroupAdmin {
public RSGroupInfo getRSGroupInfo(String groupName) throws IOException {
try {
RSGroupAdminProtos.GetRSGroupInfoResponse resp =
- proxy.getRSGroupInfo(null,
+ stub.getRSGroupInfo(null,
RSGroupAdminProtos.GetRSGroupInfoRequest.newBuilder()
.setRSGroupName(groupName).build());
if(resp.hasRSGroupInfo()) {
@@ -79,7 +72,7 @@ class RSGroupAdminClient extends RSGroupAdmin {
.setTableName(ProtobufUtil.toProtoTableName(tableName)).build();
try {
- GetRSGroupInfoOfTableResponse resp = proxy.getRSGroupInfoOfTable(null, request);
+ GetRSGroupInfoOfTableResponse resp = stub.getRSGroupInfoOfTable(null, request);
if (resp.hasRSGroupInfo()) {
return RSGroupSerDe.toGroupInfo(resp.getRSGroupInfo());
}
@@ -90,11 +83,11 @@ class RSGroupAdminClient extends RSGroupAdmin {
}
@Override
- public void moveServers(Set<HostAndPort> servers, String targetGroup) throws IOException {
+ public void moveServers(Set<Address> servers, String targetGroup) throws IOException {
Set<HBaseProtos.ServerName> hostPorts = Sets.newHashSet();
- for(HostAndPort el: servers) {
+ for(Address el: servers) {
hostPorts.add(HBaseProtos.ServerName.newBuilder()
- .setHostName(el.getHostText())
+ .setHostName(el.getHostname())
.setPort(el.getPort())
.build());
}
@@ -104,7 +97,7 @@ class RSGroupAdminClient extends RSGroupAdmin {
.addAllServers(hostPorts).build();
try {
- proxy.moveServers(null, request);
+ stub.moveServers(null, request);
} catch (ServiceException e) {
throw ProtobufUtil.handleRemoteException(e);
}
@@ -119,7 +112,7 @@ class RSGroupAdminClient extends RSGroupAdmin {
builder.addTableName(ProtobufUtil.toProtoTableName(tableName));
}
try {
- proxy.moveTables(null, builder.build());
+ stub.moveTables(null, builder.build());
} catch (ServiceException e) {
throw ProtobufUtil.handleRemoteException(e);
}
@@ -131,7 +124,7 @@ class RSGroupAdminClient extends RSGroupAdmin {
RSGroupAdminProtos.AddRSGroupRequest.newBuilder()
.setRSGroupName(groupName).build();
try {
- proxy.addRSGroup(null, request);
+ stub.addRSGroup(null, request);
} catch (ServiceException e) {
throw ProtobufUtil.handleRemoteException(e);
}
@@ -143,7 +136,7 @@ class RSGroupAdminClient extends RSGroupAdmin {
RSGroupAdminProtos.RemoveRSGroupRequest.newBuilder()
.setRSGroupName(name).build();
try {
- proxy.removeRSGroup(null, request);
+ stub.removeRSGroup(null, request);
} catch (ServiceException e) {
throw ProtobufUtil.handleRemoteException(e);
}
@@ -156,7 +149,7 @@ class RSGroupAdminClient extends RSGroupAdmin {
.setRSGroupName(name).build();
try {
- return proxy.balanceRSGroup(null, request).getBalanceRan();
+ return stub.balanceRSGroup(null, request).getBalanceRan();
} catch (ServiceException e) {
throw ProtobufUtil.handleRemoteException(e);
}
@@ -166,7 +159,7 @@ class RSGroupAdminClient extends RSGroupAdmin {
public List<RSGroupInfo> listRSGroups() throws IOException {
try {
List<RSGroupProtos.RSGroupInfo> resp =
- proxy.listRSGroupInfos(null,
+ stub.listRSGroupInfos(null,
RSGroupAdminProtos.ListRSGroupInfosRequest.newBuilder().build()).getRSGroupInfoList();
List<RSGroupInfo> result = new ArrayList<RSGroupInfo>(resp.size());
for(RSGroupProtos.RSGroupInfo entry: resp) {
@@ -179,16 +172,16 @@ class RSGroupAdminClient extends RSGroupAdmin {
}
@Override
- public RSGroupInfo getRSGroupOfServer(HostAndPort hostPort) throws IOException {
+ public RSGroupInfo getRSGroupOfServer(Address hostPort) throws IOException {
RSGroupAdminProtos.GetRSGroupInfoOfServerRequest request =
RSGroupAdminProtos.GetRSGroupInfoOfServerRequest.newBuilder()
.setServer(HBaseProtos.ServerName.newBuilder()
- .setHostName(hostPort.getHostText())
+ .setHostName(hostPort.getHostname())
.setPort(hostPort.getPort())
.build())
.build();
try {
- GetRSGroupInfoOfServerResponse resp = proxy.getRSGroupInfoOfServer(null, request);
+ GetRSGroupInfoOfServerResponse resp = stub.getRSGroupInfoOfServer(null, request);
if (resp.hasRSGroupInfo()) {
return RSGroupSerDe.toGroupInfo(resp.getRSGroupInfo());
}
@@ -197,8 +190,4 @@ class RSGroupAdminClient extends RSGroupAdmin {
throw ProtobufUtil.handleRemoteException(e);
}
}
-
- @Override
- public void close() throws IOException {
- }
}
\ No newline at end of file
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
index 2dd3b00b60f..eb701792969 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -1,6 +1,4 @@
/**
- * Copyright The Apache Software Foundation
- *
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -74,19 +72,21 @@ import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGro
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.util.Address;
import com.google.common.collect.Sets;
-import com.google.common.net.HostAndPort;
import com.google.protobuf.RpcCallback;
import com.google.protobuf.RpcController;
import com.google.protobuf.Service;
+
@InterfaceAudience.Private
public class RSGroupAdminEndpoint extends RSGroupAdminService implements CoprocessorService,
Coprocessor, MasterObserver {
private MasterServices master = null;
- private static RSGroupInfoManagerImpl groupInfoManager;
+ // TODO: Static? Fix.
+ private static RSGroupInfoManager groupInfoManager;
private RSGroupAdminServer groupAdminServer;
@Override
@@ -125,7 +125,7 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService implements Coproce
setStaticGroupInfoManager(groupInfoManager);
}
- public RSGroupInfoManager getGroupInfoManager() {
+ RSGroupInfoManager getGroupInfoManager() {
return groupInfoManager;
}
@@ -133,112 +133,100 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService implements Coproce
public void getRSGroupInfo(RpcController controller,
GetRSGroupInfoRequest request,
RpcCallback<GetRSGroupInfoResponse> done) {
- GetRSGroupInfoResponse response = null;
- try {
- GetRSGroupInfoResponse.Builder builder =
+ GetRSGroupInfoResponse.Builder builder =
GetRSGroupInfoResponse.newBuilder();
- RSGroupInfo RSGroupInfo = groupAdminServer.getRSGroupInfo(request.getRSGroupName());
- if(RSGroupInfo != null) {
- builder.setRSGroupInfo(RSGroupSerDe.toProtoGroupInfo(RSGroupInfo));
+ String groupName = request.getRSGroupName();
+ try {
+ RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName);
+ if (rsGroupInfo != null) {
+ builder.setRSGroupInfo(RSGroupSerDe.toProtoGroupInfo(rsGroupInfo));
}
- response = builder.build();
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
- done.run(response);
+ done.run(builder.build());
}
@Override
public void getRSGroupInfoOfTable(RpcController controller,
GetRSGroupInfoOfTableRequest request,
RpcCallback<GetRSGroupInfoOfTableResponse> done) {
- GetRSGroupInfoOfTableResponse response = null;
- try {
- GetRSGroupInfoOfTableResponse.Builder builder =
+ GetRSGroupInfoOfTableResponse.Builder builder =
GetRSGroupInfoOfTableResponse.newBuilder();
+ try {
TableName tableName = ProtobufUtil.toTableName(request.getTableName());
RSGroupInfo RSGroupInfo = groupAdminServer.getRSGroupInfoOfTable(tableName);
- if (RSGroupInfo == null) {
- response = builder.build();
- } else {
- response = builder.setRSGroupInfo(RSGroupSerDe.toProtoGroupInfo(RSGroupInfo)).build();
+ if (RSGroupInfo != null) {
+ builder.setRSGroupInfo(RSGroupSerDe.toProtoGroupInfo(RSGroupInfo));
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
- done.run(response);
+ done.run(builder.build());
}
@Override
public void moveServers(RpcController controller,
MoveServersRequest request,
RpcCallback<MoveServersResponse> done) {
- RSGroupAdminProtos.MoveServersResponse response = null;
- try {
- RSGroupAdminProtos.MoveServersResponse.Builder builder =
+ RSGroupAdminProtos.MoveServersResponse.Builder builder =
RSGroupAdminProtos.MoveServersResponse.newBuilder();
- Set<HostAndPort> hostPorts = Sets.newHashSet();
+ try {
+ Set<Address> hostPorts = Sets.newHashSet();
for(HBaseProtos.ServerName el: request.getServersList()) {
- hostPorts.add(HostAndPort.fromParts(el.getHostName(), el.getPort()));
+ hostPorts.add(Address.fromParts(el.getHostName(), el.getPort()));
}
groupAdminServer.moveServers(hostPorts, request.getTargetGroup());
- response = builder.build();
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
- done.run(response);
+ done.run(builder.build());
}
@Override
public void moveTables(RpcController controller,
MoveTablesRequest request,
RpcCallback<MoveTablesResponse> done) {
- MoveTablesResponse response = null;
- try {
- MoveTablesResponse.Builder builder =
+ MoveTablesResponse.Builder builder =
MoveTablesResponse.newBuilder();
+ try {
Set<TableName> tables = new HashSet<TableName>(request.getTableNameList().size());
for(HBaseProtos.TableName tableName: request.getTableNameList()) {
tables.add(ProtobufUtil.toTableName(tableName));
}
groupAdminServer.moveTables(tables, request.getTargetGroup());
- response = builder.build();
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
- done.run(response);
+ done.run(builder.build());
}
@Override
public void addRSGroup(RpcController controller,
AddRSGroupRequest request,
RpcCallback<AddRSGroupResponse> done) {
- AddRSGroupResponse response = null;
- try {
- AddRSGroupResponse.Builder builder =
+ AddRSGroupResponse.Builder builder =
AddRSGroupResponse.newBuilder();
+ try {
groupAdminServer.addRSGroup(request.getRSGroupName());
- response = builder.build();
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
- done.run(response);
+ done.run(builder.build());
}
@Override
public void removeRSGroup(RpcController controller,
RemoveRSGroupRequest request,
RpcCallback<RemoveRSGroupResponse> done) {
- RemoveRSGroupResponse response = null;
- try {
- RemoveRSGroupResponse.Builder builder =
+ RemoveRSGroupResponse.Builder builder =
RemoveRSGroupResponse.newBuilder();
+ try {
groupAdminServer.removeRSGroup(request.getRSGroupName());
- response = builder.build();
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
- done.run(response);
+ done.run(builder.build());
}
@Override
@@ -259,18 +247,16 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService implements Coproce
public void listRSGroupInfos(RpcController controller,
ListRSGroupInfosRequest request,
RpcCallback<ListRSGroupInfosResponse> done) {
- ListRSGroupInfosResponse response = null;
- try {
- ListRSGroupInfosResponse.Builder builder =
+ ListRSGroupInfosResponse.Builder builder =
ListRSGroupInfosResponse.newBuilder();
+ try {
for(RSGroupInfo RSGroupInfo : groupAdminServer.listRSGroups()) {
builder.addRSGroupInfo(RSGroupSerDe.toProtoGroupInfo(RSGroupInfo));
}
- response = builder.build();
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
- done.run(response);
+ done.run(builder.build());
}
@Override
@@ -279,8 +265,8 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService implements Coproce
RpcCallback<GetRSGroupInfoOfServerResponse> done) {
GetRSGroupInfoOfServerResponse.Builder builder = GetRSGroupInfoOfServerResponse.newBuilder();
try {
- HostAndPort hp =
- HostAndPort.fromParts(request.getServer().getHostName(), request.getServer().getPort());
+ Address hp =
+ Address.fromParts(request.getServer().getHostName(), request.getServer().getPort());
RSGroupInfo RSGroupInfo = groupAdminServer.getRSGroupOfServer(hp);
if (RSGroupInfo != null) {
builder.setRSGroupInfo(RSGroupSerDe.toProtoGroupInfo(RSGroupInfo));
@@ -1040,14 +1026,14 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService implements Coproce
}
@Override
- public void preMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx, Set<HostAndPort>
- servers, String targetGroup) throws IOException {
- }
+ public void preMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
+ Set<Address> servers, String targetGroup)
+ throws IOException {}
@Override
- public void postMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx, Set<HostAndPort>
- servers, String targetGroup) throws IOException {
- }
+ public void postMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
+ Set<Address> servers, String targetGroup)
+ throws IOException {}
@Override
public void preMoveTables(ObserverContext<MasterCoprocessorEnvironment> ctx, Set<TableName>
@@ -1203,4 +1189,4 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService implements Coproce
// TODO Auto-generated method stub
}
-}
\ No newline at end of file
+}
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
index cae7ba44f2c..d502066d6a9 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
@@ -1,6 +1,4 @@
/**
- * Copyright The Apache Software Foundation
- *
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -19,19 +17,16 @@
*/
package org.apache.hadoop.hbase.rsgroup;
-import static org.apache.hadoop.hbase.rsgroup.Utility.getOnlineServers;
-
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
-import java.util.NavigableMap;
import java.util.Set;
-import java.util.concurrent.ConcurrentSkipListMap;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
@@ -51,30 +46,26 @@ import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.master.locking.LockManager;
import org.apache.hadoop.hbase.master.locking.LockProcedure;
-import org.apache.hadoop.hbase.util.Addressing;
+import org.apache.hadoop.hbase.util.Address;
+import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
-import com.google.common.net.HostAndPort;
/**
- * Service to support Region Server Grouping (HBase-6721)
+ * Service to support Region Server Grouping (HBase-6721).
*/
@InterfaceAudience.Private
-public class RSGroupAdminServer extends RSGroupAdmin {
+public class RSGroupAdminServer implements RSGroupAdmin {
private static final Log LOG = LogFactory.getLog(RSGroupAdminServer.class);
private MasterServices master;
- // List of servers that are being moved from one group to another
- // Key=host:port,Value=targetGroup
- private NavigableMap<HostAndPort, String> serversInTransition =
- new ConcurrentSkipListMap<HostAndPort, String>(new Addressing.HostAndPortComparable());
- private RSGroupInfoManager rsgroupInfoManager;
+ private final RSGroupInfoManager rsGroupInfoManager;
public RSGroupAdminServer(MasterServices master,
RSGroupInfoManager RSGroupInfoManager) throws IOException {
this.master = master;
- this.rsgroupInfoManager = RSGroupInfoManager;
+ this.rsGroupInfoManager = RSGroupInfoManager;
}
@Override
@@ -88,149 +79,166 @@ public class RSGroupAdminServer extends RSGroupAdmin {
return groupName == null? null: getRSGroupInfoManager().getRSGroup(groupName);
}
+ private void checkOnlineServersOnly(Set<Address> servers) throws ConstraintException {
+ Set<Address> onlineServers = new HashSet<Address>();
+ for(ServerName server: master.getServerManager().getOnlineServers().keySet()) {
+ onlineServers.add(server.getAddress());
+ }
+ for (Address el: servers) {
+ if (!onlineServers.contains(el)) {
+ throw new ConstraintException(
+ "Server " + el + " is not an online server in 'default' RSGroup.");
+ }
+ }
+ }
+
+ /**
+ * Check passed name. Fail if nulls or if corresponding RSGroupInfo not found.
+ * @return The RSGroupInfo named name
+ * @throws IOException
+ */
+ private RSGroupInfo getAndCheckRSGroupInfo(String name)
+ throws IOException {
+ if (StringUtils.isEmpty(name)) {
+ throw new ConstraintException("RSGroup cannot be null.");
+ }
+ RSGroupInfo rsgi = getRSGroupInfo(name);
+ if (rsgi == null) {
+ throw new ConstraintException("RSGroup does not exist: " + name);
+ }
+ return rsgi;
+ }
+
+ /**
+ * @return List of Regions associated with this server.
+ */
+ private List<HRegionInfo> getRegions(final Address server) {
+ LinkedList<HRegionInfo> regions = new LinkedList<HRegionInfo>();
+ for (Map.Entry<HRegionInfo, ServerName> el :
+ master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) {
+ if (el.getValue().getAddress().equals(server)) {
+ addRegion(regions, el.getKey());
+ }
+ }
+ for (RegionState state:
+ this.master.getAssignmentManager().getRegionStates().getRegionsInTransition()) {
+ if (state.getServerName().getAddress().equals(server)) {
+ addRegion(regions, state.getRegion());
+ }
+ }
+ return regions;
+ }
+
+ private void addRegion(final LinkedList<HRegionInfo> regions, HRegionInfo hri) {
+ // If meta, move it last otherwise other unassigns fail because meta is not
+ // online for them to update state in. This is dodgy. Needs to be made more
+ // robust. See TODO below.
+ if (hri.isMetaRegion()) regions.addLast(hri);
+ else regions.addFirst(hri);
+ }
+
+ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="RCN_REDUNDANT_NULLCHECK_WOULD_HAVE_BEEN_A_NPE",
+ justification="Ignoring complaint because don't know what it is complaining about")
@Override
- public void moveServers(Set<HostAndPort> servers, String targetGroupName)
- throws IOException {
+ public void moveServers(Set<Address> servers, String targetGroupName)
+ throws IOException {
if (servers == null) {
throw new ConstraintException("The list of servers to move cannot be null.");
}
if (servers.isEmpty()) {
+ // For some reason this difference between null servers and isEmpty is an important distinction.
+ // TODO. Why? Stuff breaks if I equate them.
return;
}
- if (StringUtils.isEmpty(targetGroupName)) {
- throw new ConstraintException("The target rsgroup cannot be null.");
- }
- RSGroupInfo targetGrp = getRSGroupInfo(targetGroupName);
- if (targetGrp == null) {
- throw new ConstraintException("RSGroup " + targetGroupName + " does not exist.");
- }
+ RSGroupInfo targetGrp = getAndCheckRSGroupInfo(targetGroupName);
RSGroupInfoManager manager = getRSGroupInfoManager();
+ // Lock the manager during the below manipulations.
synchronized (manager) {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preMoveServers(servers, targetGroupName);
}
- HostAndPort firstServer = servers.iterator().next();
- // We only allow a move from a single source group so this should be ok
+ // Presume the first server's group is the source group. Later we check that all servers
+ // are from this same group.
+ Address firstServer = servers.iterator().next();
RSGroupInfo srcGrp = manager.getRSGroupOfServer(firstServer);
if (srcGrp == null) {
- throw new ConstraintException("Server " + firstServer + " does not have a rsgroup.");
+ // Be careful. This message is tested for in TestRSGroupsBase...
+ throw new ConstraintException("Source RSGroup for server " + firstServer + " does not exist.");
}
if (srcGrp.getName().equals(targetGroupName)) {
- throw new ConstraintException( "Target rsgroup " + targetGroupName +
- " is same as source " + srcGrp + " rsgroup.");
+ throw new ConstraintException( "Target RSGroup " + targetGroupName +
+ " is same as source " + srcGrp + " RSGroup.");
}
- // Only move online servers (from default) or servers from other groups.
+ // Only move online servers (when from 'default') or servers from other groups.
// This prevents bogus servers from entering groups
if (RSGroupInfo.DEFAULT_GROUP.equals(srcGrp.getName())) {
- Set<HostAndPort> onlineServers = getOnlineServers(this.master);
- for (HostAndPort el: servers) {
- if (!onlineServers.contains(el)) {
- throw new ConstraintException(
- "Server " + el + " is not an online server in 'default' rsgroup.");
- }
+ checkOnlineServersOnly(servers);
+ }
+ // Check all servers are of same rsgroup.
+ for (Address server: servers) {
+ String tmpGroup = manager.getRSGroupOfServer(server).getName();
+ if (!tmpGroup.equals(srcGrp.getName())) {
+ throw new ConstraintException("Move server request should only come from one source " +
+ "RSGroup. Expecting only " + srcGrp.getName() + " but contains " + tmpGroup);
}
}
-
- if(srcGrp.getServers().size() <= servers.size() &&
- srcGrp.getTables().size() > 0) {
- throw new ConstraintException("Cannot leave a rsgroup " + srcGrp.getName() +
+ if (srcGrp.getServers().size() <= servers.size() && srcGrp.getTables().size() > 0) {
+ throw new ConstraintException("Cannot leave a RSGroup " + srcGrp.getName() +
" that contains tables without servers to host them.");
}
- String sourceGroupName =
- manager.getRSGroupOfServer(srcGrp.getServers().iterator().next()).getName();
- if (getRSGroupInfo(targetGroupName) == null) {
- throw new ConstraintException("Target " + targetGroupName + " rsgroup does not exist.");
- }
+ // MovedServers may be < passed in 'servers'.
+ Set<Address> movedServers = manager.moveServers(servers, srcGrp.getName(), targetGroupName);
+ // Appy makes note that if we were passed in a List of servers,
+ // we'd save having to do stuff like the below.
+ List<Address> editableMovedServers = Lists.newArrayList(movedServers);
+ boolean foundRegionsToUnassign;
+ do {
+ foundRegionsToUnassign = false;
+ for (Iterator<Address> iter = editableMovedServers.iterator(); iter.hasNext();) {
+ Address rs = iter.next();
+ // Get regions that are associated with this server.
+ List<HRegionInfo> regions = getRegions(rs);
- for (HostAndPort server: servers) {
- if (serversInTransition.containsKey(server)) {
- throw new ConstraintException(
- "Server list contains a server " + server + " that is already being moved.");
- }
- String tmpGroup = manager.getRSGroupOfServer(server).getName();
- if (sourceGroupName != null && !tmpGroup.equals(sourceGroupName)) {
- throw new ConstraintException(
- "Move server request should only come from one source rsgroup. "+
- "Expecting only " + sourceGroupName + " but contains " + tmpGroup);
- }
- }
-
- if (sourceGroupName.equals(targetGroupName)) {
- throw new ConstraintException(
- "Target rsgroup " + sourceGroupName + " is same as source rsgroup.");
- }
- try {
- //update the servers as in transition
- for (HostAndPort server : servers) {
- serversInTransition.put(server, targetGroupName);
- }
-
- Set<HostAndPort> movedServers =
- manager.moveServers(servers, sourceGroupName, targetGroupName);
- boolean found;
- do {
- found = false;
- for (Iterator<HostAndPort> iter = movedServers.iterator();
- iter.hasNext(); ) {
- HostAndPort rs = iter.next();
- //get online regions
- List<HRegionInfo> regions = new LinkedList<HRegionInfo>();
- for (Map.Entry<HRegionInfo, ServerName> el :
- master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) {
- if (el.getValue().getHostPort().equals(rs)) {
- regions.add(el.getKey());
- }
- }
- for (RegionState state :
- master.getAssignmentManager().getRegionStates().getRegionsInTransition()) {
- if (state.getServerName().getHostPort().equals(rs)) {
- regions.add(state.getRegion());
- }
- }
-
- //unassign regions for a server
- LOG.info("Unassigning " + regions.size() +
- " regions from server " + rs + " for move to " + targetGroupName);
- if (regions.size() > 0) {
- //TODO bulk unassign or throttled unassign?
- for (HRegionInfo region : regions) {
- //regions might get assigned from tables of target group
- //so we need to filter
- if (!targetGrp.containsTable(region.getTable())) {
- master.getAssignmentManager().unassign(region);
- if (master.getAssignmentManager().getRegionStates().
- getRegionState(region).isFailedOpen()) {
- // If region is in FAILED_OPEN state, it won't recover, not without
- // operator intervention... in hbase-2.0.0 at least. Continue rather
- // than mark region as 'found'.
- continue;
- }
- found = true;
+ // Unassign regions for a server
+ // TODO: This is problematic especially if hbase:meta is in the mix.
+ // We need to update state in hbase:meta and if unassigned we hang
+ // around in here. There is a silly sort on linked list done above
+ // in getRegions putting hbase:meta last, which helps, but there are probably holes.
+ LOG.info("Unassigning " + regions.size() +
+ " region(s) from " + rs + " for server move to " + targetGroupName);
+ if (!regions.isEmpty()) {
+ // TODO bulk unassign or throttled unassign?
+ for (HRegionInfo region: regions) {
+ // Regions might get assigned from tables of target group so we need to filter
+ if (!targetGrp.containsTable(region.getTable())) {
+ this.master.getAssignmentManager().unassign(region);
+ if (master.getAssignmentManager().getRegionStates().
+ getRegionState(region).isFailedOpen()) {
+ // If region is in FAILED_OPEN state, it won't recover, not without
+ // operator intervention... in hbase-2.0.0 at least. Continue rather
+ // than mark region as 'foundRegionsToUnassign'.
+ continue;
}
+ foundRegionsToUnassign = true;
}
}
- if (!found) {
- iter.remove();
- }
}
- try {
- manager.wait(1000);
- } catch (InterruptedException e) {
- LOG.warn("Sleep interrupted", e);
- Thread.currentThread().interrupt();
+ if (!foundRegionsToUnassign) {
+ iter.remove();
}
- } while (found);
- } finally {
- //remove from transition
- for (HostAndPort server : servers) {
- serversInTransition.remove(server);
}
- }
+ try {
+ manager.wait(1000);
+ } catch (InterruptedException e) {
+ LOG.warn("Sleep interrupted", e);
+ Thread.currentThread().interrupt();
+ }
+ } while (foundRegionsToUnassign);
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postMoveServers(servers, targetGroupName);
}
- LOG.info("Move server done: "+sourceGroupName+"->"+targetGroupName);
+ LOG.info("Move server done: " + srcGrp.getName() + "=>" + targetGroupName);
}
}
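
The reworked block above is essentially a drain loop: sweep the moved servers, unassign whatever regions are still on them (FAILED_OPEN regions are skipped because they cannot make progress), and drop a server from the worklist once a sweep finds nothing left to do. A minimal, self-contained sketch of that loop shape follows; getRegions()/unassign() are plain stand-ins rather than the HBase AssignmentManager API, and the fixed sleep stands in for the wait on the manager's monitor.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;

    public class DrainLoopSketch {
      // Stand-in for RSGroupAdminServer#getRegions(Address): regions still on the server.
      static List<String> getRegions(String server) {
        return new ArrayList<>();
      }

      // Stand-in for unassigning one region; returns false when a region cannot make
      // progress (the FAILED_OPEN case handled in the patch).
      static boolean unassign(String region) {
        return true;
      }

      static void drain(List<String> movedServers) throws InterruptedException {
        List<String> work = new ArrayList<>(movedServers);
        boolean foundRegionsToUnassign;
        do {
          foundRegionsToUnassign = false;
          for (Iterator<String> iter = work.iterator(); iter.hasNext();) {
            String rs = iter.next();
            boolean progressed = false;
            for (String region : getRegions(rs)) {
              if (unassign(region)) {
                progressed = true;
              }
            }
            if (progressed) {
              foundRegionsToUnassign = true;   // keep this server in the worklist
            } else {
              iter.remove();                   // nothing left on this server
            }
          }
          Thread.sleep(1000);                  // the patch waits on the manager instead
        } while (foundRegionsToUnassign);
      }

      public static void main(String[] args) throws InterruptedException {
        drain(new ArrayList<>(Arrays.asList("rs1:16020", "rs2:16020")));
      }
    }
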
@@ -245,6 +253,7 @@ public class RSGroupAdminServer extends RSGroupAdmin {
return;
}
RSGroupInfoManager manager = getRSGroupInfoManager();
+ // Lock the manager during below machinations.
synchronized (manager) {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preMoveTables(tables, targetGroup);
@@ -253,10 +262,10 @@ public class RSGroupAdminServer extends RSGroupAdmin {
if(targetGroup != null) {
RSGroupInfo destGroup = manager.getRSGroup(targetGroup);
if(destGroup == null) {
- throw new ConstraintException("Target " + targetGroup + " rsgroup does not exist.");
+ throw new ConstraintException("Target " + targetGroup + " RSGroup does not exist.");
}
if(destGroup.getServers().size() < 1) {
- throw new ConstraintException("Target rsgroup must have at least one server.");
+ throw new ConstraintException("Target RSGroup must have at least one server.");
}
}
@@ -264,8 +273,8 @@ public class RSGroupAdminServer extends RSGroupAdmin {
String srcGroup = manager.getRSGroupOfTable(table);
if(srcGroup != null && srcGroup.equals(targetGroup)) {
throw new ConstraintException(
- "Source rsgroup " + srcGroup + " is same as target " + targetGroup +
- " rsgroup for table " + table);
+ "Source RSGroup " + srcGroup + " is same as target " + targetGroup +
+ " RSGroup for table " + table);
}
}
manager.moveTables(tables, targetGroup);
@@ -306,12 +315,13 @@ public class RSGroupAdminServer extends RSGroupAdmin {
@Override
public void removeRSGroup(String name) throws IOException {
RSGroupInfoManager manager = getRSGroupInfoManager();
+ // Hold lock across coprocessor calls.
synchronized (manager) {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preRemoveRSGroup(name);
}
- RSGroupInfo RSGroupInfo = getRSGroupInfoManager().getRSGroup(name);
- if(RSGroupInfo == null) {
+ RSGroupInfo RSGroupInfo = manager.getRSGroup(name);
+ if (RSGroupInfo == null) {
throw new ConstraintException("RSGroup " + name + " does not exist");
}
int tableCount = RSGroupInfo.getTables().size();
@@ -321,10 +331,10 @@ public class RSGroupAdminServer extends RSGroupAdmin {
"the rsgroup can be removed.");
}
int serverCount = RSGroupInfo.getServers().size();
- if(serverCount > 0) {
+ if (serverCount > 0) {
throw new ConstraintException("RSGroup " + name + " has " + serverCount +
- " servers; you must remove these servers from the rsgroup before" +
- "the rsgroup can be removed.");
+ " servers; you must remove these servers from the RSGroup before" +
+ "the RSGroup can be removed.");
}
for (NamespaceDescriptor ns: master.getClusterSchema().getNamespaces()) {
String nsGroup = ns.getConfigurationValue(RSGroupInfo.NAMESPACEDESC_PROP_GROUP);
@@ -406,13 +416,12 @@ public class RSGroupAdminServer extends RSGroupAdmin {
}
@Override
- public RSGroupInfo getRSGroupOfServer(HostAndPort hostPort) throws IOException {
+ public RSGroupInfo getRSGroupOfServer(Address hostPort) throws IOException {
return getRSGroupInfoManager().getRSGroupOfServer(hostPort);
}
- @InterfaceAudience.Private
- public RSGroupInfoManager getRSGroupInfoManager() throws IOException {
- return rsgroupInfoManager;
+ private RSGroupInfoManager getRSGroupInfoManager() throws IOException {
+ return rsGroupInfoManager;
}
private Map rsGroupGetRegionsInTransition(String groupName)
@@ -455,7 +464,7 @@ public class RSGroupAdminServer extends RSGroupAdmin {
Map> serverMap = Maps.newHashMap();
for(ServerName serverName: master.getServerManager().getOnlineServers().keySet()) {
- if(RSGroupInfo.getServers().contains(serverName.getHostPort())) {
+ if(RSGroupInfo.getServers().contains(serverName.getAddress())) {
serverMap.put(serverName, Collections.EMPTY_LIST);
}
}
@@ -485,7 +494,7 @@ public class RSGroupAdminServer extends RSGroupAdmin {
throw new ConstraintException("RSGroup " + groupName + " does not exist.");
}
if (!RSGroupInfo.containsTable(desc.getTableName())) {
- LOG.debug("Pre-moving table " + desc.getTableName() + " to rsgroup " + groupName);
+ LOG.debug("Pre-moving table " + desc.getTableName() + " to RSGroup " + groupName);
moveTables(Sets.newHashSet(desc.getTableName()), groupName);
}
}
@@ -498,13 +507,9 @@ public class RSGroupAdminServer extends RSGroupAdmin {
moveTables(Sets.newHashSet(tableName), null);
}
} catch (ConstraintException ex) {
- LOG.debug("Failed to perform rsgroup information cleanup for table: " + tableName, ex);
+ LOG.debug("Failed to perform RSGroup information cleanup for table: " + tableName, ex);
} catch (IOException ex) {
- LOG.debug("Failed to perform rsgroup information cleanup for table: " + tableName, ex);
+ LOG.debug("Failed to perform RSGroup information cleanup for table: " + tableName, ex);
}
}
-
- @Override
- public void close() throws IOException {
- }
}
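
Before any of the moves above are attempted, the target group is validated and a ConstraintException is thrown on bad input. A compact sketch of that precondition check, built only from calls that appear in this patch (getRSGroup, getServers); the class and helper names are made up for illustration:

    import java.io.IOException;
    import org.apache.hadoop.hbase.constraint.ConstraintException;
    import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
    import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager;

    class TargetGroupCheckSketch {
      // Resolve and validate the destination RSGroup before mutating anything.
      static RSGroupInfo checkTargetGroup(RSGroupInfoManager manager, String targetGroup)
          throws IOException {
        RSGroupInfo dest = manager.getRSGroup(targetGroup);
        if (dest == null) {
          throw new ConstraintException("Target " + targetGroup + " RSGroup does not exist.");
        }
        if (dest.getServers().size() < 1) {
          throw new ConstraintException("Target RSGroup must have at least one server.");
        }
        return dest;
      }
    }
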
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
index b83a3081fb9..3de1b1d9841 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
@@ -1,6 +1,4 @@
/**
- * Copyright The Apache Software Foundation
- *
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -25,7 +23,6 @@ import com.google.common.collect.LinkedListMultimap;
import com.google.common.collect.ListMultimap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
-import com.google.common.net.HostAndPort;
import java.io.IOException;
import java.util.ArrayList;
@@ -55,6 +52,7 @@ import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer;
+import org.apache.hadoop.hbase.util.Address;
import org.apache.hadoop.util.ReflectionUtils;
/**
@@ -81,7 +79,9 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc
private Configuration config;
private ClusterStatus clusterStatus;
private MasterServices masterServices;
- private RSGroupInfoManager RSGroupInfoManager;
+ // Synchronize on access until we take the time to come up with a finer-grained
+ // locking regime.
+ private volatile RSGroupInfoManager rsGroupInfoManager;
private LoadBalancer internalBalancer;
//used during reflection by LoadBalancerFactory
@@ -91,8 +91,8 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc
//This constructor should only be used for unit testing
@InterfaceAudience.Private
- public RSGroupBasedLoadBalancer(RSGroupInfoManager RSGroupInfoManager) {
- this.RSGroupInfoManager = RSGroupInfoManager;
+ public RSGroupBasedLoadBalancer(RSGroupInfoManager rsGroupInfoManager) {
+ this.rsGroupInfoManager = rsGroupInfoManager;
}
@Override
@@ -142,14 +142,15 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc
regionPlans.add(new RegionPlan(regionInfo, null, null));
}
try {
- for (RSGroupInfo info : RSGroupInfoManager.listRSGroups()) {
+ List<RSGroupInfo> rsgi = rsGroupInfoManager.listRSGroups();
+ for (RSGroupInfo info: rsgi) {
Map> groupClusterState =
new HashMap>();
Map>> groupClusterLoad =
new HashMap>>();
- for (HostAndPort sName : info.getServers()) {
+ for (Address sName : info.getServers()) {
for(ServerName curr: clusterState.keySet()) {
- if(curr.getHostPort().equals(sName)) {
+ if(curr.getAddress().equals(sName)) {
groupClusterState.put(curr, correctedState.get(curr));
}
}
@@ -199,7 +200,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc
Set misplacedRegions = getMisplacedRegions(regions);
for (HRegionInfo region : regions.keySet()) {
if (!misplacedRegions.contains(region)) {
- String groupName = RSGroupInfoManager.getRSGroupOfTable(region.getTable());
+ String groupName = rsGroupInfoManager.getRSGroupOfTable(region.getTable());
groupToRegion.put(groupName, region);
}
}
@@ -208,7 +209,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc
for (String key : groupToRegion.keySet()) {
Map currentAssignmentMap = new TreeMap();
List regionList = groupToRegion.get(key);
- RSGroupInfo info = RSGroupInfoManager.getRSGroup(key);
+ RSGroupInfo info = rsGroupInfoManager.getRSGroup(key);
List candidateList = filterOfflineServers(info, servers);
for (HRegionInfo region : regionList) {
currentAssignmentMap.put(region, regions.get(region));
@@ -220,9 +221,8 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc
}
for (HRegionInfo region : misplacedRegions) {
- String groupName = RSGroupInfoManager.getRSGroupOfTable(
- region.getTable());
- RSGroupInfo info = RSGroupInfoManager.getRSGroup(groupName);
+ String groupName = rsGroupInfoManager.getRSGroupOfTable(region.getTable());
+ RSGroupInfo info = rsGroupInfoManager.getRSGroup(groupName);
List candidateList = filterOfflineServers(info, servers);
ServerName server = this.internalBalancer.randomAssignment(region,
candidateList);
@@ -262,14 +262,14 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc
ListMultimap serverMap) throws HBaseIOException {
try {
for (HRegionInfo region : regions) {
- String groupName = RSGroupInfoManager.getRSGroupOfTable(region.getTable());
- if(groupName == null) {
+ String groupName = rsGroupInfoManager.getRSGroupOfTable(region.getTable());
+ if (groupName == null) {
LOG.warn("Group for table "+region.getTable()+" is null");
}
regionMap.put(groupName, region);
}
for (String groupKey : regionMap.keySet()) {
- RSGroupInfo info = RSGroupInfoManager.getRSGroup(groupKey);
+ RSGroupInfo info = rsGroupInfoManager.getRSGroup(groupKey);
serverMap.putAll(groupKey, filterOfflineServers(info, servers));
if(serverMap.get(groupKey).size() < 1) {
serverMap.put(groupKey, LoadBalancer.BOGUS_SERVER_NAME);
@@ -285,7 +285,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc
if (RSGroupInfo != null) {
return filterServers(RSGroupInfo.getServers(), onlineServers);
} else {
- LOG.debug("Group Information found to be null. Some regions might be unassigned.");
+ LOG.warn("RSGroup Information found to be null. Some regions might be unassigned.");
return Collections.EMPTY_LIST;
}
}
@@ -299,12 +299,12 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc
* List of servers which are online.
* @return the list
*/
- private List<ServerName> filterServers(Collection<HostAndPort> servers,
+ private List<ServerName> filterServers(Collection<Address> servers,
Collection onlineServers) {
ArrayList finalList = new ArrayList();
- for (HostAndPort server : servers) {
+ for (Address server : servers) {
for(ServerName curr: onlineServers) {
- if(curr.getHostPort().equals(server)) {
+ if(curr.getAddress().equals(server)) {
finalList.add(curr);
}
}
@@ -312,31 +312,23 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc
return finalList;
}
- private ListMultimap groupRegions(
- List regionList) throws IOException {
- ListMultimap regionGroup = ArrayListMultimap
- .create();
- for (HRegionInfo region : regionList) {
- String groupName = RSGroupInfoManager.getRSGroupOfTable(region.getTable());
- regionGroup.put(groupName, region);
- }
- return regionGroup;
- }
-
private Set getMisplacedRegions(
Map regions) throws IOException {
Set misplacedRegions = new HashSet();
for(Map.Entry region : regions.entrySet()) {
HRegionInfo regionInfo = region.getKey();
ServerName assignedServer = region.getValue();
- RSGroupInfo info =
- RSGroupInfoManager.getRSGroup(RSGroupInfoManager.getRSGroupOfTable(regionInfo.getTable()));
+ RSGroupInfo info = rsGroupInfoManager.getRSGroup(rsGroupInfoManager.
+ getRSGroupOfTable(regionInfo.getTable()));
if (assignedServer != null &&
- (info == null || !info.containsServer(assignedServer.getHostPort()))) {
+ (info == null || !info.containsServer(assignedServer.getAddress()))) {
+ RSGroupInfo otherInfo = null;
+ synchronized (this.rsGroupInfoManager) {
+ otherInfo = rsGroupInfoManager.getRSGroupOfServer(assignedServer.getAddress());
+ }
LOG.debug("Found misplaced region: " + regionInfo.getRegionNameAsString() +
" on server: " + assignedServer +
- " found in group: " +
- RSGroupInfoManager.getRSGroupOfServer(assignedServer.getHostPort()) +
+ " found in group: " + otherInfo +
" outside of group: " + (info == null ? "UNKNOWN" : info.getName()));
misplacedRegions.add(regionInfo);
}
@@ -357,13 +349,13 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc
for (HRegionInfo region : regions) {
RSGroupInfo info = null;
try {
- info = RSGroupInfoManager.getRSGroup(
- RSGroupInfoManager.getRSGroupOfTable(region.getTable()));
+ info = rsGroupInfoManager.getRSGroup(
+ rsGroupInfoManager.getRSGroupOfTable(region.getTable()));
} catch (IOException exp) {
- LOG.debug("Group information null for region of table " + region.getTable(),
+ LOG.debug("RSGroup information null for region of table " + region.getTable(),
exp);
}
- if ((info == null) || (!info.containsServer(sName.getHostPort()))) {
+ if ((info == null) || (!info.containsServer(sName.getAddress()))) {
correctAssignments.get(LoadBalancer.BOGUS_SERVER_NAME).add(region);
} else {
correctAssignments.get(sName).add(region);
@@ -382,7 +374,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc
@Override
public void initialize() throws HBaseIOException {
try {
- if (RSGroupInfoManager == null) {
+ if (rsGroupInfoManager == null) {
List cps =
masterServices.getMasterCoprocessorHost().findCoprocessors(RSGroupAdminEndpoint.class);
if (cps.size() != 1) {
@@ -390,7 +382,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc
LOG.error(msg);
throw new HBaseIOException(msg);
}
- RSGroupInfoManager = cps.get(0).getGroupInfoManager();
+ rsGroupInfoManager = cps.get(0).getGroupInfoManager();
}
} catch (IOException e) {
throw new HBaseIOException("Failed to initialize GroupInfoManagerImpl", e);
@@ -408,7 +400,8 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc
}
public boolean isOnline() {
- return RSGroupInfoManager != null && RSGroupInfoManager.isOnline();
+ if (this.rsGroupInfoManager == null) return false;
+ return this.rsGroupInfoManager.isOnline();
}
@Override
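
The balancer changes above all reduce to one idea: group membership is now matched by comparing ServerName.getAddress() against the group's set of Address entries. A short sketch of that filtering, assuming Address has value-based equals/hashCode (which the equality checks in these hunks suggest); the class and method names are illustrative:

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.List;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.util.Address;

    class FilterServersSketch {
      // Keep only the online ServerNames whose host:port Address belongs to the group.
      static List<ServerName> filterServers(Collection<Address> groupServers,
          Collection<ServerName> onlineServers) {
        List<ServerName> result = new ArrayList<>();
        for (ServerName online : onlineServers) {
          if (groupServers.contains(online.getAddress())) {
            result.add(online);
          }
        }
        return result;
      }
    }

Calling contains() on the group's collection avoids the nested loop in filterServers(), at the cost of relying on Address hashing; it is a sketch of the matching rule, not the patch's exact code.
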
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
index ce6699f20b4..d5a971cec45 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
@@ -1,6 +1,4 @@
/**
- * Copyright The Apache Software Foundation
- *
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -20,7 +18,6 @@
package org.apache.hadoop.hbase.rsgroup;
-import com.google.common.net.HostAndPort;
import java.io.IOException;
import java.util.List;
@@ -30,6 +27,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.util.Address;
/**
* Interface used to manage RSGroupInfo storage. An implementation
@@ -65,14 +63,14 @@ public interface RSGroupInfoManager {
void removeRSGroup(String groupName) throws IOException;
/**
- * move servers to a new group.
- * @param hostPorts list of servers, must be part of the same group
+ * Move servers to a new group.
+ * @param servers list of servers, must be part of the same group
* @param srcGroup groupName being moved from
* @param dstGroup groupName being moved to
- * @return Set of servers moved (May be a subset of {@code hostPorts}).
+ * @return Set of servers moved (May be a subset of {@code servers}).
* @throws java.io.IOException on move failure
*/
- Set<HostAndPort> moveServers(Set<HostAndPort> hostPorts,
+ Set<Address> moveServers(Set<Address> servers,
String srcGroup, String dstGroup) throws IOException;
/**
@@ -81,7 +79,7 @@ public interface RSGroupInfoManager {
* @param hostPort the server
* @return An instance of RSGroupInfo
*/
- RSGroupInfo getRSGroupOfServer(HostAndPort hostPort) throws IOException;
+ RSGroupInfo getRSGroupOfServer(Address hostPort) throws IOException;
/**
* Gets the group information.
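
A brief usage sketch of the revised interface, exercising getRSGroupOfServer(Address) and the Set<Address>-based moveServers(); the group names 'legacy' and 'fast' are invented for illustration:

    import java.io.IOException;
    import java.util.Set;
    import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
    import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager;
    import org.apache.hadoop.hbase.util.Address;
    import com.google.common.collect.Sets;

    class ManagerUsageSketch {
      // Look up the group of a server and, if it is in the hypothetical "legacy" group,
      // move it to the hypothetical "fast" group.
      static void promote(RSGroupInfoManager manager, String host, int port) throws IOException {
        Address addr = Address.fromParts(host, port);
        RSGroupInfo current = manager.getRSGroupOfServer(addr);
        if (current != null && current.getName().equals("legacy")) {
          Set<Address> moved = manager.moveServers(Sets.newHashSet(addr), "legacy", "fast");
          System.out.println("Moved " + moved.size() + " server(s)");
        }
      }
    }
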
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index fccf7bf8ee4..a89d9ed9ec7 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -1,6 +1,4 @@
/**
- * Copyright The Apache Software Foundation
- *
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -20,16 +18,11 @@
package org.apache.hadoop.hbase.rsgroup;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-import com.google.common.net.HostAndPort;
-import com.google.protobuf.ServiceException;
+import static org.apache.hadoop.hbase.rsgroup.Utility.getOnlineServers;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
-import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
@@ -37,12 +30,12 @@ import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Set;
+import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.Coprocessor;
@@ -52,9 +45,9 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.ProcedureInfo;
import org.apache.hadoop.hbase.MetaTableAccessor.DefaultVisitorBase;
import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.ProcedureInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -74,23 +67,40 @@ import org.apache.hadoop.hbase.master.ServerListener;
import org.apache.hadoop.hbase.master.TableStateManager;
import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
import org.apache.hadoop.hbase.security.access.AccessControlLists;
+import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.util.Address;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;
-import static org.apache.hadoop.hbase.rsgroup.Utility.getOnlineServers;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import com.google.protobuf.ServiceException;
/**
- * This is an implementation of {@link RSGroupInfoManager}. Which makes
+ * This is an implementation of {@link RSGroupInfoManager} which makes
* use of an HBase table as the persistence store for the group information.
* It also makes use of zookeeper to store group information needed
* for bootstrapping during offline mode.
+ *
+ * Concurrency
+ * All methods are synchronized to protect against concurrent access to the contained
+ * Maps, and so that there is only one writer at a time to the backing zookeeper cache
+ * and rsgroup table.
+ *
+ * Clients of this class, the {@link RSGroupAdminEndpoint} for example, want to query and
+ * then act on the result of the query, modifying the cache in zookeeper, without another
+ * thread making intermediate modifications. Such clients synchronize on the 'this'
+ * instance so no other thread has access concurrently.
+ *
+ * TODO: Spend time cleaning up this coarse locking that is prone to error if not carefully
+ * enforced everywhere.
*/
@InterfaceAudience.Private
public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListener {
@@ -111,18 +121,18 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
}
}
- private volatile Map<String, RSGroupInfo> rsGroupMap;
- private volatile Map<TableName, String> tableMap;
- private MasterServices master;
+ private Map<String, RSGroupInfo> rsGroupMap;
+ private Map<TableName, String> tableMap;
+ private final MasterServices master;
private Table rsGroupTable;
- private ClusterConnection conn;
- private ZooKeeperWatcher watcher;
+ private final ClusterConnection conn;
+ private final ZooKeeperWatcher watcher;
private RSGroupStartupWorker rsGroupStartupWorker;
// contains list of groups that were last flushed to persistent store
- private volatile Set<String> prevRSGroups;
- private RSGroupSerDe rsGroupSerDe;
+ private Set<String> prevRSGroups;
+ private final RSGroupSerDe rsGroupSerDe;
private DefaultServerUpdater defaultServerUpdater;
- private boolean isInit = false;
+ private boolean init = false;
public RSGroupInfoManagerImpl(MasterServices master) throws IOException {
@@ -135,18 +145,18 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
prevRSGroups = new HashSet();
}
- public void init() throws IOException{
+ public synchronized void init() throws IOException{
rsGroupStartupWorker = new RSGroupStartupWorker(this, master, conn);
refresh();
rsGroupStartupWorker.start();
defaultServerUpdater = new DefaultServerUpdater(this);
master.getServerManager().registerListener(this);
defaultServerUpdater.start();
- isInit = true;
+ init = true;
}
- boolean isInit() {
- return isInit;
+ synchronized boolean isInit() {
+ return init;
}
/**
@@ -166,43 +176,48 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
flushConfig(newGroupMap);
}
+ private RSGroupInfo getRSGroupInfo(final String groupName) throws DoNotRetryIOException {
+ RSGroupInfo rsgi = null;
+ try {
+ rsgi = getRSGroup(groupName);
+ } catch (IOException ioe) {
+ // Will never happen
+ throw new DoNotRetryIOException(ioe);
+ }
+ if (rsgi == null) {
+ throw new DoNotRetryIOException("RSGroup " + groupName + " does not exist");
+ }
+ return rsgi;
+ }
+
@Override
- public synchronized Set<HostAndPort> moveServers(Set<HostAndPort> hostPorts,
- String srcGroup, String dstGroup)
+ public synchronized Set<Address> moveServers(Set<Address> servers, String srcGroup, String dstGroup)
throws IOException {
- if (!rsGroupMap.containsKey(srcGroup)) {
- throw new DoNotRetryIOException("RSGroup " + srcGroup + " does not exist");
- }
- if (!rsGroupMap.containsKey(dstGroup)) {
- throw new DoNotRetryIOException("RSGroup " + dstGroup + " does not exist");
- }
- RSGroupInfo src = new RSGroupInfo(getRSGroup(srcGroup));
- RSGroupInfo dst = new RSGroupInfo(getRSGroup(dstGroup));
- // If destination is 'default' rsgroup, make sure servers is online.
- // If not, just drop it.
- Set onlineServers = dst.getName().equals(RSGroupInfo.DEFAULT_GROUP)?
+ RSGroupInfo src = getRSGroupInfo(srcGroup);
+ RSGroupInfo dst = getRSGroupInfo(dstGroup);
+ // If destination is 'default' rsgroup, only add servers that are online. If not online, drop it.
+ // If not 'default' group, add server to dst group EVEN IF IT IS NOT online (could be a group
+ // of dead servers that are to come back later).
+ Set<Address> onlineServers = dst.getName().equals(RSGroupInfo.DEFAULT_GROUP)?
getOnlineServers(this.master): null;
- Set result = new HashSet<>(hostPorts.size());
- for (HostAndPort el: hostPorts) {
+ for (Address el: servers) {
src.removeServer(el);
if (onlineServers != null) {
- // onlineServers is non-null if 'default' rsgroup.
- // If the server is not online, drop it.
+ // onlineServers is non-null if 'default' rsgroup. If the server is not online, drop it.
if (!onlineServers.contains(el)) {
if (LOG.isDebugEnabled()) {
- LOG.debug("Dropping " + el + " during move-to-default rsgroup because it is not online");
+ LOG.debug("Dropping " + el + " during move-to-default rsgroup because not online");
}
continue;
}
}
dst.addServer(el);
- result.add(el);
}
Map newGroupMap = Maps.newHashMap(rsGroupMap);
newGroupMap.put(src.getName(), src);
newGroupMap.put(dst.getName(), dst);
flushConfig(newGroupMap);
- return result;
+ return dst.getServers();
}
/**
@@ -212,9 +227,10 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
* @return An instance of GroupInfo.
*/
@Override
- public RSGroupInfo getRSGroupOfServer(HostAndPort hostPort) throws IOException {
+ public synchronized RSGroupInfo getRSGroupOfServer(Address hostPort)
+ throws IOException {
for (RSGroupInfo info : rsGroupMap.values()) {
- if (info.containsServer(hostPort)){
+ if (info.containsServer(hostPort)) {
return info;
}
}
@@ -229,15 +245,14 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
* @return An instance of GroupInfo
*/
@Override
- public RSGroupInfo getRSGroup(String groupName) throws IOException {
- RSGroupInfo RSGroupInfo = rsGroupMap.get(groupName);
- return RSGroupInfo;
+ public synchronized RSGroupInfo getRSGroup(String groupName) throws IOException {
+ return this.rsGroupMap.get(groupName);
}
@Override
- public String getRSGroupOfTable(TableName tableName) throws IOException {
+ public synchronized String getRSGroupOfTable(TableName tableName) throws IOException {
return tableMap.get(tableName);
}
@@ -283,18 +298,17 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
}
@Override
- public List listRSGroups() throws IOException {
- List list = Lists.newLinkedList(rsGroupMap.values());
- return list;
+ public synchronized List<RSGroupInfo> listRSGroups() throws IOException {
+ return Lists.newLinkedList(rsGroupMap.values());
}
@Override
- public boolean isOnline() {
+ public synchronized boolean isOnline() {
return rsGroupStartupWorker.isOnline();
}
@Override
- public synchronized void refresh() throws IOException {
+ public void refresh() throws IOException {
refresh(false);
}
@@ -346,10 +360,9 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
// so it overwrites the default group loaded
// from region group table or zk
groupList.add(new RSGroupInfo(RSGroupInfo.DEFAULT_GROUP,
- Sets.newHashSet(getDefaultServers()),
+ Sets.newTreeSet(getDefaultServers()),
orphanTables));
-
// populate the data
HashMap newGroupMap = Maps.newHashMap();
HashMap newTableMap = Maps.newHashMap();
@@ -395,7 +408,11 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
}
return newTableMap;
}
+ private synchronized void flushConfig() throws IOException {
+ flushConfig(rsGroupMap);
+ }
+ // Called from RSGroupStartupWorker thread so synchronize
private synchronized void flushConfig(Map newGroupMap) throws IOException {
Map newTableMap;
@@ -457,6 +474,7 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
prevRSGroups.addAll(newGroupMap.keySet());
}
+ // Called by getDefaultServers. Presume it has lock in place.
private List getOnlineRS() throws IOException {
if (master != null) {
return master.getServerManager().getOnlineServersList();
@@ -473,29 +491,31 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
}
}
- private List getDefaultServers() throws IOException {
- List defaultServers = new LinkedList();
- for(ServerName server : getOnlineRS()) {
- HostAndPort hostPort = HostAndPort.fromParts(server.getHostname(), server.getPort());
+ // Called by DefaultServerUpdater. Presume it has lock on this manager when it runs.
+ private SortedSet<Address> getDefaultServers() throws IOException {
+ SortedSet<Address> defaultServers = Sets.newTreeSet();
+ for (ServerName server : getOnlineRS()) {
+ Address hostPort = Address.fromParts(server.getHostname(), server.getPort());
boolean found = false;
- for(RSGroupInfo RSGroupInfo : rsGroupMap.values()) {
+ for(RSGroupInfo RSGroupInfo: listRSGroups()) {
if(!RSGroupInfo.DEFAULT_GROUP.equals(RSGroupInfo.getName()) &&
RSGroupInfo.containsServer(hostPort)) {
found = true;
break;
}
}
- if(!found) {
+ if (!found) {
defaultServers.add(hostPort);
}
}
return defaultServers;
}
- private synchronized void updateDefaultServers(
- Set hostPort) throws IOException {
+ // Called by DefaultServerUpdater. Synchronize on this because redoing
+ // the rsGroupMap then writing it out.
+ private synchronized void updateDefaultServers(SortedSet<Address> servers) throws IOException {
RSGroupInfo info = rsGroupMap.get(RSGroupInfo.DEFAULT_GROUP);
- RSGroupInfo newInfo = new RSGroupInfo(info.getName(), hostPort, info.getTables());
+ RSGroupInfo newInfo = new RSGroupInfo(info.getName(), servers, info.getTables());
HashMap newGroupMap = Maps.newHashMap(rsGroupMap);
newGroupMap.put(newInfo.getName(), newInfo);
flushConfig(newGroupMap);
@@ -503,44 +523,40 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
@Override
public void serverAdded(ServerName serverName) {
+ // #serverChanged is internally synchronized
defaultServerUpdater.serverChanged();
}
@Override
public void serverRemoved(ServerName serverName) {
+ // #serverChanged is internally synchronized
defaultServerUpdater.serverChanged();
}
private static class DefaultServerUpdater extends Thread {
private static final Log LOG = LogFactory.getLog(DefaultServerUpdater.class);
- private RSGroupInfoManagerImpl mgr;
+ private final RSGroupInfoManagerImpl mgr;
private boolean hasChanged = false;
public DefaultServerUpdater(RSGroupInfoManagerImpl mgr) {
+ super("RSGroup.ServerUpdater");
+ setDaemon(true);
this.mgr = mgr;
}
@Override
public void run() {
- List prevDefaultServers = new LinkedList();
- while(!mgr.master.isAborted() || !mgr.master.isStopped()) {
+ SortedSet<Address> prevDefaultServers = new TreeSet<>();
+ while(isMasterRunning(this.mgr.master)) {
try {
LOG.info("Updating default servers.");
- List servers = mgr.getDefaultServers();
- Collections.sort(servers, new Comparator() {
- @Override
- public int compare(HostAndPort o1, HostAndPort o2) {
- int diff = o1.getHostText().compareTo(o2.getHostText());
- if (diff != 0) {
- return diff;
- }
- return o1.getPort() - o2.getPort();
+ synchronized (this.mgr) {
+ SortedSet<Address> servers = mgr.getDefaultServers();
+ if (!servers.equals(prevDefaultServers)) {
+ mgr.updateDefaultServers(servers);
+ prevDefaultServers = servers;
+ LOG.info("Updated with servers: "+servers.size());
}
- });
- if(!servers.equals(prevDefaultServers)) {
- mgr.updateDefaultServers(Sets.newHashSet(servers));
- prevDefaultServers = servers;
- LOG.info("Updated with servers: "+servers.size());
}
try {
synchronized (this) {
@@ -550,6 +566,7 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
hasChanged = false;
}
} catch (InterruptedException e) {
+ LOG.warn("Interrupted", e);
}
} catch (IOException e) {
LOG.warn("Failed to update default servers", e);
@@ -565,20 +582,16 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
}
}
-
private static class RSGroupStartupWorker extends Thread {
private static final Log LOG = LogFactory.getLog(RSGroupStartupWorker.class);
-
- private Configuration conf;
- private volatile boolean isOnline = false;
- private MasterServices masterServices;
- private RSGroupInfoManagerImpl groupInfoManager;
- private ClusterConnection conn;
+ private volatile boolean online = false;
+ private final MasterServices masterServices;
+ private final RSGroupInfoManagerImpl groupInfoManager;
+ private final ClusterConnection conn;
public RSGroupStartupWorker(RSGroupInfoManagerImpl groupInfoManager,
MasterServices masterServices,
ClusterConnection conn) {
- this.conf = masterServices.getConfiguration();
this.masterServices = masterServices;
this.groupInfoManager = groupInfoManager;
this.conn = conn;
@@ -588,7 +601,7 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
@Override
public void run() {
- if(waitForGroupTableOnline()) {
+ if (waitForGroupTableOnline()) {
LOG.info("GroupBasedLoadBalancer is now online");
}
}
@@ -599,25 +612,21 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
final AtomicBoolean found = new AtomicBoolean(false);
final TableStateManager tsm = masterServices.getTableStateManager();
boolean createSent = false;
- while (!found.get() && isMasterRunning()) {
+ while (!found.get() && isMasterRunning(this.masterServices)) {
foundRegions.clear();
assignedRegions.clear();
found.set(true);
try {
- final Table nsTable = conn.getTable(TableName.NAMESPACE_TABLE_NAME);
- final Table groupTable = conn.getTable(RSGROUP_TABLE_NAME);
+ conn.getTable(TableName.NAMESPACE_TABLE_NAME);
+ conn.getTable(RSGROUP_TABLE_NAME);
boolean rootMetaFound =
masterServices.getMetaTableLocator().verifyMetaRegionLocation(
- conn,
- masterServices.getZooKeeper(),
- 1);
+ conn, masterServices.getZooKeeper(), 1);
final AtomicBoolean nsFound = new AtomicBoolean(false);
if (rootMetaFound) {
-
MetaTableAccessor.Visitor visitor = new DefaultVisitorBase() {
@Override
public boolean visitInternal(Result row) throws IOException {
-
HRegionInfo info = MetaTableAccessor.getHRegionInfo(row);
if (info != null) {
Cell serverCell =
@@ -670,12 +679,12 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
MetaTableAccessor.fullScanRegions(conn, visitor);
// if no regions in meta then we have to create the table
if (foundRegions.size() < 1 && rootMetaFound && !createSent && nsFound.get()) {
- groupInfoManager.createGroupTable(masterServices);
+ groupInfoManager.createRSGroupTable(masterServices);
createSent = true;
}
- LOG.info("Group table: " + RSGROUP_TABLE_NAME + " isOnline: " + found.get()
- + ", regionCount: " + foundRegions.size() + ", assignCount: "
- + assignedRegions.size() + ", rootMetaFound: "+rootMetaFound);
+ LOG.info("RSGroup table=" + RSGROUP_TABLE_NAME + " isOnline=" + found.get()
+ + ", regionCount=" + foundRegions.size() + ", assignCount="
+ + assignedRegions.size() + ", rootMetaFound=" + rootMetaFound);
found.set(found.get() && assignedRegions.size() == foundRegions.size()
&& foundRegions.size() > 0);
} else {
@@ -685,9 +694,9 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
if (found.get()) {
LOG.debug("With group table online, refreshing cached information.");
groupInfoManager.refresh(true);
- isOnline = true;
+ online = true;
//flush any inconsistencies between ZK and HTable
- groupInfoManager.flushConfig(groupInfoManager.rsGroupMap);
+ groupInfoManager.flushConfig();
}
} catch (RuntimeException e) {
throw e;
@@ -705,15 +714,15 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
}
public boolean isOnline() {
- return isOnline;
- }
-
- private boolean isMasterRunning() {
- return !masterServices.isAborted() && !masterServices.isStopped();
+ return online;
}
}
- private void createGroupTable(MasterServices masterServices) throws IOException {
+ private static boolean isMasterRunning(MasterServices masterServices) {
+ return !masterServices.isAborted() && !masterServices.isStopped();
+ }
+
+ private void createRSGroupTable(MasterServices masterServices) throws IOException {
Long procId = masterServices.createSystemTable(RSGROUP_TABLE_DESC);
// wait for region to be online
int tries = 600;
@@ -772,4 +781,4 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
throw new ConstraintException("Group name should only contain alphanumeric characters");
}
}
-}
\ No newline at end of file
+}
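
The new class comment asks callers to hold the manager's monitor across a read-then-act sequence, which is what the test change in TestRSGroups further below exercises. A minimal sketch of a client following that rule; the helper method is hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
    import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager;

    class CoarseLockSketch {
      // Count servers in a group without another thread swapping the cached map mid-read.
      static int serverCount(RSGroupInfoManager manager, String group) throws IOException {
        synchronized (manager) {
          RSGroupInfo info = manager.getRSGroup(group);
          return info == null ? 0 : info.getServers().size();
        }
      }
    }
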
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupSerDe.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupSerDe.java
index dfc566e58ff..3cea93993c6 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupSerDe.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupSerDe.java
@@ -1,6 +1,4 @@
/**
- * Copyright The Apache Software Foundation
- *
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -21,7 +19,6 @@
package org.apache.hadoop.hbase.rsgroup;
import com.google.common.collect.Lists;
-import com.google.common.net.HostAndPort;
import java.io.ByteArrayInputStream;
import java.io.IOException;
@@ -30,15 +27,16 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
+import org.apache.hadoop.hbase.util.Address;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;
@@ -94,7 +92,7 @@ public class RSGroupSerDe {
public static RSGroupInfo toGroupInfo(RSGroupProtos.RSGroupInfo proto) {
RSGroupInfo RSGroupInfo = new RSGroupInfo(proto.getName());
for(HBaseProtos.ServerName el: proto.getServersList()) {
- RSGroupInfo.addServer(HostAndPort.fromParts(el.getHostName(), el.getPort()));
+ RSGroupInfo.addServer(Address.fromParts(el.getHostName(), el.getPort()));
}
for(HBaseProtos.TableName pTableName: proto.getTablesList()) {
RSGroupInfo.addTable(ProtobufUtil.toTableName(pTableName));
@@ -110,9 +108,9 @@ public class RSGroupSerDe {
}
List hostports =
new ArrayList(pojo.getServers().size());
- for(HostAndPort el: pojo.getServers()) {
+ for(Address el: pojo.getServers()) {
hostports.add(HBaseProtos.ServerName.newBuilder()
- .setHostName(el.getHostText())
+ .setHostName(el.getHostname())
.setPort(el.getPort())
.build());
}
@@ -120,4 +118,4 @@ public class RSGroupSerDe {
.addAllServers(hostports)
.addAllTables(tables).build();
}
-}
\ No newline at end of file
+}
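
The SerDe now maps between Address and the protobuf ServerName message using only fromParts/getHostname/getPort. A round-trip sketch using the same calls as the hunk above; the wrapper class is illustrative:

    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
    import org.apache.hadoop.hbase.util.Address;

    class AddressPbSketch {
      // Address -> protobuf ServerName, mirroring toProtoGroupInfo above.
      static HBaseProtos.ServerName toProto(Address addr) {
        return HBaseProtos.ServerName.newBuilder()
            .setHostName(addr.getHostname())
            .setPort(addr.getPort())
            .build();
      }

      // protobuf ServerName -> Address, mirroring toGroupInfo above.
      static Address fromProto(HBaseProtos.ServerName pb) {
        return Address.fromParts(pb.getHostName(), pb.getPort());
      }
    }
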
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupableBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupableBalancer.java
index ec86ddad1a7..bff392b94b0 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupableBalancer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupableBalancer.java
@@ -1,6 +1,4 @@
/**
- * Copyright The Apache Software Foundation
- *
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -23,7 +21,9 @@ package org.apache.hadoop.hbase.rsgroup;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.master.LoadBalancer;
-
+/**
+ * Marker interface. The RSGroups feature will check for a LoadBalancer
+ * marked with this interface before it runs.
+ */
@InterfaceAudience.Private
-public interface RSGroupableBalancer extends LoadBalancer {
-}
+public interface RSGroupableBalancer extends LoadBalancer {}
\ No newline at end of file
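
The new javadoc states that the RSGroups feature checks the configured LoadBalancer for this marker before it runs. As an illustration only (the actual check lives elsewhere in the rsgroup code and is not shown in this patch), such a check is a plain instanceof test:

    import org.apache.hadoop.hbase.master.LoadBalancer;
    import org.apache.hadoop.hbase.rsgroup.RSGroupableBalancer;

    class MarkerCheckSketch {
      // True only when the active balancer opted in to RSGroup-aware balancing.
      static boolean rsGroupsUsable(LoadBalancer balancer) {
        return balancer instanceof RSGroupableBalancer;
      }
    }
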
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/Utility.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/Utility.java
index d9c753238dd..95ad05e0b89 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/Utility.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/Utility.java
@@ -25,8 +25,7 @@ import java.util.Set;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.master.MasterServices;
-
-import com.google.common.net.HostAndPort;
+import org.apache.hadoop.hbase.util.Address;
/**
* Utility for this RSGroup package in hbase-rsgroup.
@@ -37,11 +36,11 @@ class Utility {
* @param master
* @return Set of online Servers named for their hostname and port (not ServerName).
*/
- static Set getOnlineServers(final MasterServices master) {
- Set onlineServers = new HashSet();
+ static Set<Address> getOnlineServers(final MasterServices master) {
+ Set<Address> onlineServers = new HashSet<>();
if (master == null) return onlineServers;
for(ServerName server: master.getServerManager().getOnlineServers().keySet()) {
- onlineServers.add(server.getHostPort());
+ onlineServers.add(server.getAddress());
}
return onlineServers;
}
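
getOnlineServers() now returns Address keys, and RSGroupInfoManagerImpl.moveServers() consults it to drop offline servers when the destination is the 'default' group. A small sketch of that membership test; Utility is package-private, so the sketch lives in the same package, and the method name is made up:

    package org.apache.hadoop.hbase.rsgroup;

    import java.util.Set;
    import org.apache.hadoop.hbase.master.MasterServices;
    import org.apache.hadoop.hbase.util.Address;

    class OnlineCheckSketch {
      // Would this server survive a move into the 'default' group right now?
      static boolean isOnline(MasterServices master, Address candidate) {
        Set<Address> online = Utility.getOnlineServers(master);
        return online.contains(candidate);
      }
    }
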
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java
index 1539f733acc..73cf85a0657 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java
@@ -1,6 +1,4 @@
/**
- * Copyright The Apache Software Foundation
- *
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -21,7 +19,6 @@ package org.apache.hadoop.hbase.master.balancer;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Lists;
-import com.google.common.net.HostAndPort;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -40,6 +37,7 @@ import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Address;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -176,12 +174,11 @@ public class TestRSGroupBasedLoadBalancer {
ServerName server = assignments.get(region);
TableName tableName = region.getTable();
- String groupName =
- getMockedGroupInfoManager().getRSGroupOfTable(tableName);
+ String groupName = getMockedGroupInfoManager().getRSGroupOfTable(tableName);
assertTrue(StringUtils.isNotEmpty(groupName));
RSGroupInfo gInfo = getMockedGroupInfoManager().getRSGroup(groupName);
assertTrue("Region is not correctly assigned to group servers.",
- gInfo.containsServer(server.getHostPort()));
+ gInfo.containsServer(server.getAddress()));
}
}
@@ -216,7 +213,7 @@ public class TestRSGroupBasedLoadBalancer {
groupName);
assertTrue(
"Region is not correctly assigned to group servers.",
- gInfo.containsServer(sn.getHostPort()));
+ gInfo.containsServer(sn.getAddress()));
}
}
ArrayListMultimap loadMap = convertToGroupBasedMap(assignments);
@@ -295,15 +292,15 @@ public class TestRSGroupBasedLoadBalancer {
groupName);
assertTrue(
"Region is not correctly assigned to group servers.",
- gInfo.containsServer(currentServer.getHostPort()));
+ gInfo.containsServer(currentServer.getAddress()));
if (oldAssignedServer != null
&& onlineHostNames.contains(oldAssignedServer
.getHostname())) {
// this region was previously assigned somewhere, and that
// host is still around, then the host must have been is a
// different group.
- if (!oldAssignedServer.getHostPort().equals(currentServer.getHostPort())) {
- assertFalse(gInfo.containsServer(oldAssignedServer.getHostPort()));
+ if (!oldAssignedServer.getAddress().equals(currentServer.getAddress())) {
+ assertFalse(gInfo.containsServer(oldAssignedServer.getAddress()));
}
}
}
@@ -346,11 +343,11 @@ public class TestRSGroupBasedLoadBalancer {
ArrayListMultimap loadMap = ArrayListMultimap
.create();
for (RSGroupInfo gInfo : getMockedGroupInfoManager().listRSGroups()) {
- Set groupServers = gInfo.getServers();
- for (HostAndPort hostPort : groupServers) {
+ Set<Address> groupServers = gInfo.getServers();
+ for (Address hostPort : groupServers) {
ServerName actual = null;
for(ServerName entry: servers) {
- if(entry.getHostPort().equals(hostPort)) {
+ if(entry.getAddress().equals(hostPort)) {
actual = entry;
break;
}
@@ -486,14 +483,14 @@ public class TestRSGroupBasedLoadBalancer {
Map groupMap = new HashMap();
for (String grpName : groups) {
RSGroupInfo RSGroupInfo = new RSGroupInfo(grpName);
- RSGroupInfo.addServer(servers.get(index).getHostPort());
+ RSGroupInfo.addServer(servers.get(index).getAddress());
groupMap.put(grpName, RSGroupInfo);
index++;
}
while (index < servers.size()) {
int grpIndex = rand.nextInt(groups.length);
groupMap.get(groups[grpIndex]).addServer(
- servers.get(index).getHostPort());
+ servers.get(index).getAddress());
index++;
}
return groupMap;
@@ -558,7 +555,7 @@ public class TestRSGroupBasedLoadBalancer {
RSGroupInfoManager gm = getMockedGroupInfoManager();
RSGroupInfo groupOfServer = null;
for(RSGroupInfo gInfo : gm.listRSGroups()){
- if(gInfo.containsServer(sn.getHostPort())){
+ if(gInfo.containsServer(sn.getAddress())){
groupOfServer = gInfo;
break;
}
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
index 957994f9fa4..94a76a8773f 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
@@ -1,6 +1,4 @@
/**
- * Copyright The Apache Software Foundation
- *
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -19,9 +17,13 @@
*/
package org.apache.hadoop.hbase.rsgroup;
-import com.google.common.collect.Sets;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.Iterator;
-import com.google.common.net.HostAndPort;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -41,6 +43,7 @@ import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Address;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.AfterClass;
@@ -50,18 +53,13 @@ import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
-import java.io.IOException;
-import java.util.Iterator;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import com.google.common.collect.Sets;
@Category({MediumTests.class})
public class TestRSGroups extends TestRSGroupsBase {
protected static final Log LOG = LogFactory.getLog(TestRSGroups.class);
private static HMaster master;
- private static boolean init = false;
+ private static boolean INIT = false;
private static RSGroupAdminEndpoint RSGroupAdminEndpoint;
@@ -93,8 +91,8 @@ public class TestRSGroups extends TestRSGroupsBase {
}
});
admin.setBalancerRunning(false,true);
- rsGroupAdmin = new VerifyingRSGroupAdminClient(rsGroupAdmin.newClient(TEST_UTIL.getConnection()),
- TEST_UTIL.getConfiguration());
+ rsGroupAdmin = new VerifyingRSGroupAdminClient(
+ new RSGroupAdminClient(TEST_UTIL.getConnection()), TEST_UTIL.getConfiguration());
RSGroupAdminEndpoint =
master.getMasterCoprocessorHost().findCoprocessors(RSGroupAdminEndpoint.class).get(0);
}
@@ -106,8 +104,8 @@ public class TestRSGroups extends TestRSGroupsBase {
@Before
public void beforeMethod() throws Exception {
- if(!init) {
- init = true;
+ if (!INIT) {
+ INIT = true;
afterMethod();
}
@@ -130,11 +128,9 @@ public class TestRSGroups extends TestRSGroupsBase {
((MiniHBaseCluster)cluster).getMaster().getServerName();
try {
- rsGroupAdmin.moveServers(
- Sets.newHashSet(masterServerName.getHostPort()),
- "master");
+ rsGroupAdmin.moveServers(Sets.newHashSet(masterServerName.getAddress()), "master");
} catch (Exception ex) {
- // ignore
+ LOG.warn("Got this on setup, FYI", ex);
}
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() {
@Override
@@ -241,10 +237,13 @@ public class TestRSGroups extends TestRSGroupsBase {
@Test
public void testGroupInfoMultiAccessing() throws Exception {
RSGroupInfoManager manager = RSGroupAdminEndpoint.getGroupInfoManager();
- final RSGroupInfo defaultGroup = manager.getRSGroup("default");
+ RSGroupInfo defaultGroup = null;
+ synchronized (manager) {
+ defaultGroup = manager.getRSGroup("default");
+ }
// getRSGroup updates default group's server list
// this process must not affect other threads iterating the list
- Iterator it = defaultGroup.getServers().iterator();
+ Iterator<Address> it = defaultGroup.getServers().iterator();
manager.getRSGroup("default");
it.next();
}
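
The tests now build the admin client directly with new RSGroupAdminClient(connection) instead of the removed factory method. A hedged sketch of client-side usage mirroring the calls exercised here; the connection is taken as a parameter and the group name is illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.rsgroup.RSGroupAdmin;
    import org.apache.hadoop.hbase.rsgroup.RSGroupAdminClient;
    import com.google.common.collect.Sets;

    class AdminClientSketch {
      // Create a group and move one server into it, identified by its Address.
      static void addAndMove(Connection conn, ServerName server) throws IOException {
        RSGroupAdmin admin = new RSGroupAdminClient(conn);
        admin.addRSGroup("my_group");
        admin.moveServers(Sets.newHashSet(server.getAddress()), "my_group");
      }
    }
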
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
index 991f6e130b6..9ad53e4a33a 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
@@ -1,6 +1,4 @@
/**
- * Copyright The Apache Software Foundation
- *
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -19,9 +17,21 @@
*/
package org.apache.hadoop.hbase.rsgroup;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-import com.google.common.net.HostAndPort;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.security.SecureRandom;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.ClusterStatus;
@@ -40,23 +50,13 @@ import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.util.Address;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Assert;
import org.junit.Test;
-import java.io.IOException;
-import java.security.SecureRandom;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
public abstract class TestRSGroupsBase {
protected static final Log LOG = LogFactory.getLog(TestRSGroupsBase.class);
@@ -85,8 +85,8 @@ public abstract class TestRSGroupsBase {
assertTrue(defaultInfo.getServers().size() >= serverCount);
gAdmin.addRSGroup(groupName);
- Set set = new HashSet();
- for(HostAndPort server: defaultInfo.getServers()) {
+ Set<Address> set = new HashSet<>();
+ for(Address server: defaultInfo.getServers()) {
if(set.size() == serverCount) {
break;
}
@@ -120,7 +120,8 @@ public abstract class TestRSGroupsBase {
}
protected void deleteGroups() throws IOException {
- RSGroupAdmin groupAdmin = rsGroupAdmin.newClient(TEST_UTIL.getConnection());
+ RSGroupAdmin groupAdmin =
+ new RSGroupAdminClient(TEST_UTIL.getConnection());
for(RSGroupInfo group: groupAdmin.listRSGroups()) {
if(!group.getName().equals(RSGroupInfo.DEFAULT_GROUP)) {
groupAdmin.moveTables(group.getTables(), RSGroupInfo.DEFAULT_GROUP);
@@ -167,7 +168,7 @@ public abstract class TestRSGroupsBase {
@Test
public void testBogusArgs() throws Exception {
assertNull(rsGroupAdmin.getRSGroupInfoOfTable(TableName.valueOf("nonexistent")));
- assertNull(rsGroupAdmin.getRSGroupOfServer(HostAndPort.fromParts("bogus",123)));
+ assertNull(rsGroupAdmin.getRSGroupOfServer(Address.fromParts("bogus",123)));
assertNull(rsGroupAdmin.getRSGroupInfo("bogus"));
try {
@@ -185,7 +186,7 @@ public abstract class TestRSGroupsBase {
}
try {
- rsGroupAdmin.moveServers(Sets.newHashSet(HostAndPort.fromParts("bogus",123)), "bogus");
+ rsGroupAdmin.moveServers(Sets.newHashSet(Address.fromParts("bogus",123)), "bogus");
fail("Expected move with bogus group to fail");
} catch(ConstraintException ex) {
//expected
@@ -276,10 +277,10 @@ public abstract class TestRSGroupsBase {
//test fail bogus server move
try {
- rsGroupAdmin.moveServers(Sets.newHashSet(HostAndPort.fromString("foo:9999")),"foo");
+ rsGroupAdmin.moveServers(Sets.newHashSet(Address.fromString("foo:9999")),"foo");
fail("Bogus servers shouldn't have been successfully moved.");
} catch(IOException ex) {
- String exp = "Server foo:9999 does not have a rsgroup";
+ String exp = "Source RSGroup for server foo:9999 does not exist.";
String msg = "Expected '"+exp+"' in exception message: ";
assertTrue(msg+" "+ex.getMessage(), ex.getMessage().contains(exp));
}
@@ -354,7 +355,7 @@ public abstract class TestRSGroupsBase {
int count = 0;
if (serverMap != null) {
for (ServerName rs : serverMap.keySet()) {
- if (newGroup.containsServer(rs.getHostPort())) {
+ if (newGroup.containsServer(rs.getAddress())) {
count += serverMap.get(rs).size();
}
}
@@ -476,7 +477,7 @@ public abstract class TestRSGroupsBase {
//get server which is not a member of new group
ServerName targetServer = null;
for(ServerName server : admin.getClusterStatus().getServers()) {
- if(!newGroup.containsServer(server.getHostPort())) {
+ if(!newGroup.containsServer(server.getAddress())) {
targetServer = server;
break;
}
@@ -486,7 +487,7 @@ public abstract class TestRSGroupsBase {
((ClusterConnection) admin.getConnection()).getAdmin(targetServer);
//move target server to group
- rsGroupAdmin.moveServers(Sets.newHashSet(targetServer.getHostPort()),
+ rsGroupAdmin.moveServers(Sets.newHashSet(targetServer.getAddress()),
newGroup.getName());
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
@@ -592,7 +593,7 @@ public abstract class TestRSGroupsBase {
return cluster.getClusterStatus().getRegionsInTransition().isEmpty();
}
});
- Set<HostAndPort> newServers = Sets.newHashSet();
+ Set<Address> newServers = Sets.newHashSet();
newServers.add(
rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().iterator().next());
rsGroupAdmin.moveServers(newServers, appInfo.getName());
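The hunks above capture the central API shift in these tests: region servers are now identified by org.apache.hadoop.hbase.util.Address rather than Guava's HostAndPort. A minimal sketch of the new idiom, assuming only the Address and RSGroupAdmin signatures visible in this patch (the class and method names of the sketch itself are illustrative, not part of the change):

import java.io.IOException;
import java.util.Set;

import com.google.common.collect.Sets;

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdmin;
import org.apache.hadoop.hbase.util.Address;

class AddressMigrationSketch {
  // Moves a single region server into targetGroup using the Address-based API.
  static void moveOneServer(RSGroupAdmin groupAdmin, ServerName sn, String targetGroup)
      throws IOException {
    // Addresses can be built from parts, parsed from "host:port", or taken straight off a
    // ServerName, which replaces the old ServerName#getHostPort() call.
    Address fromParts = Address.fromParts("bogus", 123);
    Address fromString = Address.fromString("foo:9999");
    Set<Address> servers = Sets.newHashSet(sn.getAddress());
    groupAdmin.moveServers(servers, targetGroup);
  }
}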
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java
index 4a8656c873e..a9157b68941 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java
@@ -1,6 +1,4 @@
/**
- * Copyright The Apache Software Foundation
- *
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -105,50 +103,47 @@ public class TestRSGroupsOfflineMode {
//so it gets assigned later
final TableName failoverTable = TableName.valueOf(name.getMethodName());
TEST_UTIL.createTable(failoverTable, Bytes.toBytes("f"));
-
- RSGroupAdmin groupAdmin = RSGroupAdmin.newClient(TEST_UTIL.getConnection());
-
final HRegionServer killRS = ((MiniHBaseCluster)cluster).getRegionServer(0);
final HRegionServer groupRS = ((MiniHBaseCluster)cluster).getRegionServer(1);
final HRegionServer failoverRS = ((MiniHBaseCluster)cluster).getRegionServer(2);
-
String newGroup = "my_group";
+ RSGroupAdmin groupAdmin = new RSGroupAdminClient(TEST_UTIL.getConnection());
groupAdmin.addRSGroup(newGroup);
if(master.getAssignmentManager().getRegionStates().getRegionAssignments()
- .containsValue(failoverRS.getServerName())) {
- for(HRegionInfo regionInfo: hbaseAdmin.getOnlineRegions(failoverRS.getServerName())) {
- hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(),
- Bytes.toBytes(failoverRS.getServerName().getServerName()));
- }
- LOG.info("Waiting for region unassignments on failover RS...");
+ .containsValue(failoverRS.getServerName())) {
+ for(HRegionInfo regionInfo: hbaseAdmin.getOnlineRegions(failoverRS.getServerName())) {
+ hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(),
+ Bytes.toBytes(failoverRS.getServerName().getServerName()));
+ }
+ LOG.info("Waiting for region unassignments on failover RS...");
+ TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
+ @Override
+ public boolean evaluate() throws Exception {
+ return master.getServerManager().getLoad(failoverRS.getServerName())
+ .getRegionsLoad().size() > 0;
+ }
+ });
+
+ //move server to group and make sure all tables are assigned
+ groupAdmin.moveServers(Sets.newHashSet(groupRS.getServerName().getAddress()), newGroup);
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
- return master.getServerManager().getLoad(failoverRS.getServerName())
- .getRegionsLoad().size() > 0;
+ return groupRS.getNumberOfOnlineRegions() < 1 &&
+ master.getAssignmentManager().getRegionStates().getRegionsInTransition().size() < 1;
+ }
+ });
+ //move table to group and wait
+ groupAdmin.moveTables(Sets.newHashSet(RSGroupInfoManager.RSGROUP_TABLE_NAME), newGroup);
+ LOG.info("Waiting for move table...");
+ TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
+ @Override
+ public boolean evaluate() throws Exception {
+ return groupRS.getNumberOfOnlineRegions() == 1;
}
});
}
- //move server to group and make sure all tables are assigned
- groupAdmin.moveServers(Sets.newHashSet(groupRS.getServerName().getHostPort()), newGroup);
- TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
- @Override
- public boolean evaluate() throws Exception {
- return groupRS.getNumberOfOnlineRegions() < 1 &&
- master.getAssignmentManager().getRegionStates().getRegionsInTransition().size() < 1;
- }
- });
- //move table to group and wait
- groupAdmin.moveTables(Sets.newHashSet(RSGroupInfoManager.RSGROUP_TABLE_NAME), newGroup);
- LOG.info("Waiting for move table...");
- TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
- @Override
- public boolean evaluate() throws Exception {
- return groupRS.getNumberOfOnlineRegions() == 1;
- }
- });
-
groupRS.stop("die");
//race condition here
TEST_UTIL.getHBaseCluster().getMaster().stopMaster();
@@ -167,12 +162,13 @@ public class TestRSGroupsOfflineMode {
RSGroupInfoManager groupMgr = RSGroupAdminEndpoint.getGroupInfoManager();
//make sure balancer is in offline mode, since this is what we're testing
- assertFalse(groupMgr.isOnline());
- //verify the group affiliation that's loaded from ZK instead of tables
- assertEquals(newGroup,
+ synchronized (groupMgr) {
+ assertFalse(groupMgr.isOnline());
+ //verify the group affiliation that's loaded from ZK instead of tables
+ assertEquals(newGroup,
groupMgr.getRSGroupOfTable(RSGroupInfoManager.RSGROUP_TABLE_NAME));
- assertEquals(RSGroupInfo.DEFAULT_GROUP, groupMgr.getRSGroupOfTable(failoverTable));
-
+ assertEquals(RSGroupInfo.DEFAULT_GROUP, groupMgr.getRSGroupOfTable(failoverTable));
+ }
//kill final regionserver to see the failover happens for all tables
//except GROUP table since it's group does not have any online RS
killRS.stop("die");
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
index 441cc393289..0732d7c8e90 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
@@ -1,6 +1,4 @@
/**
- * Copyright The Apache Software Foundation
- *
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -21,7 +19,6 @@ package org.apache.hadoop.hbase.rsgroup;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
-import com.google.common.net.HostAndPort;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -32,6 +29,7 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
+import org.apache.hadoop.hbase.util.Address;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;
@@ -44,10 +42,9 @@ import java.util.Map;
import java.util.Set;
@InterfaceAudience.Private
-public class VerifyingRSGroupAdminClient extends RSGroupAdmin {
+public class VerifyingRSGroupAdminClient implements RSGroupAdmin {
private Table table;
private ZooKeeperWatcher zkw;
- private RSGroupSerDe serDe;
private RSGroupAdmin wrapped;
public VerifyingRSGroupAdminClient(RSGroupAdmin RSGroupAdmin, Configuration conf)
@@ -55,7 +52,6 @@ public class VerifyingRSGroupAdminClient extends RSGroupAdmin {
wrapped = RSGroupAdmin;
table = ConnectionFactory.createConnection(conf).getTable(RSGroupInfoManager.RSGROUP_TABLE_NAME);
zkw = new ZooKeeperWatcher(conf, this.getClass().getSimpleName(), null);
- serDe = new RSGroupSerDe();
}
@Override
@@ -75,7 +71,7 @@ public class VerifyingRSGroupAdminClient extends RSGroupAdmin {
}
@Override
- public void moveServers(Set<HostAndPort> servers, String targetGroup) throws IOException {
+ public void moveServers(Set<Address> servers, String targetGroup) throws IOException {
wrapped.moveServers(servers, targetGroup);
verify();
}
@@ -103,7 +99,7 @@ public class VerifyingRSGroupAdminClient extends RSGroupAdmin {
}
@Override
- public RSGroupInfo getRSGroupOfServer(HostAndPort hostPort) throws IOException {
+ public RSGroupInfo getRSGroupOfServer(Address hostPort) throws IOException {
return wrapped.getRSGroupOfServer(hostPort);
}
@@ -144,8 +140,4 @@ public class VerifyingRSGroupAdminClient extends RSGroupAdmin {
throw new IOException("ZK verification failed", e);
}
}
-
- @Override
- public void close() throws IOException {
- }
-}
+}
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
index a0a1d497a13..8cb41b60840 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
@@ -19,7 +19,6 @@
package org.apache.hadoop.hbase.coprocessor;
-import com.google.common.net.HostAndPort;
import java.io.IOException;
import java.util.List;
@@ -44,6 +43,7 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.util.Address;
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
@@ -782,7 +782,7 @@ public class BaseMasterAndRegionObserver extends BaseRegionObserver
}
@Override
- public void postMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx, Set<HostAndPort>
+ public void postMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx, Set<Address>
servers, String targetGroup) throws IOException {
}
@@ -808,7 +808,7 @@ public class BaseMasterAndRegionObserver extends BaseRegionObserver
@Override
public void preMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
- Set<HostAndPort> servers, String targetGroup) throws IOException {
+ Set<Address> servers, String targetGroup) throws IOException {
}
@Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
index 98f21b23bbd..6e9a2ad6f8d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
@@ -19,7 +19,6 @@
package org.apache.hadoop.hbase.coprocessor;
-import com.google.common.net.HostAndPort;
import java.io.IOException;
import java.util.List;
@@ -44,6 +43,7 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.util.Address;
@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.CONFIG})
@InterfaceStability.Evolving
@@ -1113,12 +1113,12 @@ public class BaseMasterObserver implements MasterObserver {
}
@Override
- public void preMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx, Set<HostAndPort>
+ public void preMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx, Set<Address>
servers, String targetGroup) throws IOException {
}
@Override
- public void postMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx, Set<HostAndPort>
+ public void postMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx, Set<Address>
servers, String targetGroup) throws IOException {
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index bb0e7329996..1d0c467d42b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -44,8 +44,8 @@ import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.util.Address;
-import com.google.common.net.HostAndPort;
/**
* Defines coprocessor hooks for interacting with operations on the
@@ -1750,7 +1750,7 @@ public interface MasterObserver extends Coprocessor {
* @throws IOException on failure
*/
void preMoveServers(final ObserverContext<MasterCoprocessorEnvironment> ctx,
- Set<HostAndPort> servers, String targetGroup) throws IOException;
+ Set<Address> servers, String targetGroup) throws IOException;
/**
* Called after servers are moved to target region server group
@@ -1760,7 +1760,7 @@ public interface MasterObserver extends Coprocessor {
* @throws IOException on failure
*/
void postMoveServers(final ObserverContext<MasterCoprocessorEnvironment> ctx,
- Set<HostAndPort> servers, String targetGroup) throws IOException;
+ Set<Address> servers, String targetGroup) throws IOException;
/**
* Called before tables are moved to target region server group
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
index 1472a919dac..277dcc8b7bb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
@@ -21,9 +21,6 @@ package org.apache.hadoop.hbase.master;
import java.util.List;
import java.util.Map;
-import edu.umd.cs.findbugs.annotations.Nullable;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.conf.ConfigurationObserver;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
@@ -32,6 +29,10 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.conf.ConfigurationObserver;
+
+import edu.umd.cs.findbugs.annotations.Nullable;
/**
* Makes decisions about the placement and movement of Regions across
@@ -48,8 +49,8 @@ import org.apache.hadoop.hbase.TableName;
@InterfaceAudience.Private
public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObserver {
- //used to signal to the caller that the region(s) cannot be assigned
- ServerName BOGUS_SERVER_NAME = ServerName.parseServerName("localhost,1,1");
+ // Used to signal to the caller that the region(s) cannot be assigned
+ static final ServerName BOGUS_SERVER_NAME = ServerName.valueOf("bogus.example.com,1,1");
/**
* Set the current cluster status. This allows a LoadBalancer to map host name to a server
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 28a855046c8..70d749e6eb5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -54,8 +54,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-
-import com.google.common.net.HostAndPort;
+import org.apache.hadoop.hbase.util.Address;
/**
* Provides the coprocessor framework and environment for master oriented
@@ -1512,7 +1511,7 @@ public class MasterCoprocessorHost
return bypass;
}
- public void preMoveServers(final Set<HostAndPort> servers, final String targetGroup)
+ public void preMoveServers(final Set<Address> servers, final String targetGroup)
throws IOException {
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
@Override
@@ -1525,7 +1524,7 @@ public class MasterCoprocessorHost
});
}
- public void postMoveServers(final Set<HostAndPort> servers, final String targetGroup)
+ public void postMoveServers(final Set<Address> servers, final String targetGroup)
throws IOException {
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
@Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index d4a390a07b9..58933230851 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -17,11 +17,6 @@
*/
package org.apache.hadoop.hbase.master.balancer;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Joiner;
-import com.google.common.collect.ArrayListMultimap;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
@@ -39,6 +34,7 @@ import java.util.Random;
import java.util.Set;
import java.util.TreeMap;
import java.util.function.Predicate;
+
import org.apache.commons.lang.NotImplementedException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -47,7 +43,6 @@ import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
@@ -60,6 +55,12 @@ import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.T
import org.apache.hadoop.hbase.security.access.AccessControlLists;
import org.apache.hadoop.util.StringUtils;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+
/**
* The base class for load balancers. It provides the the functions used to by
* {@link org.apache.hadoop.hbase.master.AssignmentManager} to assign regions
@@ -1348,7 +1349,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
int numServers = servers == null ? 0 : servers.size();
if (numServers == 0) {
- LOG.warn("Wanted to do retain assignment but no servers to assign to");
+ LOG.warn("Wanted to retain assignment but no servers to assign to");
return null;
}
if (numServers == 1) { // Only one server, nothing fancy we can do here
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index f68ae94ae64..aeffe1f1b41 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -120,6 +120,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.SimpleMutableByteRange;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.hbase.util.Address;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.ImmutableSet;
@@ -128,7 +129,6 @@ import com.google.common.collect.Lists;
import com.google.common.collect.MapMaker;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
-import com.google.common.net.HostAndPort;
import com.google.protobuf.Message;
import com.google.protobuf.RpcCallback;
import com.google.protobuf.RpcController;
@@ -2671,7 +2671,7 @@ public class AccessController extends BaseMasterAndRegionObserver
@Override
public void preMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
- Set<HostAndPort> servers, String targetGroup) throws IOException {
+ Set<Address> servers, String targetGroup) throws IOException {
requirePermission(getActiveUser(ctx), "moveServers", Action.ADMIN);
}
@@ -2773,4 +2773,4 @@ public class AccessController extends BaseMasterAndRegionObserver
throw new DoNotRetryIOException("Invalid lock level when requesting permissions.");
}
}
-}
\ No newline at end of file
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
index ea7c07ba906..35a1176bd18 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
@@ -255,7 +255,8 @@ public abstract class AbstractFSWALProvider<T extends AbstractFSWAL<?>> implemen
}
/**
- * Construct the directory name for all WALs on a given server.
+ * Construct the directory name for all WALs on a given server. Dir names currently look like
+ * this for WALs: <code>hbase//WALs/kalashnikov.att.net,61634,1486865297088</code>.
* @param serverName Server name formatted as described in {@link ServerName}
* @return the relative WAL directory name, e.g. <code>.logs/1.example.org,60030,12345</code> if
* <code>serverName</code> passed is <code>1.example.org,60030,12345</code>
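The directory-name convention documented above can be reproduced with the provider's helper; a minimal sketch, where the printed value is an assumption about the current layout rather than verified output:

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;

class WalDirNameSketch {
  public static void main(String[] args) {
    // A ServerName renders as "hostname,port,startcode"; the per-server WAL dir is named after it.
    ServerName sn = ServerName.valueOf("1.example.org", 60030, 12345L);
    // Expected to print something like "WALs/1.example.org,60030,12345".
    System.out.println(AbstractFSWALProvider.getWALDirectoryName(sn.toString()));
  }
}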
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 19ab9559e1e..2cf2c6b69a9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -309,6 +309,8 @@ public class WALSplitter {
outputSinkStarted = true;
Entry entry;
Long lastFlushedSequenceId = -1L;
+ // THIS IS BROKEN!!!! GETTING SERVERNAME FROM PATH IS NOT GOING TO WORK IF LAYOUT CHANGES!!!
+ // TODO: Fix.
ServerName serverName = AbstractFSWALProvider.getServerNameFromWALDirectoryName(logPath);
failedServerName = (serverName == null) ? "" : serverName.getServerName();
while ((entry = getNextLogLine(in, logPath, skipErrors)) != null) {
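The warning comment above refers to the splitter recovering the failed server by parsing the WAL path itself. A minimal sketch of that dependency, using a made-up path that follows the WALs/&lt;servername&gt; layout assumed elsewhere in this patch:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;

class ServerNameFromWalPathSketch {
  public static void main(String[] args) {
    // The parent directory name encodes the ServerName; a layout change would break this parse.
    Path logPath = new Path("/hbase/WALs/1.example.org,60030,12345/1.example.org%2C60030%2C12345.0");
    ServerName serverName = AbstractFSWALProvider.getServerNameFromWALDirectoryName(logPath);
    System.out.println(serverName == null ? "" : serverName.getServerName());
  }
}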
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerName.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerName.java
index cde26e65b92..d1b2145224c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerName.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerName.java
@@ -91,23 +91,14 @@ public class TestServerName {
assertEquals(sn.hashCode(), sn2.hashCode());
assertNotSame(sn.hashCode(), sn3.hashCode());
assertEquals(sn.toString(),
- ServerName.getServerName("www.example.org", 1234, 5678));
+ ServerName.valueOf("www.example.org", 1234, 5678).toString());
assertEquals(sn.toString(),
- ServerName.getServerName("www.example.org:1234", 5678));
+ ServerName.valueOf("www.example.org:1234", 5678).toString());
assertEquals(sn.toString(),
"www.example.org" + ServerName.SERVERNAME_SEPARATOR + "1234" +
ServerName.SERVERNAME_SEPARATOR + "5678");
}
- @Test
- public void getServerStartcodeFromServerName() {
- ServerName sn = ServerName.valueOf("www.example.org", 1234, 5678);
- assertEquals(5678,
- ServerName.getServerStartcodeFromServerName(sn.toString()));
- assertNotSame(5677,
- ServerName.getServerStartcodeFromServerName(sn.toString()));
- }
-
@Test
public void testHostNameCaseSensitivity() {
ServerName lower = ServerName.valueOf("www.example.org", 1234, 5678);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
index 7633fcede59..eb15d919c49 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
@@ -766,16 +766,6 @@ public class TestAdmin2 {
assertTrue(procList.length >= 0);
}
- /*
- * Test that invalid draining server names (invalid start code) don't get added to drain list.
- */
- @Test(timeout = 10000, expected = IllegalArgumentException.class)
- public void testCheckDrainServerName() throws Exception {
- List<ServerName> servers = new ArrayList<>();
- servers.add(ServerName.parseServerName("127.0.0.1:123"));
- admin.drainRegionServers(servers);
- }
-
/*
* This test drains all regions so cannot be run in parallel with other tests.
*/
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
index cbbc5a5fb3d..4e8b5271b6c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
@@ -76,8 +76,8 @@ import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
+import org.apache.hadoop.hbase.util.Address;
-import com.google.common.net.HostAndPort;
/**
* Tests invocation of the {@link org.apache.hadoop.hbase.coprocessor.MasterObserver}
@@ -1462,12 +1462,12 @@ public class TestMasterObserver {
@Override
public void preMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
- Set<HostAndPort> servers, String targetGroup) throws IOException {
+ Set<Address> servers, String targetGroup) throws IOException {
}
@Override
public void postMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
- Set<HostAndPort> servers, String targetGroup) throws IOException {
+ Set<Address> servers, String targetGroup) throws IOException {
}
@Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java
index b518605579a..2ab91c1ea79 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -98,10 +99,8 @@ public class TestWALObserver {
private Configuration conf;
private FileSystem fs;
- private Path dir;
private Path hbaseRootDir;
private Path hbaseWALRootDir;
- private String logName;
private Path oldLogDir;
private Path logDir;
private WALFactory wals;
@@ -137,12 +136,12 @@ public class TestWALObserver {
this.fs = TEST_UTIL.getDFSCluster().getFileSystem();
this.hbaseRootDir = FSUtils.getRootDir(conf);
this.hbaseWALRootDir = FSUtils.getWALRootDir(conf);
- this.dir = new Path(this.hbaseRootDir, TestWALObserver.class.getName());
this.oldLogDir = new Path(this.hbaseWALRootDir,
HConstants.HREGION_OLDLOGDIR_NAME);
+ String serverName = ServerName.valueOf(currentTest.getMethodName(), 16010,
+ System.currentTimeMillis()).toString();
this.logDir = new Path(this.hbaseWALRootDir,
- AbstractFSWALProvider.getWALDirectoryName(currentTest.getMethodName()));
- this.logName = HConstants.HREGION_LOGDIR_NAME;
+ AbstractFSWALProvider.getWALDirectoryName(serverName));
if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) {
TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
@@ -150,7 +149,7 @@ public class TestWALObserver {
if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseWALRootDir)) {
TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseWALRootDir, true);
}
- this.wals = new WALFactory(conf, null, currentTest.getMethodName());
+ this.wals = new WALFactory(conf, null, serverName);
}
@After
@@ -351,7 +350,8 @@ public class TestWALObserver {
LOG.info("WALSplit path == " + p);
FileSystem newFS = FileSystem.get(newConf);
// Make a new wal for new region open.
- final WALFactory wals2 = new WALFactory(conf, null, currentTest.getMethodName()+"2");
+ final WALFactory wals2 = new WALFactory(conf, null,
+ ServerName.valueOf(currentTest.getMethodName()+"2", 16010, System.currentTimeMillis()).toString());
WAL wal2 = wals2.getWAL(UNSPECIFIED_REGION, null);;
HRegion region = HRegion.openHRegion(newConf, FileSystem.get(newConf), hbaseRootDir,
hri, htd, wal2, TEST_UTIL.getHBaseCluster().getRegionServer(0), null);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
index d59fa06dd56..d0b8494aec2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
@@ -1136,7 +1136,8 @@ public class TestDistributedLogSplitting {
startCluster(1);
final SplitLogManager slm = master.getMasterWalManager().getSplitLogManager();
final FileSystem fs = master.getMasterFileSystem().getFileSystem();
- final Path logDir = new Path(FSUtils.getRootDir(conf), "x");
+ final Path logDir = new Path(new Path(FSUtils.getRootDir(conf), HConstants.HREGION_LOGDIR_NAME),
+ ServerName.valueOf("x", 1, 1).toString());
fs.mkdirs(logDir);
ExecutorService executor = null;
try {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java
index 022e7b6ea46..4c7bc54d308 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java
@@ -516,8 +516,8 @@ public class TestSplitLogManager {
LOG.info("testEmptyLogDir");
slm = new SplitLogManager(master, conf);
FileSystem fs = TEST_UTIL.getTestFileSystem();
- Path emptyLogDirPath = new Path(fs.getWorkingDirectory(),
- UUID.randomUUID().toString());
+ Path emptyLogDirPath = new Path(new Path(fs.getWorkingDirectory(), HConstants.HREGION_LOGDIR_NAME),
+ ServerName.valueOf("emptyLogDir", 1, 1).toString());
fs.mkdirs(emptyLogDirPath);
slm.splitLogDistributed(emptyLogDirPath);
assertFalse(fs.exists(emptyLogDirPath));
@@ -530,10 +530,11 @@ public class TestSplitLogManager {
FileSystem fs = TEST_UTIL.getTestFileSystem();
Path dir = TEST_UTIL.getDataTestDirOnTestFS("testLogFilesAreArchived");
conf.set(HConstants.HBASE_DIR, dir.toString());
- Path logDirPath = new Path(dir, UUID.randomUUID().toString());
+ String serverName = ServerName.valueOf("foo", 1, 1).toString();
+ Path logDirPath = new Path(new Path(dir, HConstants.HREGION_LOGDIR_NAME), serverName);
fs.mkdirs(logDirPath);
// create an empty log file
- String logFile = ServerName.valueOf("foo", 1, 1).toString();
+ String logFile = new Path(logDirPath, UUID.randomUUID().toString()).toString();
fs.create(new Path(logDirPath, logFile)).close();
// spin up a thread mocking split done.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
index 563810cc5b5..9ac07d7b2d9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
@@ -147,7 +147,10 @@ public abstract class AbstractTestWALReplay {
this.fs = TEST_UTIL.getDFSCluster().getFileSystem();
this.hbaseRootDir = FSUtils.getRootDir(this.conf);
this.oldLogDir = new Path(this.hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
- this.logName = AbstractFSWALProvider.getWALDirectoryName(currentTest.getMethodName() + "-manual");
+ String serverName =
+ ServerName.valueOf(currentTest.getMethodName() + "-manual", 16010,
+ System.currentTimeMillis()).toString();
+ this.logName = AbstractFSWALProvider.getWALDirectoryName(serverName);
this.logDir = new Path(this.hbaseRootDir, logName);
if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) {
TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java
index f80d66be95f..f976b492711 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java
@@ -19,7 +19,6 @@
package org.apache.hadoop.hbase.regionserver.wal;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
@@ -32,6 +31,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Increment;
@@ -109,7 +109,8 @@ public class TestDurability {
@Test
public void testDurability() throws Exception {
- final WALFactory wals = new WALFactory(CONF, null, "TestDurability");
+ final WALFactory wals = new WALFactory(CONF, null, ServerName.valueOf("TestDurability",
+ 16010, System.currentTimeMillis()).toString());
byte[] tableName = Bytes.toBytes("TestDurability");
final WAL wal = wals.getWAL(tableName, null);
HRegion region = createHRegion(tableName, "region", wal, Durability.USE_DEFAULT);
@@ -172,7 +173,8 @@ public class TestDurability {
byte[] col3 = Bytes.toBytes("col3");
// Setting up region
- final WALFactory wals = new WALFactory(CONF, null, "TestIncrement");
+ final WALFactory wals = new WALFactory(CONF, null,
+ ServerName.valueOf("TestIncrement", 16010, System.currentTimeMillis()).toString());
byte[] tableName = Bytes.toBytes("TestIncrement");
final WAL wal = wals.getWAL(tableName, null);
HRegion region = createHRegion(tableName, "increment", wal, Durability.USE_DEFAULT);
@@ -238,7 +240,9 @@ public class TestDurability {
byte[] col1 = Bytes.toBytes("col1");
// Setting up region
- final WALFactory wals = new WALFactory(CONF, null, "testIncrementWithReturnResultsSetToFalse");
+ final WALFactory wals = new WALFactory(CONF, null,
+ ServerName.valueOf("testIncrementWithReturnResultsSetToFalse", 16010,
+ System.currentTimeMillis()).toString());
byte[] tableName = Bytes.toBytes("testIncrementWithReturnResultsSetToFalse");
final WAL wal = wals.getWAL(tableName, null);
HRegion region = createHRegion(tableName, "increment", wal, Durability.USE_DEFAULT);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java
index b47d823968a..22395c8ad62 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Put;
@@ -177,7 +178,8 @@ public class TestLogRollAbort {
@Test (timeout=300000)
public void testLogRollAfterSplitStart() throws IOException {
LOG.info("Verify wal roll after split starts will fail.");
- String logName = "testLogRollAfterSplitStart";
+ String logName = ServerName.valueOf("testLogRollAfterSplitStart",
+ 16010, System.currentTimeMillis()).toString();
Path thisTestsDir = new Path(HBASELOGDIR, AbstractFSWALProvider.getWALDirectoryName(logName));
final WALFactory wals = new WALFactory(conf, null, logName);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
index b63aa1785b3..e2aa580422f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
@@ -48,4 +48,4 @@ public class TestWALReplay extends AbstractTestWALReplay {
HBaseTestingUtility.setMaxRecoveryErrorCount(wal.getOutputStream(), 1);
return wal;
}
-}
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java
index 985dd623f6a..3318f61b087 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver;
@@ -88,6 +89,7 @@ public class TestWALFactory {
protected FileSystem fs;
protected Path dir;
protected WALFactory wals;
+ private ServerName currentServername;
@Rule
public final TestName currentTest = new TestName();
@@ -96,7 +98,8 @@ public class TestWALFactory {
public void setUp() throws Exception {
fs = cluster.getFileSystem();
dir = new Path(hbaseDir, currentTest.getMethodName());
- wals = new WALFactory(conf, null, currentTest.getMethodName());
+ this.currentServername = ServerName.valueOf(currentTest.getMethodName(), 16010, 1);
+ wals = new WALFactory(conf, null, this.currentServername.toString());
}
@After
@@ -165,9 +168,6 @@ public class TestWALFactory {
final TableName tableName = TableName.valueOf(currentTest.getMethodName());
final byte [] rowName = tableName.getName();
final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
- final Path logdir = new Path(hbaseWALDir,
- AbstractFSWALProvider.getWALDirectoryName(currentTest.getMethodName()));
- Path oldLogDir = new Path(hbaseWALDir, HConstants.HREGION_OLDLOGDIR_NAME);
final int howmany = 3;
HRegionInfo[] infos = new HRegionInfo[3];
Path tabledir = FSUtils.getTableDir(hbaseWALDir, tableName);
@@ -209,7 +209,13 @@ public class TestWALFactory {
}
}
wals.shutdown();
- List<Path> splits = WALSplitter.split(hbaseWALDir, logdir, oldLogDir, fs, conf, wals);
+ // The below calculation of logDir relies on insider information... WALSplitter should be connected better
+ // with the WAL system... not requiring an explicit path. The oldLogDir is just made up; it is not used.
+ Path logDir =
+ new Path(new Path(hbaseWALDir, HConstants.HREGION_LOGDIR_NAME),
+ this.currentServername.toString());
+ Path oldLogDir = new Path(hbaseDir, HConstants.HREGION_OLDLOGDIR_NAME);
+ List<Path> splits = WALSplitter.split(hbaseWALDir, logDir, oldLogDir, fs, conf, wals);
verifySplits(splits, howmany);
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.java
index 0562fd98ae9..e866d48c9a8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.OffheapKeyValue;
+import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
@@ -208,7 +209,9 @@ public class TestWALReaderOnSecureWAL {
WALProvider.Writer.class);
conf.setBoolean(WAL_ENCRYPTION, false);
FileSystem fs = TEST_UTIL.getTestFileSystem();
- final WALFactory wals = new WALFactory(conf, null, currentTest.getMethodName());
+ final WALFactory wals = new WALFactory(conf, null,
+ ServerName.valueOf(currentTest.getMethodName(), 16010,
+ System.currentTimeMillis()).toString());
Path walPath = writeWAL(wals, currentTest.getMethodName(), false);
// Ensure edits are plaintext
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
index 2f69c4cca3f..3b15cefd01c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
@@ -192,7 +192,9 @@ public class TestWALSplit {
this.mode = (conf.getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false) ?
RecoveryMode.LOG_REPLAY : RecoveryMode.LOG_SPLITTING);
wals = new WALFactory(conf, null, name.getMethodName());
- WALDIR = new Path(HBASELOGDIR, AbstractFSWALProvider.getWALDirectoryName(name.getMethodName()));
+ WALDIR = new Path(HBASELOGDIR,
+ AbstractFSWALProvider.getWALDirectoryName(ServerName.valueOf(name.getMethodName(),
+ 16010, System.currentTimeMillis()).toString()));
//fs.mkdirs(WALDIR);
}
diff --git a/hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb b/hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb
index c654f23cb08..4414271739d 100644
--- a/hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb
@@ -1,6 +1,3 @@
-#
-# Copyright The Apache Software Foundation
-#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
@@ -29,7 +26,7 @@ module Hbase
include HBaseConstants
def initialize(connection)
- @admin = org.apache.hadoop.hbase.rsgroup.RSGroupAdmin.newClient(connection)
+ @admin = org.apache.hadoop.hbase.rsgroup.RSGroupAdminClient.new(connection)
end
def close
@@ -108,7 +105,7 @@ module Hbase
def move_servers(dest, *args)
servers = java.util.HashSet.new
args[0].each do |s|
- servers.add(com.google.common.net.HostAndPort.fromString(s))
+ servers.add(org.apache.hadoop.hbase.util.Address.fromString(s))
end
@admin.moveServers(servers, dest)
end
@@ -127,7 +124,7 @@ module Hbase
# get group of server
def get_rsgroup_of_server(server)
res = @admin.getRSGroupOfServer(
- com.google.common.net.HostAndPort.fromString(server))
+ org.apache.hadoop.hbase.util.Address.fromString(server))
if res.nil?
raise(ArgumentError,'Server has no group: ' + server)
end
diff --git a/hbase-shell/src/main/ruby/shell/commands/balance_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/balance_rsgroup.rb
index 92faefbb947..5f7e561fcc0 100644
--- a/hbase-shell/src/main/ruby/shell/commands/balance_rsgroup.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/balance_rsgroup.rb
@@ -1,6 +1,3 @@
-#
-# Copyright The Apache Software Foundation
-#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb
index ab6e72a1a69..15a363bf86e 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb
@@ -1,6 +1,3 @@
-#
-# Copyright The Apache Software Foundation
-#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
index fd2ccc7f42f..61c13a2c117 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
@@ -1,6 +1,3 @@
-#
-# Copyright The Apache Software Foundation
-#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb
index 9684687727d..c03af1fa856 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb
@@ -1,6 +1,3 @@
-#
-# Copyright The Apache Software Foundation
-#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb b/hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb
index 393797dca26..7a07b53961d 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb
@@ -1,6 +1,3 @@
-#
-# Copyright The Apache Software Foundation
-#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
diff --git a/hbase-shell/src/main/ruby/shell/commands/move_servers_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/move_servers_rsgroup.rb
index 1e830f94d80..61fcd02447f 100644
--- a/hbase-shell/src/main/ruby/shell/commands/move_servers_rsgroup.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/move_servers_rsgroup.rb
@@ -1,6 +1,3 @@
-#
-# Copyright The Apache Software Foundation
-#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
diff --git a/hbase-shell/src/main/ruby/shell/commands/move_tables_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/move_tables_rsgroup.rb
index 4828bcfe382..c6853eafcce 100644
--- a/hbase-shell/src/main/ruby/shell/commands/move_tables_rsgroup.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/move_tables_rsgroup.rb
@@ -1,6 +1,3 @@
-#
-# Copyright The Apache Software Foundation
-#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
diff --git a/hbase-shell/src/main/ruby/shell/commands/remove_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/remove_rsgroup.rb
index ea0d1cda2e7..f200ff8334d 100644
--- a/hbase-shell/src/main/ruby/shell/commands/remove_rsgroup.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/remove_rsgroup.rb
@@ -1,6 +1,3 @@
-#
-# Copyright The Apache Software Foundation
-#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
diff --git a/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb b/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb
index f4ff83c720a..b7cfb85f262 100644
--- a/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb
+++ b/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb
@@ -27,7 +27,7 @@ module Hbase
@shell = Shell::Shell.new(@hbase)
connection = $TEST_CLUSTER.getConnection
@rsgroup_admin =
- org.apache.hadoop.hbase.rsgroup.RSGroupAdmin.newClient(connection)
+ org.apache.hadoop.hbase.rsgroup.RSGroupAdminClient.new(connection)
end
define_test 'Test Basic RSGroup Commands' do