HBASE-27304 Support using IP to expose master/rs servers for some special scenarios (#4713)

Signed-off-by: Duo Zhang <zhangduo@apache.org>
Author: LiangJun He, 2022-09-22 10:17:08 +08:00 (committed by GitHub)
parent de127bde84
commit cc4268a7d2
7 changed files with 335 additions and 4 deletions

org/apache/hadoop/hbase/HConstants.java

@@ -1571,6 +1571,22 @@ public final class HConstants {
    */
   public static final int BATCH_ROWS_THRESHOLD_DEFAULT = 5000;
 
+  /**
+   * In some scenarios, such as the elastic scaling scenario on the cloud, the HBase client may not
+   * be able to resolve the hostname of a newly added node. If the network is interconnected, the
+   * client can still access the HBase cluster nodes through their ip. However, since the HBase
+   * client obtains the Master/RS address info from either ZK or the meta table, the Master/RS of
+   * the HBase cluster needs to expose its service with ip instead of hostname. We use hostname by
+   * default, but also provide this config to control whether the Master/RS service is exposed with
+   * ip. See HBASE-27304 for details.
+   */
+  public final static String HBASE_SERVER_USEIP_ENABLED_KEY = "hbase.server.useip.enabled";
+
+  /**
+   * Default value of {@link #HBASE_SERVER_USEIP_ENABLED_KEY}
+   */
+  public final static boolean HBASE_SERVER_USEIP_ENABLED_DEFAULT = false;
+
   private HConstants() {
     // Can't be instantiated with this ctor.
   }
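As a usage note: enabling the new flag from client or test code only requires setting the boolean on the Configuration used to start the processes, which is exactly what the test classes later in this commit do; a production deployment would put the same property in hbase-site.xml. A minimal sketch of the programmatic form (the class name here is hypothetical, for illustration only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class EnableUseIpExample { // hypothetical example class
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Opt in to exposing the Master/RS service by ip; the default,
    // HBASE_SERVER_USEIP_ENABLED_DEFAULT, is false.
    conf.setBoolean(HConstants.HBASE_SERVER_USEIP_ENABLED_KEY, true);
    // conf would then be passed to the processes being started, as
    // TestMasterUseIp and TestRegionServerUseIp below do with the mini cluster.
  }
}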

org/apache/hadoop/hbase/HBaseServerBase.java

@@ -252,9 +252,15 @@ public abstract class HBaseServerBase<R extends HBaseRpcServicesBase<?>> extends
     this.rpcServices = createRpcServices();
     useThisHostnameInstead = getUseThisHostnameInstead(conf);
     InetSocketAddress addr = rpcServices.getSocketAddress();
-    String hostName = StringUtils.isBlank(useThisHostnameInstead)
-      ? addr.getHostName()
-      : this.useThisHostnameInstead;
+    // if use-ip is enabled, we will use ip to expose Master/RS service for client,
+    // see HBASE-27304 for details.
+    boolean useIp = conf.getBoolean(HConstants.HBASE_SERVER_USEIP_ENABLED_KEY,
+      HConstants.HBASE_SERVER_USEIP_ENABLED_DEFAULT);
+    String isaHostName =
+      useIp ? addr.getAddress().getHostAddress() : addr.getAddress().getHostName();
+    String hostName =
+      StringUtils.isBlank(useThisHostnameInstead) ? isaHostName : useThisHostnameInstead;
     serverName = ServerName.valueOf(hostName, addr.getPort(), this.startcode);
     // login the zookeeper client principal (if using security)
     ZKAuthentication.loginClient(this.conf, HConstants.ZK_CLIENT_KEYTAB_FILE,
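The behavioral change above hinges on which accessor is used on the resolved address: InetAddress.getHostName() reverse-resolves to a name, while getHostAddress() returns the textual ip. A minimal, self-contained sketch of that distinction (the class name and port 16020 are illustrative, and the printed values depend on the local DNS/hosts setup):

import java.net.InetAddress;
import java.net.InetSocketAddress;

public class HostNameVsHostAddress { // hypothetical example class
  public static void main(String[] args) throws Exception {
    // A resolved socket address, analogous to rpcServices.getSocketAddress() above.
    InetSocketAddress addr = new InetSocketAddress(InetAddress.getLocalHost(), 16020);
    // What the server registers by default (hbase.server.useip.enabled = false).
    System.out.println("hostname: " + addr.getAddress().getHostName());
    // What the server registers when hbase.server.useip.enabled = true.
    System.out.println("ip:       " + addr.getAddress().getHostAddress());
  }
}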

org/apache/hadoop/hbase/master/ServerManager.java

@@ -219,8 +219,13 @@ public class ServerManager {
     // in, it should have been removed from serverAddressToServerInfo and queued
     // for processing by ProcessServerShutdown.
+    // if use-ip is enabled, we will use ip to expose Master/RS service for client,
+    // see HBASE-27304 for details.
+    boolean useIp = master.getConfiguration().getBoolean(HConstants.HBASE_SERVER_USEIP_ENABLED_KEY,
+      HConstants.HBASE_SERVER_USEIP_ENABLED_DEFAULT);
+    String isaHostName = useIp ? ia.getHostAddress() : ia.getHostName();
     final String hostname =
-      request.hasUseThisHostnameInstead() ? request.getUseThisHostnameInstead() : ia.getHostName();
+      request.hasUseThisHostnameInstead() ? request.getUseThisHostnameInstead() : isaHostName;
     ServerName sn = ServerName.valueOf(hostname, request.getPort(), request.getServerStartCode());
     checkClockSkew(sn, request.getServerCurrentTime());
     checkIsDead(sn, "STARTUP");

org/apache/hadoop/hbase/util/RegionMover.java

@ -26,6 +26,7 @@ import java.io.File;
import java.io.FileInputStream; import java.io.FileInputStream;
import java.io.FileOutputStream; import java.io.FileOutputStream;
import java.io.IOException; import java.io.IOException;
import java.net.InetAddress;
import java.nio.file.Files; import java.nio.file.Files;
import java.nio.file.Paths; import java.nio.file.Paths;
import java.util.ArrayList; import java.util.ArrayList;
@ -67,6 +68,7 @@ import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.net.InetAddresses;
import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils;
@ -116,6 +118,17 @@ public class RegionMover extends AbstractHBaseTool implements Closeable {
setConf(builder.conf); setConf(builder.conf);
this.conn = ConnectionFactory.createConnection(conf); this.conn = ConnectionFactory.createConnection(conf);
this.admin = conn.getAdmin(); this.admin = conn.getAdmin();
// if the hostname of master is ip, it indicates that the master/RS has enabled use-ip, we need
// to resolve the current hostname to ip to ensure that the RegionMover logic can be executed
// normally, see HBASE-27304 for details.
ServerName master = admin.getClusterMetrics(EnumSet.of(Option.MASTER)).getMasterName();
if (InetAddresses.isInetAddress(master.getHostname())) {
if (!InetAddresses.isInetAddress(this.hostname)) {
this.hostname = InetAddress.getByName(this.hostname).getHostAddress();
}
}
// Only while running unit tests, builder.rackManager will not be null for the convenience of // Only while running unit tests, builder.rackManager will not be null for the convenience of
// providing custom rackManager. Otherwise for regular workflow/user triggered action, // providing custom rackManager. Otherwise for regular workflow/user triggered action,
// builder.rackManager is supposed to be null. Hence, setter of builder.rackManager is // builder.rackManager is supposed to be null. Hence, setter of builder.rackManager is
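The constructor change above can be read as a small normalization rule: if the master registered itself by ip, the RegionMover's target hostname must be an ip as well, otherwise ServerName comparisons against the live server list will not match. A standalone sketch of that rule under the same assumptions (class and method names here are illustrative; InetAddresses is the relocated Guava helper the diff imports):

import java.net.InetAddress;
import java.net.UnknownHostException;

import org.apache.hbase.thirdparty.com.google.common.net.InetAddresses;

public final class UseIpHostnames { // hypothetical helper, mirrors the constructor logic above
  private UseIpHostnames() {
  }

  /**
   * If the cluster identifies servers by ip (the master's "hostname" is a literal ip) and the
   * supplied target is not yet an ip, resolve it; otherwise return it unchanged.
   */
  public static String normalize(String masterHostname, String targetHostname)
    throws UnknownHostException {
    if (
      InetAddresses.isInetAddress(masterHostname) && !InetAddresses.isInetAddress(targetHostname)
    ) {
      return InetAddress.getByName(targetHostname).getHostAddress();
    }
    return targetHostname;
  }
}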

org/apache/hadoop/hbase/master/TestMasterUseIp.java (new file)

@@ -0,0 +1,76 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master;
import static org.junit.Assert.assertEquals;
import java.net.InetAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.StartTestingClusterOption;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Category({ MasterTests.class, MediumTests.class })
public class TestMasterUseIp {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestMasterUseIp.class);
private static final Logger LOG = LoggerFactory.getLogger(TestMasterUseIp.class);
private HBaseTestingUtil TEST_UTIL;
private SingleProcessHBaseCluster CLUSTER;
private static final int NUM_MASTERS = 1;
private static final int NUM_RS = 1;
@Before
public void setup() throws Exception {
Configuration conf = HBaseConfiguration.create();
conf.setBoolean(HConstants.HBASE_SERVER_USEIP_ENABLED_KEY, true);
TEST_UTIL = new HBaseTestingUtil(conf);
StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(NUM_MASTERS)
.numRegionServers(NUM_RS).numDataNodes(NUM_RS).build();
CLUSTER = TEST_UTIL.startMiniCluster(option);
}
@After
public void teardown() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
@Test
public void testMasterUseIp() throws Exception {
String hostname = CLUSTER.getMaster(0).getServerName().getHostname();
String ip = InetAddress.getByName(hostname).getHostAddress();
LOG.info("hostname= " + hostname + " ,ip=" + ip);
assertEquals(ip, hostname);
}
}

org/apache/hadoop/hbase/regionserver/TestRegionServerUseIp.java (new file)

@@ -0,0 +1,76 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import static org.junit.Assert.assertEquals;
import java.net.InetAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.StartTestingClusterOption;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Category({ RegionServerTests.class, MediumTests.class })
public class TestRegionServerUseIp {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestRegionServerUseIp.class);
private static final Logger LOG = LoggerFactory.getLogger(TestRegionServerUseIp.class);
private HBaseTestingUtil TEST_UTIL;
private SingleProcessHBaseCluster CLUSTER;
private static final int NUM_MASTERS = 1;
private static final int NUM_RS = 1;
@Before
public void setup() throws Exception {
Configuration conf = HBaseConfiguration.create();
conf.setBoolean(HConstants.HBASE_SERVER_USEIP_ENABLED_KEY, true);
TEST_UTIL = new HBaseTestingUtil(conf);
StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(NUM_MASTERS)
.numRegionServers(NUM_RS).numDataNodes(NUM_RS).build();
CLUSTER = TEST_UTIL.startMiniCluster(option);
}
@After
public void teardown() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
@Test
public void testRegionServerUseIp() throws Exception {
String hostname = CLUSTER.getRegionServer(0).getServerName().getHostname();
String ip = InetAddress.getByName(hostname).getHostAddress();
LOG.info("hostname= " + hostname + " ,ip=" + ip);
assertEquals(ip, hostname);
}
}

org/apache/hadoop/hbase/util/TestRegionMoverUseIp.java (new file)

@@ -0,0 +1,139 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.util;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Category({ MiscTests.class, LargeTests.class })
public class TestRegionMoverUseIp {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestRegionMoverUseIp.class);
private static final Logger LOG = LoggerFactory.getLogger(TestRegionMoverUseIp.class);
@Rule
public TestName name = new TestName();
private static HBaseTestingUtil TEST_UTIL;
private static ServerName rs0;
private static ServerName rs1;
private static ServerName rs2;
@BeforeClass
public static void setUpBeforeClass() throws Exception {
Configuration conf = HBaseConfiguration.create();
conf.setBoolean(HConstants.HBASE_SERVER_USEIP_ENABLED_KEY, true);
TEST_UTIL = new HBaseTestingUtil(conf);
TEST_UTIL.startMiniCluster(3);
SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
rs0 = cluster.getRegionServer(0).getServerName();
rs1 = cluster.getRegionServer(1).getServerName();
rs2 = cluster.getRegionServer(2).getServerName();
LOG.info("rs0 hostname=" + rs0.getHostname());
LOG.info("rs1 hostname=" + rs1.getHostname());
LOG.info("rs2 hostname=" + rs2.getHostname());
TEST_UTIL.getAdmin().balancerSwitch(false, true);
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
@Before
public void setUp() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName)
.setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1")).build();
int startKey = 0;
int endKey = 80000;
TEST_UTIL.getAdmin().createTable(tableDesc, Bytes.toBytes(startKey), Bytes.toBytes(endKey), 9);
}
@Test
public void testRegionUnloadUseIp() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
Admin admin = TEST_UTIL.getAdmin();
Table table = TEST_UTIL.getConnection().getTable(tableName);
List<Put> puts = IntStream.range(10, 50000).mapToObj(i -> new Put(Bytes.toBytes(i))
.addColumn(Bytes.toBytes("fam1"), Bytes.toBytes("q1"), Bytes.toBytes("val_" + i)))
.collect(Collectors.toList());
table.put(puts);
admin.flush(tableName);
admin.compact(tableName);
Thread.sleep(3000);
HRegionServer hRegionServer0 = cluster.getRegionServer(0);
HRegionServer hRegionServer1 = cluster.getRegionServer(1);
HRegionServer hRegionServer2 = cluster.getRegionServer(2);
int numRegions0 = hRegionServer0.getNumberOfOnlineRegions();
int numRegions1 = hRegionServer1.getNumberOfOnlineRegions();
int numRegions2 = hRegionServer2.getNumberOfOnlineRegions();
Assert.assertTrue(numRegions0 >= 3);
Assert.assertTrue(numRegions1 >= 3);
Assert.assertTrue(numRegions2 >= 3);
int totalRegions = numRegions0 + numRegions1 + numRegions2;
// source RS: rs0
String sourceRSName = rs0.getAddress().toString();
RegionMover.RegionMoverBuilder rmBuilder =
new RegionMover.RegionMoverBuilder(sourceRSName, TEST_UTIL.getConfiguration()).ack(true)
.maxthreads(8);
try (RegionMover regionMover = rmBuilder.build()) {
regionMover.unload();
int newNumRegions0 = hRegionServer0.getNumberOfOnlineRegions();
int newNumRegions1 = hRegionServer1.getNumberOfOnlineRegions();
int newNumRegions2 = hRegionServer2.getNumberOfOnlineRegions();
Assert.assertEquals(0, newNumRegions0);
Assert.assertEquals(totalRegions, newNumRegions1 + newNumRegions2);
}
}
}