HBASE-21318 Make RefreshHFilesClient runnable

In addition to enabling hbase.coprocessor.region.classes with
RefreshHFilesEndPoint, users can now run this client as a ToolRunner
class/CLI and call refreshHFiles directly.

Signed-off-by: tedyu <yuzhihong@gmail.com>
Author: TAK LON WU, 2018-08-17 15:55:10 -07:00 (committed by tedyu)
parent 6830a1c1a0
commit d4cc5eef43
4 changed files with 171 additions and 63 deletions
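
As context, a minimal sketch of the two usage modes this change enables. The hbase-site.xml snippet and the launcher invocation below are illustrative, and the table name "t1" is a placeholder:

  <!-- hbase-site.xml: load the example endpoint on every region server -->
  <property>
    <name>hbase.coprocessor.region.classes</name>
    <value>org.apache.hadoop.hbase.coprocessor.example.RefreshHFilesEndpoint</value>
  </property>

  # with the endpoint loaded, trigger a refresh of table "t1" from the CLI
  hbase org.apache.hadoop.hbase.client.example.RefreshHFilesClient t1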

File: org/apache/hadoop/hbase/client/example/RefreshHFilesClient.java

@@ -22,6 +22,8 @@ package org.apache.hadoop.hbase.client.example;
import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
@@ -31,6 +33,8 @@ import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils.BlockingRpcCallback;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.protobuf.generated.RefreshHFilesProtos;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -40,7 +44,7 @@ import org.slf4j.LoggerFactory;
* Region Server side via the RefreshHFilesService.
*/
@InterfaceAudience.Private
-public class RefreshHFilesClient implements Closeable {
+public class RefreshHFilesClient extends Configured implements Tool, Closeable {
  private static final Logger LOG = LoggerFactory.getLogger(RefreshHFilesClient.class);
  private final Connection connection;
@@ -93,4 +97,28 @@ public class RefreshHFilesClient implements Closeable {
    });
    LOG.debug("Done refreshing HFiles");
  }

  @Override
  public int run(String[] args) throws Exception {
    if (args.length != 1) {
      String message = "When multiple HBase clusters share a common root dir, "
          + "especially for a read replica cluster (see details in HBASE-18477), please "
          + "consider using this tool to manually sync the flushed HFiles from the source cluster.";
      message += "\nUsage: " + this.getClass().getName() + " tableName";
      System.out.println(message);
      return -1;
    }
    final TableName tableName = TableName.valueOf(args[0]);
    try {
      refreshHFiles(tableName);
    } catch (Throwable t) {
      LOG.error("Refresh HFiles from table " + tableName.getNameAsString() + " failed: ", t);
      return -1;
    }
    return 0;
  }

  public static void main(String[] args) throws Exception {
    ToolRunner.run(new RefreshHFilesClient(HBaseConfiguration.create()), args);
  }
}
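
The Tool hook is additive: the client can still be driven in-process. A minimal sketch of programmatic use, assuming the constructor and the refreshHFiles(TableName) method behave as shown in the diff above (the table name "t1" is a placeholder):

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.example.RefreshHFilesClient;

  public class RefreshHFilesExample {
    public static void main(String[] args) throws Throwable {
      // RefreshHFilesClient implements Closeable, so try-with-resources
      // releases the underlying Connection when the refresh completes.
      try (RefreshHFilesClient client = new RefreshHFilesClient(HBaseConfiguration.create())) {
        client.refreshHFiles(TableName.valueOf("t1"));
      }
    }
  }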

File: org/apache/hadoop/hbase/client/example/TestRefreshHFilesClient.java (new file)

@@ -0,0 +1,52 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.client.example;

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.coprocessor.example.TestRefreshHFilesBase;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.util.ToolRunner;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@Category({ ClientTests.class, MediumTests.class })
public class TestRefreshHFilesClient extends TestRefreshHFilesBase {
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestRefreshHFilesClient.class);

  @BeforeClass
  public static void setUp() {
    setUp(HRegion.class.getName());
  }

  @Test
  public void testRefreshHFilesClient() throws Exception {
    addHFilesToRegions();
    assertEquals(2, HTU.getNumHFiles(TABLE_NAME, FAMILY));
    RefreshHFilesClient tool = new RefreshHFilesClient(HTU.getConfiguration());
    assertEquals(0, ToolRunner.run(tool, new String[] { TABLE_NAME.getNameAsString() }));
    assertEquals(4, HTU.getNumHFiles(TABLE_NAME, FAMILY));
  }
}
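
To exercise this test locally, an invocation along these lines should work (the hbase-examples module name is an assumption about where these classes live):

  mvn test -pl hbase-examples -Dtest=TestRefreshHFilesClient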

File: org/apache/hadoop/hbase/coprocessor/example/TestRefreshHFilesBase.java (new file)

@@ -0,0 +1,88 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.coprocessor.example;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileTestUtil;
import org.junit.After;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TestRefreshHFilesBase {
  protected static final Logger LOG = LoggerFactory.getLogger(TestRefreshHFilesBase.class);
  protected static final HBaseTestingUtility HTU = new HBaseTestingUtility();
  protected static final int NUM_RS = 2;
  protected static final TableName TABLE_NAME = TableName.valueOf("testRefreshRegionHFilesEP");
  protected static final byte[] FAMILY = Bytes.toBytes("family");
  protected static final byte[] QUALIFIER = Bytes.toBytes("qualifier");
  protected static final byte[][] SPLIT_KEY = new byte[][] { Bytes.toBytes("30") };
  protected static final int NUM_ROWS = 5;
  protected static final String HFILE_NAME = "123abcdef";
  protected static Configuration CONF = HTU.getConfiguration();
  protected static MiniHBaseCluster cluster;
  protected static Table table;

  public static void setUp(String regionImpl) {
    try {
      CONF.set(HConstants.REGION_IMPL, regionImpl);
      CONF.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
      CONF.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
          RefreshHFilesEndpoint.class.getName());
      cluster = HTU.startMiniCluster(NUM_RS);
      // Create table
      table = HTU.createTable(TABLE_NAME, FAMILY, SPLIT_KEY);
      // this will create 2 regions spread across slaves
      HTU.loadNumericRows(table, FAMILY, 1, 20);
      HTU.flush(TABLE_NAME);
    } catch (Exception ex) {
      LOG.error("Couldn't finish setup", ex);
    }
  }

  @After
  public void tearDown() throws Exception {
    HTU.shutdownMiniCluster();
  }

  protected void addHFilesToRegions() throws IOException {
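    // Plant an extra HFile named HFILE_NAME directly into each region's family directory,
    // bypassing the normal write path. This mimics HFiles flushed out-of-band, e.g. by a
    // source cluster sharing the same root dir (the HBASE-18477 read replica scenario).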
    MasterFileSystem mfs = HTU.getMiniHBaseCluster().getMaster().getMasterFileSystem();
    Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), TABLE_NAME);
    for (Region region : cluster.getRegions(TABLE_NAME)) {
      Path regionDir = new Path(tableDir, region.getRegionInfo().getEncodedName());
      Path familyDir = new Path(regionDir, Bytes.toString(FAMILY));
      HFileTestUtil.createHFile(HTU.getConfiguration(), HTU.getTestFileSystem(),
          new Path(familyDir, HFILE_NAME), FAMILY, QUALIFIER, Bytes.toBytes("50"),
          Bytes.toBytes("60"), NUM_ROWS);
    }
  }
}

File: org/apache/hadoop/hbase/coprocessor/example/TestRefreshHFilesEndpoint.java

@@ -27,92 +27,32 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.example.RefreshHFilesClient;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileTestUtil;
import org.apache.hadoop.hbase.wal.WAL;
import org.junit.After;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@Category(MediumTests.class)
-public class TestRefreshHFilesEndpoint {
+public class TestRefreshHFilesEndpoint extends TestRefreshHFilesBase {
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestRefreshHFilesEndpoint.class);
  private static final Logger LOG = LoggerFactory.getLogger(TestRefreshHFilesEndpoint.class);
  private static final HBaseTestingUtility HTU = new HBaseTestingUtility();
  private static final int NUM_RS = 2;
  private static final TableName TABLE_NAME = TableName.valueOf("testRefreshRegionHFilesEP");
  private static final byte[] FAMILY = Bytes.toBytes("family");
  private static final byte[] QUALIFIER = Bytes.toBytes("qualifier");
  private static final byte[][] SPLIT_KEY = new byte[][] { Bytes.toBytes("30") };
  private static final int NUM_ROWS = 5;
  private static final String HFILE_NAME = "123abcdef";
  private static Configuration CONF = HTU.getConfiguration();
  private static MiniHBaseCluster cluster;
  private static Table table;

  public static void setUp(String regionImpl) {
    try {
      CONF.set(HConstants.REGION_IMPL, regionImpl);
      CONF.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
      CONF.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
          RefreshHFilesEndpoint.class.getName());
      cluster = HTU.startMiniCluster(NUM_RS);
      // Create table
      table = HTU.createTable(TABLE_NAME, FAMILY, SPLIT_KEY);
      // this will create 2 regions spread across slaves
      HTU.loadNumericRows(table, FAMILY, 1, 20);
      HTU.flush(TABLE_NAME);
    } catch (Exception ex) {
      LOG.error("Couldn't finish setup", ex);
    }
  }

  @After
  public void tearDown() throws Exception {
    HTU.shutdownMiniCluster();
  }

  @Test
  public void testRefreshRegionHFilesEndpoint() throws Exception {
    setUp(HRegion.class.getName());
    MasterFileSystem mfs = HTU.getMiniHBaseCluster().getMaster().getMasterFileSystem();
    Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), TABLE_NAME);
    for (Region region : cluster.getRegions(TABLE_NAME)) {
      Path regionDir = new Path(tableDir, region.getRegionInfo().getEncodedName());
      Path familyDir = new Path(regionDir, Bytes.toString(FAMILY));
      HFileTestUtil.createHFile(HTU.getConfiguration(), HTU.getTestFileSystem(),
          new Path(familyDir, HFILE_NAME), FAMILY, QUALIFIER, Bytes.toBytes("50"),
          Bytes.toBytes("60"), NUM_ROWS);
    }
    addHFilesToRegions();
    assertEquals(2, HTU.getNumHFiles(TABLE_NAME, FAMILY));
    callRefreshRegionHFilesEndPoint();
    assertEquals(4, HTU.getNumHFiles(TABLE_NAME, FAMILY));