HBASE-25445: Use WAL FS instead of master FS in SplitWALManager (#2844)

Signed-off-by: Pankaj <pankajkumar@apache.org>
Signed-off-by: ramkrish86 <ramkrishna@apache.org>
Signed-off-by: Viraj Jasani <vjasani@apache.org>
Anjan Das 2021-01-07 15:31:50 +05:30 committed by Viraj Jasani
parent 82a63abc8a
commit e39bae4ddb
2 changed files with 65 additions and 2 deletions

hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java

@@ -85,8 +85,7 @@ public class SplitWALManager {
     this.splitWorkerAssigner = new SplitWorkerAssigner(this.master,
       conf.getInt(HBASE_SPLIT_WAL_MAX_SPLITTER, DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER));
     this.rootDir = master.getMasterFileSystem().getWALRootDir();
-    // TODO: This should be the WAL FS, not the Master FS?
-    this.fs = master.getMasterFileSystem().getFileSystem();
+    this.fs = master.getMasterFileSystem().getWALFileSystem();
     this.walArchiveDir = new Path(this.rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
   }
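
The one-line change above matters when hbase.wal.dir is configured on a different filesystem than hbase.rootdir: split WALs are archived under the WAL root, so SplitWALManager has to go through the WAL filesystem rather than the master (root) filesystem, which cannot see or rename files under a different scheme. Below is a minimal sketch of that setup; it is not part of this commit, and the class name and URIs (hdfs://namenode:8020/hbase, file:///hbase-wal) are illustrative only.

// Illustrative sketch (not part of this commit): the root dir and the WAL dir
// can resolve to two different FileSystem instances.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.CommonFSUtils;

public class WalFsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical layout: table data on HDFS, WALs on a separate filesystem.
    conf.set(HConstants.HBASE_DIR, "hdfs://namenode:8020/hbase");
    conf.set(CommonFSUtils.HBASE_WAL_DIR, "file:///hbase-wal");

    Path rootDir = CommonFSUtils.getRootDir(conf);        // hdfs://namenode:8020/hbase
    Path walRootDir = CommonFSUtils.getWALRootDir(conf);  // file:///hbase-wal

    FileSystem rootFs = rootDir.getFileSystem(conf);
    FileSystem walFs = walRootDir.getFileSystem(conf);

    // SplitWALManager archives split WALs under <walRootDir>/oldWALs, so it must
    // operate on walFs; going through rootFs fails once the schemes differ,
    // which is the scenario this commit fixes.
    System.out.println("root FS: " + rootFs.getUri());
    System.out.println("WAL FS:  " + walFs.getUri());
  }
}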

hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java

@@ -31,6 +31,14 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.master.assignment.SplitTableRegionProcedure;
+import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface;
 import org.apache.hadoop.hbase.procedure2.Procedure;
@@ -43,6 +51,7 @@ import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.junit.After;
@@ -54,6 +63,8 @@ import org.junit.experimental.categories.Category;
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @Category({ MasterTests.class, LargeTests.class })
@@ -63,6 +74,7 @@ public class TestSplitWALManager {
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestSplitWALManager.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestSplitWALManager.class);
   private static HBaseTestingUtility TEST_UTIL;
   private HMaster master;
   private SplitWALManager splitWALManager;
@@ -86,6 +98,58 @@
     TEST_UTIL.shutdownMiniCluster();
   }
 
+  @Test
+  public void testWALArchiveWithDifferentWalAndRootFS() throws Exception {
+    HBaseTestingUtility test_util_2 = new HBaseTestingUtility();
+    Path dir = TEST_UTIL.getDataTestDirOnTestFS("testWalDir");
+    test_util_2.getConfiguration().set(CommonFSUtils.HBASE_WAL_DIR, dir.toString());
+    CommonFSUtils.setWALRootDir(test_util_2.getConfiguration(), dir);
+    test_util_2.startMiniCluster(3);
+    HMaster master2 = test_util_2.getHBaseCluster().getMaster();
+    LOG.info("The Master FS is pointing to: " + master2.getMasterFileSystem()
+      .getFileSystem().getUri());
+    LOG.info("The WAL FS is pointing to: " + master2.getMasterFileSystem()
+      .getWALFileSystem().getUri());
+    Table table = test_util_2.createTable(TABLE_NAME, FAMILY);
+    test_util_2.waitTableAvailable(TABLE_NAME);
+    Admin admin = test_util_2.getAdmin();
+    MasterProcedureEnv env = test_util_2.getMiniHBaseCluster().getMaster()
+      .getMasterProcedureExecutor().getEnvironment();
+    final ProcedureExecutor<MasterProcedureEnv> executor = test_util_2.getMiniHBaseCluster()
+      .getMaster().getMasterProcedureExecutor();
+    List<RegionInfo> regionInfos = admin.getRegions(TABLE_NAME);
+    SplitTableRegionProcedure splitProcedure = new SplitTableRegionProcedure(
+      env, regionInfos.get(0), Bytes.toBytes("row5"));
+    // Populate some rows in the table
+    LOG.info("Beginning put data to the table: " + TABLE_NAME.toString());
+    int rowCount = 5;
+    for (int i = 0; i < rowCount; i++) {
+      byte[] row = Bytes.toBytes("row" + i);
+      Put put = new Put(row);
+      put.addColumn(FAMILY, FAMILY, FAMILY);
+      table.put(put);
+    }
+    executor.submitProcedure(splitProcedure);
+    LOG.info("Submitted SplitProcedure.");
+    test_util_2.waitFor(30000, () -> executor.getProcedures().stream()
+      .filter(p -> p instanceof TransitRegionStateProcedure)
+      .map(p -> (TransitRegionStateProcedure) p)
+      .anyMatch(p -> TABLE_NAME.equals(p.getTableName())));
+    test_util_2.getMiniHBaseCluster().killRegionServer(
+      test_util_2.getMiniHBaseCluster().getRegionServer(0).getServerName());
+    test_util_2.getMiniHBaseCluster().startRegionServer();
+    test_util_2.waitUntilNoRegionsInTransition();
+    Scan scan = new Scan();
+    ResultScanner results = table.getScanner(scan);
+    int scanRowCount = 0;
+    while (results.next() != null) {
+      scanRowCount++;
+    }
+    Assert.assertEquals("Got " + scanRowCount + " rows when " + rowCount +
+      " were expected.", rowCount, scanRowCount);
+    test_util_2.shutdownMiniCluster();
+  }
+
   @Test
   public void testAcquireAndRelease() throws Exception {
     List<FakeServerProcedure> testProcedures = new ArrayList<>();