HBASE-25445: Use WAL FS instead of master FS in SplitWALManager (#2844)
Signed-off-by: Pankaj <pankajkumar@apache.org>
Signed-off-by: ramkrish86 <ramkrishna@apache.org>
Signed-off-by: Viraj Jasani <vjasani@apache.org>
parent a414361ed9
commit 4b6215297d
SplitWALManager.java:

@@ -85,8 +85,7 @@ public class SplitWALManager {
     this.splitWorkerAssigner = new SplitWorkerAssigner(this.master,
       conf.getInt(HBASE_SPLIT_WAL_MAX_SPLITTER, DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER));
     this.rootDir = master.getMasterFileSystem().getWALRootDir();
-    // TODO: This should be the WAL FS, not the Master FS?
-    this.fs = master.getMasterFileSystem().getFileSystem();
+    this.fs = master.getMasterFileSystem().getWALFileSystem();
     this.walArchiveDir = new Path(this.rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
   }

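Note on the hunk above: SplitWALManager already derives rootDir and walArchiveDir from the WAL root directory, so its FileSystem handle must come from that same root rather than from hbase.rootdir. The following standalone sketch (not part of this commit) shows why the two handles can differ; the cluster URIs are placeholders, and running it end to end would need the HDFS client on the classpath and reachable clusters.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.CommonFSUtils;

public final class WalFsVsRootFsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical layout: table data on one cluster, WALs on another.
    CommonFSUtils.setRootDir(conf, new Path("hdfs://data-cluster/hbase"));
    CommonFSUtils.setWALRootDir(conf, new Path("hdfs://wal-cluster/hbase-wal"));

    // Resolve each FileSystem from its own root directory.
    FileSystem rootFs = CommonFSUtils.getRootDir(conf).getFileSystem(conf);
    FileSystem walFs = CommonFSUtils.getWALRootDir(conf).getFileSystem(conf);

    // When hbase.wal.dir points at a different filesystem than hbase.rootdir,
    // these URIs differ; resolving the FS from the root dir (the old code path)
    // would archive split WALs against the wrong filesystem.
    System.out.println("root FS: " + rootFs.getUri());
    System.out.println("WAL  FS: " + walFs.getUri());
  }
}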
TestSplitWALManager.java:

@@ -31,6 +31,14 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.master.assignment.SplitTableRegionProcedure;
+import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface;
 import org.apache.hadoop.hbase.procedure2.Procedure;
@@ -43,6 +51,7 @@ import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.junit.After;
@@ -54,6 +63,8 @@ import org.junit.experimental.categories.Category;
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @Category({ MasterTests.class, LargeTests.class })
@@ -63,6 +74,7 @@ public class TestSplitWALManager {
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestSplitWALManager.class);
 
+  private static final Logger LOG = LoggerFactory.getLogger(TestSplitWALManager.class);
   private static HBaseTestingUtility TEST_UTIL;
   private HMaster master;
   private SplitWALManager splitWALManager;
@@ -86,6 +98,58 @@ public class TestSplitWALManager {
     TEST_UTIL.shutdownMiniCluster();
   }
 
+  @Test
+  public void testWALArchiveWithDifferentWalAndRootFS() throws Exception{
+    HBaseTestingUtility test_util_2 = new HBaseTestingUtility();
+    Path dir = TEST_UTIL.getDataTestDirOnTestFS("testWalDir");
+    test_util_2.getConfiguration().set(CommonFSUtils.HBASE_WAL_DIR, dir.toString());
+    CommonFSUtils.setWALRootDir(test_util_2.getConfiguration(), dir);
+    test_util_2.startMiniCluster(3);
+    HMaster master2 = test_util_2.getHBaseCluster().getMaster();
+    LOG.info("The Master FS is pointing to: " + master2.getMasterFileSystem()
+      .getFileSystem().getUri());
+    LOG.info("The WAL FS is pointing to: " + master2.getMasterFileSystem()
+      .getWALFileSystem().getUri());
+    Table table = test_util_2.createTable(TABLE_NAME, FAMILY);
+    test_util_2.waitTableAvailable(TABLE_NAME);
+    Admin admin = test_util_2.getAdmin();
+    MasterProcedureEnv env = test_util_2.getMiniHBaseCluster().getMaster()
+      .getMasterProcedureExecutor().getEnvironment();
+    final ProcedureExecutor<MasterProcedureEnv> executor = test_util_2.getMiniHBaseCluster()
+      .getMaster().getMasterProcedureExecutor();
+    List<RegionInfo> regionInfos = admin.getRegions(TABLE_NAME);
+    SplitTableRegionProcedure splitProcedure = new SplitTableRegionProcedure(
+      env, regionInfos.get(0), Bytes.toBytes("row5"));
+    // Populate some rows in the table
+    LOG.info("Beginning put data to the table: " + TABLE_NAME.toString());
+    int rowCount = 5;
+    for (int i = 0; i < rowCount; i++) {
+      byte[] row = Bytes.toBytes("row" + i);
+      Put put = new Put(row);
+      put.addColumn(FAMILY, FAMILY, FAMILY);
+      table.put(put);
+    }
+    executor.submitProcedure(splitProcedure);
+    LOG.info("Submitted SplitProcedure.");
+    test_util_2.waitFor(30000, () -> executor.getProcedures().stream()
+      .filter(p -> p instanceof TransitRegionStateProcedure)
+      .map(p -> (TransitRegionStateProcedure) p)
+      .anyMatch(p -> TABLE_NAME.equals(p.getTableName())));
+    test_util_2.getMiniHBaseCluster().killRegionServer(
+      test_util_2.getMiniHBaseCluster().getRegionServer(0).getServerName());
+    test_util_2.getMiniHBaseCluster().startRegionServer();
+    test_util_2.waitUntilNoRegionsInTransition();
+    Scan scan = new Scan();
+    ResultScanner results = table.getScanner(scan);
+    int scanRowCount = 0;
+    while (results.next() != null) {
+      scanRowCount++;
+    }
+    Assert.assertEquals("Got " + scanRowCount + " rows when " + rowCount +
+      " were expected.", rowCount, scanRowCount);
+    test_util_2.shutdownMiniCluster();
+  }
+
   @Test
   public void testAcquireAndRelease() throws Exception {
     List<FakeServerProcedure> testProcedures = new ArrayList<>();
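The new test exercises WAL splitting when hbase.wal.dir lives on a different filesystem than the root directory: it points a second mini cluster's WAL root at a separate test FS, submits a region split, kills a region server to force WAL splitting, and verifies that all written rows survive recovery. A test like this could additionally confirm that archived WALs land on the WAL filesystem; the helper below is a hedged sketch of such a check, not part of this commit, and the class name is hypothetical.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.CommonFSUtils;

public final class WalArchiveCheck {
  private WalArchiveCheck() {
  }

  /**
   * Returns true if the oldWALs archive directory exists on the WAL filesystem
   * resolved from the given configuration. Mirrors what SplitWALManager now does
   * via getWALFileSystem(): the archive path is checked against the WAL FS, not
   * the root FS, which matters when the two point at different clusters.
   */
  public static boolean oldWALsOnWalFs(Configuration conf) throws IOException {
    Path walRootDir = CommonFSUtils.getWALRootDir(conf);
    FileSystem walFs = walRootDir.getFileSystem(conf);
    return walFs.exists(new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME));
  }
}

A test could call oldWALsOnWalFs(test_util_2.getConfiguration()) after the killed region server's WALs have been split and archived.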