HBASE-26280 Use store file tracker when snapshoting (#3685)

Signed-off-by: Wellington Chevreuil <wchevreuil@apache.org>
Reviewed-by: Josh Elser <elserj@apache.org>
Author: Duo Zhang
Date:   2021-09-17 09:40:44 +08:00
Parent: dcaea11a52
Commit: b812a277ab
11 changed files with 111 additions and 113 deletions


@@ -613,9 +613,8 @@ public class MergeTableRegionsProcedure
     List<Path> mergedFiles = new ArrayList<>();
     for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
       String family = hcd.getNameAsString();
-      Configuration trackerConfig =
-        StoreFileTrackerFactory.mergeConfigurations(env.getMasterConfiguration(), htd, hcd);
-      StoreFileTracker tracker = StoreFileTrackerFactory.create(trackerConfig, family, regionFs);
+      StoreFileTracker tracker =
+        StoreFileTrackerFactory.create(env.getMasterConfiguration(), htd, hcd, regionFs);
       final Collection<StoreFileInfo> storeFiles = tracker.load();
       if (storeFiles != null && storeFiles.size() > 0) {
         final Configuration storeConfiguration =
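The split procedure (next file) gets the identical treatment: the old two-step pattern of merging configurations and then passing only the family name collapses into a single factory call that takes the real descriptors. Side by side, using only names that appear in this diff:

    // Before: the caller merged configs itself, then passed just the family name.
    Configuration trackerConfig =
      StoreFileTrackerFactory.mergeConfigurations(env.getMasterConfiguration(), htd, hcd);
    StoreFileTracker tracker = StoreFileTrackerFactory.create(trackerConfig, family, regionFs);

    // After: the caller hands over the table and family descriptors; the merge
    // happens inside the factory (see the StoreFileTrackerFactory hunk below).
    StoreFileTracker tracker =
      StoreFileTrackerFactory.create(env.getMasterConfiguration(), htd, hcd, regionFs);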


@@ -668,9 +668,8 @@ public class SplitTableRegionProcedure
       new HashMap<String, Collection<StoreFileInfo>>(htd.getColumnFamilyCount());
     for (ColumnFamilyDescriptor cfd : htd.getColumnFamilies()) {
       String family = cfd.getNameAsString();
-      Configuration trackerConfig = StoreFileTrackerFactory.
-        mergeConfigurations(env.getMasterConfiguration(), htd, htd.getColumnFamily(cfd.getName()));
-      StoreFileTracker tracker = StoreFileTrackerFactory.create(trackerConfig, family, regionFs);
+      StoreFileTracker tracker =
+        StoreFileTrackerFactory.create(env.getMasterConfiguration(), htd, cfd, regionFs);
       Collection<StoreFileInfo> sfis = tracker.load();
       if (sfis == null) {
         continue;


@@ -598,7 +598,6 @@ public class HRegionFileSystem {
    * to the proper location in the filesystem.
    *
    * @param regionInfo daughter {@link org.apache.hadoop.hbase.client.RegionInfo}
-   * @throws IOException
    */
   public Path commitDaughterRegion(final RegionInfo regionInfo, List<Path> allRegionFiles,
     MasterProcedureEnv env) throws IOException {
@@ -625,12 +624,8 @@ public class HRegionFileSystem {
     Map<String, List<StoreFileInfo>> fileInfoMap = new HashMap<>();
     for(Path file : allFiles) {
       String familyName = file.getParent().getName();
-      trackerMap.computeIfAbsent(familyName, t -> {
-        Configuration config = StoreFileTrackerFactory.mergeConfigurations(conf, tblDesc,
-          tblDesc.getColumnFamily(Bytes.toBytes(familyName)));
-        return StoreFileTrackerFactory.
-          create(config, familyName, regionFs);
-      });
+      trackerMap.computeIfAbsent(familyName, t -> StoreFileTrackerFactory.create(conf, tblDesc,
+        tblDesc.getColumnFamily(Bytes.toBytes(familyName)), regionFs));
       fileInfoMap.computeIfAbsent(familyName, l -> new ArrayList<>());
       List<StoreFileInfo> infos = fileInfoMap.get(familyName);
       infos.add(new StoreFileInfo(conf, fs, file, true));
@@ -676,7 +671,6 @@ public class HRegionFileSystem {
    * this method is invoked on the Master side, then the RegionSplitPolicy will
    * NOT have a reference to a Region.
    * @return Path to created reference.
-   * @throws IOException
    */
   public Path splitStoreFile(RegionInfo hri, String familyName, HStoreFile f, byte[] splitRow,
     boolean top, RegionSplitPolicy splitPolicy) throws IOException {
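commitDaughterRegion walks every file of the daughter region and needs a tracker per column family; the computeIfAbsent idiom keeps one tracker per family, and the lambda now shrinks to a single factory call. A minimal sketch of the caching pattern, assuming the method's surrounding names (conf, tblDesc, regionFs, allFiles) as shown above:

    Map<String, StoreFileTracker> trackerMap = new HashMap<>();
    for (Path file : allFiles) {
      String familyName = file.getParent().getName();
      // Lazily create one tracker per family on first encounter, then reuse it.
      StoreFileTracker tracker = trackerMap.computeIfAbsent(familyName,
        t -> StoreFileTrackerFactory.create(conf, tblDesc,
          tblDesc.getColumnFamily(Bytes.toBytes(familyName)), regionFs));
    }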


@@ -22,13 +22,11 @@ import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreContext;
 import org.apache.hadoop.hbase.regionserver.StoreUtils;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -113,16 +111,15 @@ public final class StoreFileTrackerFactory {
    * Used at master side when splitting/merging regions, as we do not have a Store, thus no
    * StoreContext at master side.
    */
-  public static StoreFileTracker create(Configuration conf, String family,
-    HRegionFileSystem regionFs) {
-    ColumnFamilyDescriptorBuilder fDescBuilder =
-      ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family));
-    StoreContext ctx = StoreContext.getBuilder().withColumnFamilyDescriptor(fDescBuilder.build())
-      .withRegionFileSystem(regionFs).build();
-    return StoreFileTrackerFactory.create(conf, true, ctx);
+  public static StoreFileTracker create(Configuration conf, TableDescriptor td,
+    ColumnFamilyDescriptor cfd, HRegionFileSystem regionFs) {
+    StoreContext ctx =
+      StoreContext.getBuilder().withColumnFamilyDescriptor(cfd).withRegionFileSystem(regionFs)
+        .withFamilyStoreDirectoryPath(regionFs.getStoreDir(cfd.getNameAsString())).build();
+    return StoreFileTrackerFactory.create(mergeConfigurations(conf, td, cfd), true, ctx);
   }
 
-  public static Configuration mergeConfigurations(Configuration global, TableDescriptor table,
+  private static Configuration mergeConfigurations(Configuration global, TableDescriptor table,
     ColumnFamilyDescriptor family) {
     return StoreUtils.createStoreConfiguration(global, table, family);
   }
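The point of the new overload: the old master-side create() synthesized a fresh ColumnFamilyDescriptor from nothing but the family name, so tracker settings declared on the table or column family were invisible unless the caller remembered to call mergeConfigurations first. Passing the real TableDescriptor and ColumnFamilyDescriptor lets the factory do the merge itself (and also populate the family store directory on the StoreContext). A sketch of the layering, assuming StoreUtils.createStoreConfiguration applies family values over table values over the global config:

    // Merged view: family-level settings override table-level, which override global.
    Configuration merged = StoreUtils.createStoreConfiguration(conf, td, cfd);
    // The tracker implementation is resolved from this merged view, so a
    // per-table or per-family TRACKER_IMPL (e.g. "FILE") takes effect.
    String impl = merged.get(StoreFileTrackerFactory.TRACKER_IMPL);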


@@ -47,7 +47,8 @@ import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.Threads;
@@ -291,7 +292,7 @@ public final class SnapshotManifest {
     addRegion(tableDir, regionInfo, visitor);
   }
 
-  protected void addRegion(final Path tableDir, final RegionInfo regionInfo, RegionVisitor visitor)
+  protected void addRegion(Path tableDir, RegionInfo regionInfo, RegionVisitor visitor)
     throws IOException {
     boolean isMobRegion = MobUtils.isMobRegionInfo(regionInfo);
     try {
@@ -300,8 +301,8 @@ public final class SnapshotManifest {
       if (isMobRegion) {
         baseDir = CommonFSUtils.getTableDir(MobUtils.getMobHome(conf), regionInfo.getTable());
       }
-      HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, rootFs,
-        baseDir, regionInfo, true);
+      HRegionFileSystem regionFs =
+        HRegionFileSystem.openRegionFromFileSystem(conf, rootFs, baseDir, regionInfo, true);
       monitor.rethrowException();
 
       // 1. dump region meta info into the snapshot directory
@@ -317,27 +318,20 @@ public final class SnapshotManifest {
       // in batches and may miss files being added/deleted. This could be more robust (iteratively
       // checking to see if we have all the files until we are sure), but the limit is currently
       // 1000 files/batch, far more than the number of store files under a single column family.
-      Collection<String> familyNames = regionFs.getFamilies();
-      if (familyNames != null) {
-        for (String familyName: familyNames) {
-          Object familyData = visitor.familyOpen(regionData, Bytes.toBytes(familyName));
-          monitor.rethrowException();
-
-          Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(familyName);
-          if (storeFiles == null) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("No files under family: " + familyName);
-            }
-            continue;
-          }
-
-          // 2.1. build the snapshot reference for the store
-          // iterate through all the store's files and create "references".
-          addReferenceFiles(visitor, regionData, familyData, storeFiles, false);
-
-          visitor.familyClose(regionData, familyData);
-        }
-      }
+      for (ColumnFamilyDescriptor cfd : htd.getColumnFamilies()) {
+        Object familyData = visitor.familyOpen(regionData, cfd.getName());
+        monitor.rethrowException();
+        StoreFileTracker tracker = StoreFileTrackerFactory.create(conf, htd, cfd, regionFs);
+        List<StoreFileInfo> storeFiles = tracker.load();
+        if (storeFiles.isEmpty()) {
+          LOG.debug("No files under family: {}", cfd.getNameAsString());
+          continue;
+        }
+        // 2.1. build the snapshot reference for the store
+        // iterate through all the store's files and create "references".
+        addReferenceFiles(visitor, regionData, familyData, storeFiles, false);
+        visitor.familyClose(regionData, familyData);
+      }
       visitor.regionClose(regionData);
     } catch (IOException e) {
       // the mob directory might not be created yet, so do nothing when it is a mob region
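This hunk is the heart of the commit: when building a snapshot manifest, store files are now enumerated through the configured store file tracker rather than by listing family directories, and the loop is driven by the table descriptor's column families instead of whatever directories happen to exist on disk. Presumably this is what makes snapshots correct for non-default trackers, where a raw directory listing is not the authoritative set of live files. The contrast, using only calls from this diff:

      // Before: ask the filesystem which files sit under the family directory.
      Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(familyName);

      // After: ask the tracker which files are live for this family.
      StoreFileTracker tracker = StoreFileTrackerFactory.create(conf, htd, cfd, regionFs);
      List<StoreFileInfo> storeFiles = tracker.load();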


@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;
 import org.apache.hadoop.hbase.mob.MobConstants;
 import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
 import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -92,7 +93,8 @@ public class TestMobCloneSnapshotFromClientCloneLinksAfterDelete
   @Override
   protected void createTable() throws IOException, InterruptedException {
     MobSnapshotTestingUtils.createMobTable(TEST_UTIL, tableName,
-      SnapshotTestingUtils.getSplitKeys(), getNumReplicas(), DelayFlushCoprocessor.class.getName(),
+      SnapshotTestingUtils.getSplitKeys(), getNumReplicas(),
+      StoreFileTrackerFactory.Trackers.DEFAULT.name(), DelayFlushCoprocessor.class.getName(),
       FAMILY);
   }


@@ -26,8 +26,6 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.experimental.categories.Category;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Test create/using/deleting snapshots from the client
@@ -41,8 +39,6 @@ public class TestMobSnapshotFromClient extends TestSnapshotFromClient {
   public static final HBaseClassTestRule CLASS_RULE =
     HBaseClassTestRule.forClass(TestMobSnapshotFromClient.class);
 
-  private static final Logger LOG = LoggerFactory.getLogger(TestMobSnapshotFromClient.class);
-
   /**
    * Setup the config for the cluster
    * @throws Exception on failure
@@ -60,6 +56,7 @@ public class TestMobSnapshotFromClient extends TestSnapshotFromClient {
 
   @Override
   protected void createTable() throws Exception {
-    MobSnapshotTestingUtils.createMobTable(UTIL, TABLE_NAME, getNumReplicas(), TEST_FAM);
+    MobSnapshotTestingUtils.createMobTable(UTIL, TABLE_NAME, getNumReplicas(), trackerImpl.name(),
+      TEST_FAM);
   }
 }


@@ -23,6 +23,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.regex.Pattern;
 import org.apache.hadoop.conf.Configuration;
@@ -33,9 +34,11 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNameTestRule;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
 import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;
 import org.apache.hadoop.hbase.snapshot.SnapshotManifestV1;
@@ -52,7 +55,10 @@ import org.junit.ClassRule;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.junit.rules.TestName;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Parameterized.Parameters;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -65,7 +71,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
  * <p>
  * This is an end-to-end test for the snapshot utility
  */
-@Category({LargeTests.class, ClientTests.class})
+@RunWith(Parameterized.class)
+@Category({ LargeTests.class, ClientTests.class })
 public class TestSnapshotFromClient {
 
   @ClassRule
@@ -83,7 +90,16 @@ public class TestSnapshotFromClient {
   private static final Pattern MATCH_ALL = Pattern.compile(".*");
 
   @Rule
-  public TestName name = new TestName();
+  public TableNameTestRule name = new TableNameTestRule();
+
+  @Parameter
+  public StoreFileTrackerFactory.Trackers trackerImpl;
+
+  @Parameters(name = "{index}: tracker={0}")
+  public static List<Object[]> params() {
+    return Arrays.asList(new Object[] { StoreFileTrackerFactory.Trackers.DEFAULT },
+      new Object[] { StoreFileTrackerFactory.Trackers.FILE });
+  }
 
   /**
    * Setup the config for the cluster
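With the Parameterized runner, JUnit instantiates this class once per entry returned by params() and injects the value into trackerImpl, so every @Test here (and in subclasses such as TestMobSnapshotFromClient above) runs once against the DEFAULT tracker and once against the FILE tracker. A hypothetical sketch, not part of the commit, of what the runner effectively does with these declarations:

    for (Object[] p : TestSnapshotFromClient.params()) {
      TestSnapshotFromClient test = new TestSnapshotFromClient();
      test.trackerImpl = (StoreFileTrackerFactory.Trackers) p[0];
      // JUnit then runs each @Test on this instance, reported with the
      // @Parameters name pattern, e.g. "testListTableSnapshots[1: tracker=FILE]".
    }

The createTable hunk below shows where the parameter takes effect: it is written into the table descriptor under StoreFileTrackerFactory.TRACKER_IMPL.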
@@ -110,7 +126,6 @@ public class TestSnapshotFromClient {
     conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
     conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
       ConstantSizeRegionSplitPolicy.class.getName());
-
   }
 
   @Before
@@ -119,9 +134,10 @@ public class TestSnapshotFromClient {
   }
 
   protected void createTable() throws Exception {
-    HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
-    htd.setRegionReplication(getNumReplicas());
-    UTIL.createTable(htd, new byte[][]{TEST_FAM}, null);
+    TableDescriptor htd =
+      TableDescriptorBuilder.newBuilder(TABLE_NAME).setRegionReplication(getNumReplicas())
+        .setValue(StoreFileTrackerFactory.TRACKER_IMPL, trackerImpl.name()).build();
+    UTIL.createTable(htd, new byte[][] { TEST_FAM }, null);
   }
 
   protected int getNumReplicas() {
@@ -326,7 +342,7 @@ public class TestSnapshotFromClient {
 
   @Test
   public void testListTableSnapshots() throws Exception {
     Admin admin = null;
-    final TableName tableName = TableName.valueOf(name.getMethodName());
+    final TableName tableName = name.getTableName();
     try {
       admin = UTIL.getAdmin();
@@ -411,7 +427,7 @@ public class TestSnapshotFromClient {
 
   @Test
   public void testDeleteTableSnapshots() throws Exception {
     Admin admin = null;
-    final TableName tableName = TableName.valueOf(name.getMethodName());
+    final TableName tableName = name.getTableName();
     try {
       admin = UTIL.getAdmin();


@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -1103,10 +1104,9 @@ public class TestHStoreFile extends HBaseTestCase {
     when(mockEnv.getMasterConfiguration()).thenReturn(new Configuration());
     TableDescriptors mockTblDescs = mock(TableDescriptors.class);
     when(mockServices.getTableDescriptors()).thenReturn(mockTblDescs);
-    TableDescriptor mockTblDesc = mock(TableDescriptor.class);
+    TableDescriptor mockTblDesc = TableDescriptorBuilder.newBuilder(hri.getTable())
+      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
     when(mockTblDescs.get(any())).thenReturn(mockTblDesc);
-    ColumnFamilyDescriptor mockCfDesc = mock(ColumnFamilyDescriptor.class);
-    when(mockTblDesc.getColumnFamily(any())).thenReturn(mockCfDesc);
     Path regionDir = regionFs.commitDaughterRegion(hri, splitFiles, mockEnv);
     return new Path(new Path(regionDir, family), path.getName());
   }


@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 
 import org.junit.Assert;
@@ -45,28 +46,39 @@ public class MobSnapshotTestingUtils {
   /**
    * Create the Mob Table.
    */
-  public static void createMobTable(final HBaseTestingUtility util,
-      final TableName tableName, int regionReplication,
-      final byte[]... families) throws IOException, InterruptedException {
-    createMobTable(util, tableName, SnapshotTestingUtils.getSplitKeys(),
-      regionReplication, families);
+  public static void createMobTable(final HBaseTestingUtility util, final TableName tableName,
+    int regionReplication, final byte[]... families) throws IOException, InterruptedException {
+    createMobTable(util, tableName, SnapshotTestingUtils.getSplitKeys(), regionReplication,
+      StoreFileTrackerFactory.Trackers.DEFAULT.name(), families);
+  }
+
+  public static void createMobTable(final HBaseTestingUtility util, final TableName tableName,
+    int regionReplication, String storeFileTracker, final byte[]... families)
+    throws IOException, InterruptedException {
+    createMobTable(util, tableName, SnapshotTestingUtils.getSplitKeys(), regionReplication,
+      storeFileTracker, families);
   }
 
-  public static void createPreSplitMobTable(final HBaseTestingUtility util,
-      final TableName tableName, int nRegions, final byte[]... families)
-      throws IOException, InterruptedException {
-    createMobTable(util, tableName, SnapshotTestingUtils.getSplitKeys(nRegions),
-      1, families);
+  public static void createPreSplitMobTable(final HBaseTestingUtility util, final TableName tableName,
+    int nRegions, final byte[]... families) throws IOException, InterruptedException {
+    createMobTable(util, tableName, SnapshotTestingUtils.getSplitKeys(nRegions), 1, families);
   }
 
   public static void createMobTable(final HBaseTestingUtility util, final TableName tableName,
     final byte[][] splitKeys, int regionReplication, final byte[]... families)
     throws IOException, InterruptedException {
-    createMobTable(util, tableName, splitKeys, regionReplication, null, families);
+    createMobTable(util, tableName, splitKeys, regionReplication,
+      StoreFileTrackerFactory.Trackers.DEFAULT.name(), families);
+  }
+
+  public static void createMobTable(final HBaseTestingUtility util, final TableName tableName,
+    final byte[][] splitKeys, int regionReplication, String storeFileTracker,
+    final byte[]... families) throws IOException, InterruptedException {
+    createMobTable(util, tableName, splitKeys, regionReplication, storeFileTracker, null, families);
   }
 
-  public static void createMobTable(HBaseTestingUtility util, TableName tableName,
-    byte[][] splitKeys, int regionReplication, String cpClassName, byte[]... families)
+  public static void createMobTable(HBaseTestingUtility util, TableName tableName, byte[][] splitKeys,
+    int regionReplication, String storeFileTracker, String cpClassName, byte[]... families)
     throws IOException, InterruptedException {
     TableDescriptorBuilder builder =
       TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(regionReplication);
@@ -77,6 +89,7 @@ public class MobSnapshotTestingUtils {
     if (!StringUtils.isBlank(cpClassName)) {
       builder.setCoprocessor(cpClassName);
     }
+    builder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, storeFileTracker);
     util.getAdmin().createTable(builder.build(), splitKeys);
     SnapshotTestingUtils.waitForTableToBeOnline(util, tableName);
     assertEquals((splitKeys.length + 1) * regionReplication,
@@ -85,15 +98,10 @@ public class MobSnapshotTestingUtils {
 
   /**
    * Create a Mob table.
-   *
-   * @param util
-   * @param tableName
-   * @param families
    * @return An Table instance for the created table.
-   * @throws IOException
    */
-  public static Table createMobTable(final HBaseTestingUtility util,
-      final TableName tableName, final byte[]... families) throws IOException {
+  public static Table createMobTable(final HBaseTestingUtility util, final TableName tableName,
+    final byte[]... families) throws IOException {
     TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
     for (byte[] family : families) {
       // Disable blooms (they are on by default as of 0.95) but we disable them
@@ -102,10 +110,7 @@ public class MobSnapshotTestingUtils {
       // and blooms being
       // on is interfering.
       builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family)
-        .setBloomFilterType(BloomType.NONE)
-        .setMobEnabled(true)
-        .setMobThreshold(0L)
-        .build());
+        .setBloomFilterType(BloomType.NONE).setMobEnabled(true).setMobThreshold(0L).build());
     }
     util.getAdmin().createTable(builder.build());
     // HBaseAdmin only waits for regions to appear in hbase:meta we should wait
@@ -135,8 +140,8 @@ public class MobSnapshotTestingUtils {
     }
   }
 
-  public static void verifyMobRowCount(final HBaseTestingUtility util,
-    final TableName tableName, long expectedRows) throws IOException {
+  public static void verifyMobRowCount(final HBaseTestingUtility util, final TableName tableName,
+    long expectedRows) throws IOException {
 
     Table table = ConnectionFactory.createConnection(util.getConfiguration()).getTable(tableName);
     try {
@@ -156,12 +161,9 @@ public class MobSnapshotTestingUtils {
 
     @Override
     public TableDescriptor createHtd(final String tableName) {
-      return TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName))
-        .setColumnFamily(ColumnFamilyDescriptorBuilder
-          .newBuilder(Bytes.toBytes(TEST_FAMILY))
-          .setMobEnabled(true)
-          .setMobThreshold(0L)
-          .build())
+      return TableDescriptorBuilder
+        .newBuilder(TableName.valueOf(tableName)).setColumnFamily(ColumnFamilyDescriptorBuilder
+          .newBuilder(Bytes.toBytes(TEST_FAMILY)).setMobEnabled(true).setMobThreshold(0L).build())
         .build();
     }
   }


@@ -509,9 +509,8 @@ public final class SnapshotTestingUtils {
       this.desc = desc;
       this.tableRegions = tableRegions;
       this.snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir, conf);
-      new FSTableDescriptors(conf)
-        .createTableDescriptorForTableDirectory(this.snapshotDir.getFileSystem(conf),
-          snapshotDir, htd, false);
+      FSTableDescriptors.createTableDescriptorForTableDirectory(
+        this.snapshotDir.getFileSystem(conf), snapshotDir, htd, false);
     }
 
     public TableDescriptor getTableDescriptor() {
@@ -531,15 +530,13 @@ public final class SnapshotTestingUtils {
     }
 
     public Path[] addRegionV1() throws IOException {
-      return addRegion(desc.toBuilder()
-        .setVersion(SnapshotManifestV1.DESCRIPTOR_VERSION)
-        .build());
+      return addRegion(
+        desc.toBuilder().setVersion(SnapshotManifestV1.DESCRIPTOR_VERSION).build());
     }
 
     public Path[] addRegionV2() throws IOException {
-      return addRegion(desc.toBuilder()
-        .setVersion(SnapshotManifestV2.DESCRIPTOR_VERSION)
-        .build());
+      return addRegion(
+        desc.toBuilder().setVersion(SnapshotManifestV2.DESCRIPTOR_VERSION).build());
     }
 
     private Path[] addRegion(final SnapshotProtos.SnapshotDescription desc) throws IOException {
@@ -550,6 +547,7 @@ public final class SnapshotTestingUtils {
       RegionData regionData = tableRegions[this.snapshotted++];
       ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(desc.getName());
       SnapshotManifest manifest = SnapshotManifest.create(conf, fs, snapshotDir, desc, monitor);
+      manifest.addTableDescriptor(htd);
      manifest.addRegion(regionData.tableDir, regionData.hri);
       return regionData.files;
     }
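Because SnapshotManifest.addRegion now iterates htd.getColumnFamilies() and builds a tracker from the table descriptor (see the SnapshotManifest hunks above), the mock snapshot builder in this utility must register the descriptor with the manifest before adding regions; without it the new family loop has no descriptor to consult. The sequence, as it appears in this diff:

      SnapshotManifest manifest = SnapshotManifest.create(conf, fs, snapshotDir, desc, monitor);
      // New: addRegion below resolves column families and the configured
      // store file tracker from this descriptor.
      manifest.addTableDescriptor(htd);
      manifest.addRegion(regionData.tableDir, regionData.hri);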