HBASE-26826 Backport StoreFileTracker (HBASE-26067, HBASE-26584, and others) to branch-2.5

Previous cherry picks:

commit 6aaef89 HBASE-26064 Introduce a StoreFileTracker to abstract the store file tracking logic
commit 43b40e9 HBASE-25988 Store the store file list by a file (#3578)
commit 6e05376 HBASE-26079 Use StoreFileTracker when splitting and merging (#3617)
commit 090b2fe HBASE-26224 Introduce a MigrationStoreFileTracker to support migratin… (#3656)
commit 0ee1689 HBASE-26246 Persist the StoreFileTracker configurations to TableDescriptor when creating table (#3666)
commit 2052e80 HBASE-26248 Should find a suitable way to let users specify the store… (#3665)
commit 5ff0f98 HBASE-26264 Add more checks to prevent misconfiguration on store file… (#3681)
commit fc4f6d1 HBASE-26280 Use store file tracker when snapshoting (#3685)
commit 06db852 HBASE-26326 CreateTableProcedure fails when FileBasedStoreFileTracker… (#3721)
commit e4e7cf8 HBASE-26386 Refactor StoreFileTracker implementations to expose the s… (#3774)
commit 08d1171 HBASE-26328 Clone snapshot doesn't load reference files into FILE SFT impl (#3749)
commit 8bec26e HBASE-26263 [Rolling Upgrading] Persist the StoreFileTracker configur… (#3700)
commit a288365 HBASE-26271: Cleanup the broken store files under data directory (#3786)
commit d00b5fa HBASE-26454 CreateTableProcedure still relies on temp dir and renames… (#3845)
commit 771e552 HBASE-26286: Add support for specifying store file tracker when restoring or cloning snapshot
commit f16b7b1 HBASE-26265 Update ref guide to mention the new store file tracker im… (#3942)
commit 755b3b4 HBASE-26585 Add SFT configuration to META table descriptor when creating META (#3998)
commit 39c42c7 HBASE-26639 The implementation of TestMergesSplitsAddToTracker is pro… (#4010)
commit 6e1f5b7 HBASE-26586 Should not rely on the global config when setting SFT implementation for a table while upgrading (#4006)
commit f1dd865 HBASE-26654 ModifyTableDescriptorProcedure shoud load TableDescriptor… (#4034)
commit 8fbc9a2 HBASE-26674 Should modify filesCompacting under storeWriteLock (#4040)
commit 5aa0fd2 HBASE-26675 Data race on Compactor.writer (#4035)
commit 3021c58 HBASE-26700 The way we bypass broken track file is not enough in Stor… (#4055)
commit a8b68c9 HBASE-26690 Modify FSTableDescriptors to not rely on renaming when wr… (#4054)
commit dffeb8e HBASE-26587 Introduce a new Admin API to change SFT implementation (#… (#4080)
commit b265fe5 HBASE-26673 Implement a shell command for change SFT implementation (#4113)
commit 4cdb380 HBASE-26640 Reimplement master local region initialization to better … (#4111)
commit 77bb153 HBASE-26707: Reduce number of renames during bulkload (#4066) (#4122)
commit a4b192e HBASE-26611 Changing SFT implementation on disabled table is dangerous (#4082)
commit d3629bb HBASE-26837 Set SFT config when creating TableDescriptor in TestClone… (#4226)
commit 541d748 HBASE-26881 Backport HBASE-25368 to branch-2 (#4267)

Fixups for precommit Error Prone, checkstyle, and javadoc warnings after applying cherry picks.

Signed-off-by: Josh Elser <elserj@apache.org>
Reviewed-by: Wellington Ramos Chevreuil <wchevreuil@apache.org>
Andrew Purtell 2022-03-18 13:38:51 -07:00
parent 2bbddfee1e
commit 6902cb2568
15 changed files with 34 additions and 33 deletions

@@ -2868,10 +2868,10 @@ public class HBaseAdmin implements Admin {
@Override
protected RestoreSnapshotResponse rpcCall() throws Exception {
final RestoreSnapshotRequest.Builder builder = RestoreSnapshotRequest.newBuilder()
.setSnapshot(snapshot)
.setNonceGroup(nonceGroup)
.setNonce(nonce)
.setRestoreACL(restoreAcl);
.setSnapshot(snapshot)
.setNonceGroup(nonceGroup)
.setNonce(nonce)
.setRestoreACL(restoreAcl);
if (customSFT != null) {
builder.setCustomSFT(customSFT);
}

@@ -818,9 +818,8 @@ public class TableDescriptorBuilder {
/**
* Remove metadata represented by the key from the {@link #values} map
*
* @param key Key whose key and value we're to remove from TableDescriptor
* parameters.
* @return the modifyable TD
* @param key Key whose key and value we're to remove from TableDescriptor parameters
* @return the modifiable TD
*/
public ModifyableTableDescriptor removeValue(final byte[] key) {
return removeValue(new Bytes(key));

@@ -66,7 +66,8 @@ import org.slf4j.LoggerFactory;
@Category(IntegrationTests.class)
public class IntegrationTestFileBasedSFTBulkLoad extends IntegrationTestBulkLoad {
private static final Logger LOG = LoggerFactory.getLogger(IntegrationTestFileBasedSFTBulkLoad.class);
private static final Logger LOG =
LoggerFactory.getLogger(IntegrationTestFileBasedSFTBulkLoad.class);
private static String NUM_MAPS_KEY = "hbase.IntegrationTestBulkLoad.numMaps";
private static String NUM_IMPORT_ROUNDS_KEY = "hbase.IntegrationTestBulkLoad.numImportRounds";

@@ -232,8 +232,7 @@ public abstract class AbstractMemStore implements MemStore {
}
/**
* This method is protected under {@link HStore#lock} write lock,<br/>
* and this method is used by {@link HStore#updateStorefiles} after flushing is completed.<br/>
* This method is protected under HStore write lock.<br/>
* The passed snapshot was successfully persisted; it can be let go.
* @param id Id of the snapshot to clean out.
* @see MemStore#snapshot()

@@ -395,7 +395,7 @@ public class CompactingMemStore extends AbstractMemStore {
}
/**
* This method is protected under {@link HStore#lock} read lock.
* This method is protected under HStore read lock.
*/
@Override
public List<KeyValueScanner> getScanners(long readPt) throws IOException {

@@ -2262,7 +2262,8 @@ public class HRegionServer extends Thread implements
double brokenStoreFileCleanerDelayJitter = conf.getDouble(
BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_DELAY_JITTER,
BrokenStoreFileCleaner.DEFAULT_BROKEN_STOREFILE_CLEANER_DELAY_JITTER);
double jitterRate = (ThreadLocalRandom.current().nextDouble() - 0.5D) * brokenStoreFileCleanerDelayJitter;
double jitterRate = (ThreadLocalRandom.current().nextDouble() - 0.5D) *
brokenStoreFileCleanerDelayJitter;
long jitterValue = Math.round(brokenStoreFileCleanerDelay * jitterRate);
this.brokenStoreFileCleaner =
new BrokenStoreFileCleaner((int) (brokenStoreFileCleanerDelay + jitterValue),

@@ -364,7 +364,7 @@ public class SecureBulkLoadManager {
@Override
public String prepareBulkLoad(final byte[] family, final String srcPath, boolean copyFile,
String customStaging ) throws IOException {
String customStaging) throws IOException {
Path p = new Path(srcPath);
//store customStaging for failedBulkLoad

@@ -18,8 +18,6 @@
package org.apache.hadoop.hbase.snapshot;
import static org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

@@ -59,10 +59,10 @@ import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;
import org.junit.rules.TestName;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
@RunWith(Parameterized.class)
public class TestBulkloadBase {
@@ -135,7 +135,8 @@ public class TestBulkloadBase {
}
protected HRegion testRegionWithFamilies(byte[]... families) throws IOException {
TableName tableName = TableName.valueOf(name.getMethodName().substring(0, name.getMethodName().indexOf("[")));
TableName tableName =
TableName.valueOf(name.getMethodName().substring(0, name.getMethodName().indexOf("[")));
return testRegionWithFamiliesAndSpecifiedTableName(tableName, families);
}
@@ -175,7 +176,9 @@ public class TestBulkloadBase {
private static String generateUniqueName(final String suffix) {
String name = UUID.randomUUID().toString().replaceAll("-", "");
if (suffix != null) name += suffix;
if (suffix != null) {
name += suffix;
}
return name;
}

@@ -234,7 +234,6 @@ public class TestMergesSplitsAddToTracker {
List<StoreFileInfo> infos = region.getRegionFileSystem().getStoreFiles("info");
final MutableBoolean foundLink = new MutableBoolean(false);
infos.stream().forEach(i -> {
i.getActiveFileName().contains(orignalFileName);
if(i.getActiveFileName().contains(untrackedFile)){
fail();
}

@@ -17,6 +17,9 @@
*/
package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import java.util.Random;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
@@ -42,9 +45,6 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TemporaryFolder;
import org.junit.rules.TestName;
import java.io.IOException;
import java.util.Random;
import java.util.UUID;
/**
* Tests for failedBulkLoad logic to make sure staged files are returned to their original location
@@ -232,7 +232,8 @@ public class TestSecureBulkloadListener {
private String createHFileForFamilies(byte[] family) throws IOException {
HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(conf);
Path testDir = new Path(dfs.getWorkingDirectory() , new Path(name.getMethodName(), Bytes.toString(family)));
Path testDir = new Path(dfs.getWorkingDirectory(),
new Path(name.getMethodName(), Bytes.toString(family)));
if(!dfs.exists(testDir)){
dfs.mkdirs(testDir);
}
@@ -257,7 +258,9 @@ public class TestSecureBulkloadListener {
private static String generateUniqueName(final String suffix) {
String name = UUID.randomUUID().toString().replaceAll("-", "");
if (suffix != null) name += suffix;
if (suffix != null) {
name += suffix;
}
return name;
}

@@ -36,7 +36,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;

@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;

@@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;

@@ -59,8 +59,9 @@ public class MobSnapshotTestingUtils {
storeFileTracker, families);
}
public static void createPreSplitMobTable(final HBaseTestingUtility util, final TableName tableName,
int nRegions, final byte[]... families) throws IOException, InterruptedException {
public static void createPreSplitMobTable(final HBaseTestingUtility util,
final TableName tableName, int nRegions, final byte[]... families)
throws IOException, InterruptedException {
createMobTable(util, tableName, SnapshotTestingUtils.getSplitKeys(nRegions), 1, families);
}
@@ -77,9 +78,9 @@ public class MobSnapshotTestingUtils {
createMobTable(util, tableName, splitKeys, regionReplication, storeFileTracker, null, families);
}
public static void createMobTable(HBaseTestingUtility util, TableName tableName, byte[][] splitKeys,
int regionReplication, String storeFileTracker, String cpClassName, byte[]... families)
throws IOException, InterruptedException {
public static void createMobTable(HBaseTestingUtility util, TableName tableName,
byte[][] splitKeys, int regionReplication, String storeFileTracker, String cpClassName,
byte[]... families) throws IOException, InterruptedException {
TableDescriptorBuilder builder =
TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(regionReplication);
for (byte[] family : families) {