HBASE-26707: Reduce number of renames during bulkload (#4066)

Signed-off-by: Wellington Ramos Chevreuil <wchevreuil@apache.org>
Author: BukrosSzabolcs, 2022-02-17 20:34:48 +01:00 (committed by GitHub)
parent 8dec499181
commit 4829806220
9 changed files with 583 additions and 37 deletions
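
In short: when a store file tracker (such as the file-based one) does not require writing new files through a temporary directory first, the bulkload can stage HFiles directly into the store directory and skip the final rename. A minimal sketch of that decision using java.nio stand-ins; the helper name mirrors the store-engine call in the diff below, but everything else is illustrative, not HBase internals:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

// Illustrative sketch of the commit's core idea, not real HBase code.
public final class BulkLoadRenameSketch {

  // Stands in for store.storeEngine.requireWritingToTmpDirFirst() below.
  static boolean requireWritingToTmpDirFirst(boolean fileBasedTracker) {
    // The file-based tracker records the set of live store files in meta
    // files, so it tolerates files appearing directly in the store dir.
    return !fileBasedTracker;
  }

  static Path bulkLoad(Path srcHFile, Path stagingDir, Path storeDir,
      boolean fileBasedTracker) throws IOException {
    if (requireWritingToTmpDirFirst(fileBasedTracker)) {
      // Old flow: rename into staging, then rename again into the store.
      Path staged = Files.move(srcHFile, stagingDir.resolve(srcHFile.getFileName()));
      return Files.move(staged, storeDir.resolve(staged.getFileName()));
    }
    // New flow: one rename straight to the final location; commitStoreFile
    // later sees buildPath.equals(dstPath) and skips its rename.
    return Files.move(srcHFile, storeDir.resolve(srcHFile.getFileName()));
  }

  private BulkLoadRenameSketch() {
  }
}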

IntegrationTestFileBasedSFTBulkLoad.java (new file)

@@ -0,0 +1,166 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.mapreduce;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.IntegrationTestBase;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
import org.apache.hadoop.hbase.testclassification.IntegrationTests;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
import static org.junit.Assert.assertEquals;
/**
* Test bulk load and MR on a distributed cluster, with the file-based
* StoreFileTracker enabled.
* It starts an MR job that creates linked chains.
*
* The format of rows is like this:
* Row Key -> Long
*
* L:<< Chain Id >> -> Row Key of the next link in the chain
* S:<< Chain Id >> -> The step in the chain that this link occupies.
* D:<< Chain Id >> -> Random Data.
*
* All chains start on row 0.
* All row keys are > 0.
*
* After creating the linked lists, they are walked over using a TableMapper-based MapReduce job.
*
* There are a few options exposed:
*
* hbase.IntegrationTestBulkLoad.chainLength
* The number of rows that will be part of each and every chain.
*
* hbase.IntegrationTestBulkLoad.numMaps
* The number of mappers that will be run. Each mapper creates one linked list chain.
*
* hbase.IntegrationTestBulkLoad.numImportRounds
* How many jobs will be run to create linked lists.
*
* hbase.IntegrationTestBulkLoad.tableName
* The name of the table.
*
* hbase.IntegrationTestBulkLoad.replicaCount
* How many region replicas to configure for the table under test.
*/
@Category(IntegrationTests.class)
public class IntegrationTestFileBasedSFTBulkLoad extends IntegrationTestBulkLoad {
private static final Logger LOG = LoggerFactory.getLogger(IntegrationTestFileBasedSFTBulkLoad.class);
private static String NUM_MAPS_KEY = "hbase.IntegrationTestBulkLoad.numMaps";
private static String NUM_IMPORT_ROUNDS_KEY = "hbase.IntegrationTestBulkLoad.numImportRounds";
private static String NUM_REPLICA_COUNT_KEY = "hbase.IntegrationTestBulkLoad.replicaCount";
private static int NUM_REPLICA_COUNT_DEFAULT = 1;
@Test
public void testFileBasedSFTBulkLoad() throws Exception {
super.testBulkLoad();
}
@Override
public void setUpCluster() throws Exception {
util = getTestingUtil(getConf());
util.getConfiguration().set(StoreFileTrackerFactory.TRACKER_IMPL,
"org.apache.hadoop.hbase.regionserver.storefiletracker.FileBasedStoreFileTracker");
util.initializeCluster(1);
int replicaCount = getConf().getInt(NUM_REPLICA_COUNT_KEY, NUM_REPLICA_COUNT_DEFAULT);
if (LOG.isDebugEnabled() && replicaCount != NUM_REPLICA_COUNT_DEFAULT) {
LOG.debug("Region Replicas enabled: " + replicaCount);
}
// Scale this up on a real cluster
if (util.isDistributedCluster()) {
util.getConfiguration().setIfUnset(NUM_MAPS_KEY,
Integer.toString(util.getAdmin().getRegionServers().size() * 10));
util.getConfiguration().setIfUnset(NUM_IMPORT_ROUNDS_KEY, "5");
} else {
util.startMiniMapReduceCluster();
}
}
public static void main(String[] args) throws Exception {
Configuration conf = HBaseConfiguration.create();
IntegrationTestingUtility.setUseDistributedCluster(conf);
int status = ToolRunner.run(conf, new IntegrationTestFileBasedSFTBulkLoad(), args);
System.exit(status);
}
}
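
The options in the class comment are plain Configuration keys, so the test can also be driven from code. A hedged usage sketch mirroring the main() above; the option values are made-up examples, not recommended settings:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.mapreduce.IntegrationTestFileBasedSFTBulkLoad;
import org.apache.hadoop.util.ToolRunner;

public class RunFileBasedSFTBulkLoad {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Example values only; unset keys fall back to the defaults in
    // IntegrationTestBulkLoad.
    conf.setLong("hbase.IntegrationTestBulkLoad.chainLength", 500000L);
    conf.setInt("hbase.IntegrationTestBulkLoad.numMaps", 10);
    conf.setInt("hbase.IntegrationTestBulkLoad.numImportRounds", 2);
    conf.set("hbase.IntegrationTestBulkLoad.tableName", "SFTBulkLoadTest");
    IntegrationTestingUtility.setUseDistributedCluster(conf);
    System.exit(ToolRunner.run(conf, new IntegrationTestFileBasedSFTBulkLoad(), args));
  }
}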

HRegion.java

@@ -7128,7 +7128,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    * @return final path to be used for actual loading
    * @throws IOException
    */
-  String prepareBulkLoad(byte[] family, String srcPath, boolean copyFile)
+  String prepareBulkLoad(byte[] family, String srcPath, boolean copyFile, String customStaging)
     throws IOException;
 
   /**
@@ -7250,12 +7259,21 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         familyWithFinalPath.put(familyName, new ArrayList<>());
       }
       List<Pair<Path, Path>> lst = familyWithFinalPath.get(familyName);
-      try {
-        String finalPath = path;
+      String finalPath = path;
+      try {
+        boolean reqTmp = store.storeEngine.requireWritingToTmpDirFirst();
         if (bulkLoadListener != null) {
-          finalPath = bulkLoadListener.prepareBulkLoad(familyName, path, copyFile);
+          finalPath = bulkLoadListener.prepareBulkLoad(familyName, path, copyFile,
+            reqTmp ? null : regionDir.toString());
+        }
+        Pair<Path, Path> pair = null;
+        if (reqTmp) {
+          pair = store.preBulkLoadHFile(finalPath, seqId);
+        }
+        else {
+          Path livePath = new Path(finalPath);
+          pair = new Pair<>(livePath, livePath);
         }
-        Pair<Path, Path> pair = store.preBulkLoadHFile(finalPath, seqId);
         lst.add(pair);
       } catch (IOException ioe) {
         // A failure here can cause an atomicity violation that we currently
@@ -7265,7 +7274,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
           " load " + Bytes.toString(p.getFirst()) + " : " + p.getSecond(), ioe);
         if (bulkLoadListener != null) {
           try {
-            bulkLoadListener.failedBulkLoad(familyName, path);
+            bulkLoadListener.failedBulkLoad(familyName, finalPath);
           } catch (Exception ex) {
             LOG.error("Error while calling failedBulkLoad for family " +
               Bytes.toString(familyName) + " with path " + path, ex);

HRegionFileSystem.java

@@ -508,6 +508,10 @@ public class HRegionFileSystem {
    * @throws IOException
    */
   Path commitStoreFile(final Path buildPath, Path dstPath) throws IOException {
+    // rename is not necessary in case of direct-insert stores
+    if (buildPath.equals(dstPath)) {
+      return dstPath;
+    }
     // buildPath exists, therefore not doing an exists() check.
     if (!rename(buildPath, dstPath)) {
       throw new IOException("Failed rename of " + buildPath + " to " + dstPath);

SecureBulkLoadManager.java

@@ -27,6 +27,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.function.Consumer;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.lang3.mutable.MutableInt;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -341,7 +342,8 @@ public class SecureBulkLoadManager {
     return user;
   }
 
-  private static class SecureBulkLoadListener implements BulkLoadListener {
+  //package-private for test purpose only
+  static class SecureBulkLoadListener implements BulkLoadListener {
     // Target filesystem
     private final FileSystem fs;
     private final String stagingDir;
@@ -349,19 +351,28 @@ public class SecureBulkLoadManager {
     // Source filesystem
     private FileSystem srcFs = null;
     private Map<String, FsPermission> origPermissions = null;
+    private Map<String, String> origSources = null;
 
     public SecureBulkLoadListener(FileSystem fs, String stagingDir, Configuration conf) {
       this.fs = fs;
       this.stagingDir = stagingDir;
       this.conf = conf;
       this.origPermissions = new HashMap<>();
+      this.origSources = new HashMap<>();
     }
 
     @Override
-    public String prepareBulkLoad(final byte[] family, final String srcPath, boolean copyFile)
-        throws IOException {
+    public String prepareBulkLoad(final byte[] family, final String srcPath, boolean copyFile,
+        String customStaging) throws IOException {
       Path p = new Path(srcPath);
-      Path stageP = new Path(stagingDir, new Path(Bytes.toString(family), p.getName()));
+
+      //store customStaging for failedBulkLoad
+      String currentStaging = stagingDir;
+      if (StringUtils.isNotEmpty(customStaging)) {
+        currentStaging = customStaging;
+      }
+
+      Path stageP = new Path(currentStaging, new Path(Bytes.toString(family), p.getName()));
 
       // In case of Replication for bulk load files, hfiles are already copied in staging directory
       if (p.equals(stageP)) {
@@ -390,11 +401,16 @@ public class SecureBulkLoadManager {
         LOG.debug("Moving " + p + " to " + stageP);
         FileStatus origFileStatus = fs.getFileStatus(p);
         origPermissions.put(srcPath, origFileStatus.getPermission());
+        origSources.put(stageP.toString(), srcPath);
         if (!fs.rename(p, stageP)) {
           throw new IOException("Failed to move HFile: " + p + " to " + stageP);
         }
       }
+
+      if (StringUtils.isNotEmpty(customStaging)) {
         fs.setPermission(stageP, PERM_ALL_ACCESS);
+      }
+
       return stageP.toString();
     }
@@ -412,35 +428,37 @@ public class SecureBulkLoadManager {
     }
 
     @Override
-    public void failedBulkLoad(final byte[] family, final String srcPath) throws IOException {
+    public void failedBulkLoad(final byte[] family, final String stagedPath) throws IOException {
       try {
-        Path p = new Path(srcPath);
-        if (srcFs == null) {
-          srcFs = FileSystem.newInstance(p.toUri(), conf);
-        }
-        if (!FSUtils.isSameHdfs(conf, srcFs, fs)) {
-          // files are copied so no need to move them back
-          return;
-        }
-        Path stageP = new Path(stagingDir, new Path(Bytes.toString(family), p.getName()));
-
-        // In case of Replication for bulk load files, hfiles are not renamed by end point during
-        // prepare stage, so no need of rename here again
-        if (p.equals(stageP)) {
-          LOG.debug(p.getName() + " is already available in source directory. Skipping rename.");
+        String src = origSources.get(stagedPath);
+        if (StringUtils.isEmpty(src)) {
+          LOG.debug(stagedPath + " was not moved to staging. No need to move back");
           return;
         }
-        LOG.debug("Moving " + stageP + " back to " + p);
-        if (!fs.rename(stageP, p)) {
-          throw new IOException("Failed to move HFile: " + stageP + " to " + p);
+
+        Path stageP = new Path(stagedPath);
+        if (!fs.exists(stageP)) {
+          throw new IOException(
+            "Missing HFile: " + stageP + ", can't be moved back to it's original place");
+        }
+
+        //we should not move back files if the original exists
+        Path srcPath = new Path(src);
+        if (srcFs.exists(srcPath)) {
+          LOG.debug(src + " is already at it's original place. No need to move.");
+          return;
+        }
+
+        LOG.debug("Moving " + stageP + " back to " + srcPath);
+        if (!fs.rename(stageP, srcPath)) {
+          throw new IOException("Failed to move HFile: " + stageP + " to " + srcPath);
         }
 
         // restore original permission
-        if (origPermissions.containsKey(srcPath)) {
-          fs.setPermission(p, origPermissions.get(srcPath));
+        if (origPermissions.containsKey(stagedPath)) {
+          fs.setPermission(srcPath, origPermissions.get(src));
         } else {
-          LOG.warn("Can't find previous permission for path=" + srcPath);
+          LOG.warn("Can't find previous permission for path=" + stagedPath);
         }
       } finally {
         closeSrcFs();
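
The listener now records where each staged file came from (origSources), so failedBulkLoad is keyed by the staged path it handed out rather than by the source path. A sketch of the round-trip, closely following the tests added at the bottom of this commit; it must live in org.apache.hadoop.hbase.regionserver because the listener class is package-private, and it assumes the source HFiles already exist on the listener's filesystem:

package org.apache.hadoop.hbase.regionserver;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.util.Bytes;

public class ListenerRoundTripSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    byte[] family = Bytes.toBytes("family");

    SecureBulkLoadManager.SecureBulkLoadListener listener =
      new SecureBulkLoadManager.SecureBulkLoadListener(fs, "/staging", conf);

    // Default staging: /tmp/hfile1 is renamed to /staging/family/hfile1 and
    // its origin is remembered in origSources.
    String staged = listener.prepareBulkLoad(family, "/tmp/hfile1", false, null);

    // On failure the staged path alone identifies the file to move back.
    listener.failedBulkLoad(family, staged);

    // Custom staging (e.g. the region directory for direct-insert trackers):
    // the file lands where commitStoreFile expects it, so no second rename.
    String live = listener.prepareBulkLoad(family, "/tmp/hfile2", false,
      "/hbase/data/ns/tbl/region");
    System.out.println("staged directly at " + live);
  }
}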

TestBulkLoad.java

@@ -54,6 +54,10 @@ public class TestBulkLoad extends TestBulkloadBase {
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestBulkLoad.class);
 
+  public TestBulkLoad(boolean useFileBasedSFT) {
+    super(useFileBasedSFT);
+  }
+
   @Test
   public void verifyBulkLoadEvent() throws IOException {
     TableName tableName = TableName.valueOf("test", "test");

TestBulkloadBase.java

@@ -27,8 +27,10 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.List;
 import java.util.Random;
+import java.util.UUID;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -46,6 +48,7 @@ import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.WAL;
@@ -62,7 +65,10 @@ import org.junit.rules.TestName;
 
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
+@RunWith(Parameterized.class)
 public class TestBulkloadBase {
   @ClassRule
   public static TemporaryFolder testFolder = new TemporaryFolder();
@@ -75,12 +81,31 @@ public class TestBulkloadBase {
   protected final byte[] family2 = Bytes.toBytes("family2");
   protected final byte[] family3 = Bytes.toBytes("family3");
 
+  protected Boolean useFileBasedSFT;
+
   @Rule
   public TestName name = new TestName();
 
+  public TestBulkloadBase(boolean useFileBasedSFT) {
+    this.useFileBasedSFT = useFileBasedSFT;
+  }
+
+  @Parameterized.Parameters
+  public static Collection<Boolean> data() {
+    Boolean[] data = {false, true};
+    return Arrays.asList(data);
+  }
+
   @Before
   public void before() throws IOException {
     random.nextBytes(randomBytes);
+    if (useFileBasedSFT) {
+      conf.set(StoreFileTrackerFactory.TRACKER_IMPL,
+        "org.apache.hadoop.hbase.regionserver.storefiletracker.FileBasedStoreFileTracker");
+    }
+    else {
+      conf.unset(StoreFileTrackerFactory.TRACKER_IMPL);
+    }
   }
 
   protected Pair<byte[], String> withMissingHFileForFamily(byte[] family) {
@@ -115,7 +140,7 @@ public class TestBulkloadBase {
   }
 
   protected HRegion testRegionWithFamilies(byte[]... families) throws IOException {
-    TableName tableName = TableName.valueOf(name.getMethodName());
+    TableName tableName = TableName.valueOf(name.getMethodName().substring(0, name.getMethodName().indexOf("[")));
     return testRegionWithFamiliesAndSpecifiedTableName(tableName, families);
   }
@@ -134,7 +159,7 @@ public class TestBulkloadBase {
   private String createHFileForFamilies(byte[] family) throws IOException {
     HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(conf);
     // TODO We need a way to do this without creating files
-    File hFileLocation = testFolder.newFile();
+    File hFileLocation = testFolder.newFile(generateUniqueName(null));
     FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(hFileLocation), null);
     try {
       hFileFactory.withOutputStream(out);
@@ -153,6 +178,12 @@ public class TestBulkloadBase {
     return hFileLocation.getAbsoluteFile().getAbsolutePath();
   }
 
+  private static String generateUniqueName(final String suffix) {
+    String name = UUID.randomUUID().toString().replaceAll("-", "");
+    if (suffix != null) name += suffix;
+    return name;
+  }
+
   protected static Matcher<WALEdit> bulkLogWalEditType(byte[] typeBytes) {
     return new WalMatcher(typeBytes);
   }

TestCompactionAfterBulkLoad.java

@@ -61,6 +61,10 @@ public class TestCompactionAfterBulkLoad extends TestBulkloadBase {
   private final RegionServerServices regionServerServices = mock(RegionServerServices.class);
   public static AtomicInteger called = new AtomicInteger(0);
 
+  public TestCompactionAfterBulkLoad(boolean useFileBasedSFT) {
+    super(useFileBasedSFT);
+  }
+
   @Override
   protected HRegion testRegionWithFamiliesAndSpecifiedTableName(TableName tableName,
       byte[]... families) throws IOException {

TestSecureBulkLoadManager.java

@@ -19,6 +19,8 @@ package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.Deque;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicReference;
@@ -44,24 +46,30 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.tool.BulkLoadHFilesTool;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
+import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
 
+@RunWith(Parameterized.class)
 @Category({RegionServerTests.class, MediumTests.class})
 public class TestSecureBulkLoadManager {
@@ -85,16 +93,35 @@ public class TestSecureBulkLoadManager {
   private Thread ealierBulkload;
   private Thread laterBulkload;
 
+  protected Boolean useFileBasedSFT;
+
   protected final static HBaseTestingUtil testUtil = new HBaseTestingUtil();
   private static Configuration conf = testUtil.getConfiguration();
 
-  @BeforeClass
-  public static void setUp() throws Exception {
+  public TestSecureBulkLoadManager(Boolean useFileBasedSFT) {
+    this.useFileBasedSFT = useFileBasedSFT;
+  }
+
+  @Parameterized.Parameters
+  public static Collection<Boolean> data() {
+    Boolean[] data = {false, true};
+    return Arrays.asList(data);
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    if (useFileBasedSFT) {
+      conf.set(StoreFileTrackerFactory.TRACKER_IMPL,
+        "org.apache.hadoop.hbase.regionserver.storefiletracker.FileBasedStoreFileTracker");
+    }
+    else {
+      conf.unset(StoreFileTrackerFactory.TRACKER_IMPL);
+    }
     testUtil.startMiniCluster();
   }
 
-  @AfterClass
-  public static void tearDown() throws Exception {
+  @After
+  public void tearDown() throws Exception {
     testUtil.shutdownMiniCluster();
     testUtil.cleanupTestDir();
   }

TestSecureBulkloadListener.java (new file)

@@ -0,0 +1,283 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TemporaryFolder;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.BindException;
import java.net.ServerSocket;
import java.util.Random;
import java.util.UUID;
/**
* Tests for failedBulkLoad logic to make sure staged files are returned to their original location
* if the bulk load has failed.
*/
@Category({MiscTests.class, LargeTests.class})
public class TestSecureBulkloadListener {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestSecureBulkloadListener.class);
private static final Logger LOG = LoggerFactory.getLogger(TestSecureBulkloadListener.class);
@ClassRule
public static TemporaryFolder testFolder = new TemporaryFolder();
private Configuration conf;
private MiniDFSCluster cluster;
private HBaseTestingUtil htu;
private DistributedFileSystem dfs;
private final Random random = new Random();
private final byte[] randomBytes = new byte[100];
private static final String host1 = "host1";
private static final String host2 = "host2";
private static final String host3 = "host3";
private static byte[] FAMILY = Bytes.toBytes("family");
private static final String STAGING_DIR = "staging";
private static final String CUSTOM_STAGING_DIR = "customStaging";
@Rule
public TestName name = new TestName();
@Before
public void setUp() throws Exception {
random.nextBytes(randomBytes);
htu = new HBaseTestingUtil();
htu.getConfiguration().setInt("dfs.blocksize", 1024);// For the test with multiple blocks
htu.getConfiguration().setInt("dfs.replication", 3);
htu.startMiniDFSCluster(3,
new String[]{"/r1", "/r2", "/r3"}, new String[]{host1, host2, host3});
conf = htu.getConfiguration();
cluster = htu.getDFSCluster();
dfs = (DistributedFileSystem) FileSystem.get(conf);
}
@After
public void tearDownAfterClass() throws Exception {
htu.shutdownMiniCluster();
}
@Test
public void testMovingStagedFile() throws Exception {
Path stagingDirPath =
new Path(dfs.getWorkingDirectory(), new Path(name.getMethodName(), STAGING_DIR));
if (!dfs.exists(stagingDirPath)) {
dfs.mkdirs(stagingDirPath);
}
SecureBulkLoadManager.SecureBulkLoadListener listener =
new SecureBulkLoadManager.SecureBulkLoadListener(dfs, stagingDirPath.toString(), conf);
//creating file to load
String srcFile = createHFileForFamilies(FAMILY);
Path srcPath = new Path(srcFile);
Assert.assertTrue(dfs.exists(srcPath));
Path stagedFamily = new Path(stagingDirPath, new Path(Bytes.toString(FAMILY)));
if (!dfs.exists(stagedFamily)) {
dfs.mkdirs(stagedFamily);
}
//moving file to staging
String stagedFile = listener.prepareBulkLoad(FAMILY, srcFile, false, null);
Path stagedPath = new Path(stagedFile);
Assert.assertTrue(dfs.exists(stagedPath));
Assert.assertFalse(dfs.exists(srcPath));
//moving files back to original location after a failed bulkload
listener.failedBulkLoad(FAMILY, stagedFile);
Assert.assertFalse(dfs.exists(stagedPath));
Assert.assertTrue(dfs.exists(srcPath));
}
@Test
public void testMovingStagedFileWithCustomStageDir() throws Exception {
Path stagingDirPath =
new Path(dfs.getWorkingDirectory(), new Path(name.getMethodName(), STAGING_DIR));
if (!dfs.exists(stagingDirPath)) {
dfs.mkdirs(stagingDirPath);
}
SecureBulkLoadManager.SecureBulkLoadListener listener =
new SecureBulkLoadManager.SecureBulkLoadListener(dfs, stagingDirPath.toString(), conf);
//creating file to load
String srcFile = createHFileForFamilies(FAMILY);
Path srcPath = new Path(srcFile);
Assert.assertTrue(dfs.exists(srcPath));
Path stagedFamily = new Path(stagingDirPath, new Path(Bytes.toString(FAMILY)));
if (!dfs.exists(stagedFamily)) {
dfs.mkdirs(stagedFamily);
}
Path customStagingDirPath =
new Path(dfs.getWorkingDirectory(), new Path(name.getMethodName(), CUSTOM_STAGING_DIR));
Path customStagedFamily = new Path(customStagingDirPath, new Path(Bytes.toString(FAMILY)));
if (!dfs.exists(customStagedFamily)) {
dfs.mkdirs(customStagedFamily);
}
//moving file to staging using a custom staging dir
String stagedFile =
listener.prepareBulkLoad(FAMILY, srcFile, false, customStagingDirPath.toString());
Path stagedPath = new Path(stagedFile);
Assert.assertTrue(dfs.exists(stagedPath));
Assert.assertFalse(dfs.exists(srcPath));
//moving files back to original location after a failed bulkload
listener.failedBulkLoad(FAMILY, stagedFile);
Assert.assertFalse(dfs.exists(stagedPath));
Assert.assertTrue(dfs.exists(srcPath));
}
@Test
public void testCopiedStagedFile() throws Exception {
Path stagingDirPath =
new Path(dfs.getWorkingDirectory(), new Path(name.getMethodName(), STAGING_DIR));
if (!dfs.exists(stagingDirPath)) {
dfs.mkdirs(stagingDirPath);
}
SecureBulkLoadManager.SecureBulkLoadListener listener =
new SecureBulkLoadManager.SecureBulkLoadListener(dfs, stagingDirPath.toString(), conf);
//creating file to load
String srcFile = createHFileForFamilies(FAMILY);
Path srcPath = new Path(srcFile);
Assert.assertTrue(dfs.exists(srcPath));
Path stagedFamily = new Path(stagingDirPath, new Path(Bytes.toString(FAMILY)));
if (!dfs.exists(stagedFamily)) {
dfs.mkdirs(stagedFamily);
}
//copying file to staging
String stagedFile = listener.prepareBulkLoad(FAMILY, srcFile, true, null);
Path stagedPath = new Path(stagedFile);
Assert.assertTrue(dfs.exists(stagedPath));
Assert.assertTrue(dfs.exists(srcPath));
//should do nothing because the original file was copied to staging
listener.failedBulkLoad(FAMILY, stagedFile);
Assert.assertTrue(dfs.exists(stagedPath));
Assert.assertTrue(dfs.exists(srcPath));
}
@Test(expected = IOException.class)
public void testDeletedStagedFile() throws Exception {
Path stagingDirPath =
new Path(dfs.getWorkingDirectory(), new Path(name.getMethodName(), STAGING_DIR));
if (!dfs.exists(stagingDirPath)) {
dfs.mkdirs(stagingDirPath);
}
SecureBulkLoadManager.SecureBulkLoadListener listener =
new SecureBulkLoadManager.SecureBulkLoadListener(dfs, stagingDirPath.toString(), conf);
//creating file to load
String srcFile = createHFileForFamilies(FAMILY);
Path srcPath = new Path(srcFile);
Assert.assertTrue(dfs.exists(srcPath));
Path stagedFamily = new Path(stagingDirPath, new Path(Bytes.toString(FAMILY)));
if (!dfs.exists(stagedFamily)) {
dfs.mkdirs(stagedFamily);
}
//moving file to staging
String stagedFile = listener.prepareBulkLoad(FAMILY, srcFile, false, null);
Path stagedPath = new Path(stagedFile);
Assert.assertTrue(dfs.exists(stagedPath));
Assert.assertFalse(dfs.exists(srcPath));
dfs.delete(stagedPath, false);
//moving files back to original location after a failed bulkload
listener.failedBulkLoad(FAMILY, stagedFile);
}
private String createHFileForFamilies(byte[] family) throws IOException {
HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(conf);
Path testDir = new Path(dfs.getWorkingDirectory() , new Path(name.getMethodName(), Bytes.toString(family)));
if(!dfs.exists(testDir)){
dfs.mkdirs(testDir);
}
Path hfilePath = new Path(testDir, generateUniqueName(null));
FSDataOutputStream out = dfs.createFile(hfilePath).build();
try {
hFileFactory.withOutputStream(out);
hFileFactory.withFileContext(new HFileContextBuilder().build());
HFile.Writer writer = hFileFactory.create();
try {
writer.append(new KeyValue(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY)
.setRow(randomBytes).setFamily(family).setQualifier(randomBytes).setTimestamp(0L)
.setType(KeyValue.Type.Put.getCode()).setValue(randomBytes).build()));
} finally {
writer.close();
}
} finally {
out.close();
}
return hfilePath.toString();
}
private static String generateUniqueName(final String suffix) {
String name = UUID.randomUUID().toString().replaceAll("-", "");
if (suffix != null) name += suffix;
return name;
}
}