diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index aabda40a8f9..90f54d14dd1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -179,7 +179,6 @@ import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.wal.WALKeyImpl;
 import org.apache.hadoop.hbase.wal.WALSplitUtil;
 import org.apache.hadoop.hbase.wal.WALSplitUtil.MutationReplay;
-import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.htrace.core.TraceScope;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -6269,8 +6268,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    * @throws IOException if failed unrecoverably.
    */
   public Map<byte[], List<Path>> bulkLoadHFiles(Collection<Pair<byte[], String>> familyPaths,
-      boolean assignSeqId, BulkLoadListener bulkLoadListener,
-      boolean copyFile, List<String> clusterIds, boolean replicate) throws IOException {
+      boolean assignSeqId, BulkLoadListener bulkLoadListener, boolean copyFile,
+      List<String> clusterIds, boolean replicate) throws IOException {
     long seqId = -1;
     Map<byte[], List<Path>> storeFiles = new TreeMap<>(Bytes.BYTES_COMPARATOR);
     Map<String, Long> storeFilesSizes = new HashMap<>();
@@ -6282,9 +6281,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     this.writeRequestsCount.increment();
 
     // There possibly was a split that happened between when the split keys
-    // were gathered and before the HRegion's write lock was taken.  We need
+    // were gathered and before the HRegion's write lock was taken. We need
     // to validate the HFile region before attempting to bulk load all of them
-    List<IOException> ioes = new ArrayList<>();
+    IOException ioException = null;
     List<Pair<byte[], String>> failures = new ArrayList<>();
     for (Pair<byte[], String> p : familyPaths) {
       byte[] familyName = p.getFirst();
@@ -6292,9 +6291,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
       HStore store = getStore(familyName);
       if (store == null) {
-        IOException ioe = new org.apache.hadoop.hbase.DoNotRetryIOException(
+        ioException = new org.apache.hadoop.hbase.DoNotRetryIOException(
             "No such column family " + Bytes.toStringBinary(familyName));
-        ioes.add(ioe);
       } else {
         try {
           store.assertBulkLoadHFileOk(new Path(path));
@@ -6303,18 +6301,16 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
           failures.add(p);
         } catch (IOException ioe) {
           // unrecoverable (hdfs problem)
-          ioes.add(ioe);
+          ioException = ioe;
         }
       }
-    }
 
-    // validation failed because of some sort of IO problem.
-    if (ioes.size() != 0) {
-      IOException e = MultipleIOException.createIOException(ioes);
-      LOG.error("There were one or more IO errors when checking if the bulk load is ok.", e);
-      throw e;
+      // validation failed because of some sort of IO problem.
+      if (ioException != null) {
+        LOG.error("There was IO error when checking if the bulk load is ok.", ioException);
+        throw ioException;
+      }
     }
-
     // validation failed, bail out before doing anything permanent.
     if (failures.size() != 0) {
       StringBuilder list = new StringBuilder();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
index 599a7a3010c..621888e00da 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
@@ -93,6 +93,7 @@ public class TestBulkLoad {
   private final byte[] randomBytes = new byte[100];
   private final byte[] family1 = Bytes.toBytes("family1");
   private final byte[] family2 = Bytes.toBytes("family2");
+  private final byte[] family3 = Bytes.toBytes("family3");
 
   @Rule
   public TestName name = new TestName();
@@ -202,6 +203,13 @@
       null);
   }
 
+  // after HBASE-24021 will throw DoNotRetryIOException, not MultipleIOException
+  @Test(expected = DoNotRetryIOException.class)
+  public void shouldCrashIfBulkLoadMultiFamiliesNotInTable() throws IOException {
+    testRegionWithFamilies(family1).bulkLoadHFiles(withFamilyPathsFor(family1, family2, family3),
+      false, null);
+  }
+
   @Test(expected = DoNotRetryIOException.class)
   public void bulkHLogShouldThrowErrorWhenFamilySpecifiedAndHFileExistsButNotInTableDescriptor()
       throws IOException {
@@ -221,6 +229,15 @@
     testRegionWithFamilies(family1).bulkLoadHFiles(list, false, null);
   }
 
+  // after HBASE-24021 will throw FileNotFoundException, not MultipleIOException
+  @Test(expected = FileNotFoundException.class)
+  public void shouldThrowErrorIfMultiHFileDoesNotExist() throws IOException {
+    List<Pair<byte[], String>> list = new ArrayList<>();
+    list.addAll(asList(withMissingHFileForFamily(family1)));
+    list.addAll(asList(withMissingHFileForFamily(family2)));
+    testRegionWithFamilies(family1, family2).bulkLoadHFiles(list, false, null);
+  }
+
   private Pair<byte[], String> withMissingHFileForFamily(byte[] family) {
     return new Pair<>(family, getNotExistFilePath());
   }