HBASE-17095 The ClientSimpleScanner keeps retrying if the hfile is corrupt or cannot be found

Jingcheng Du 2016-11-22 11:18:00 +08:00
parent 3e9d1a19e2
commit b297f2dae1
2 changed files with 119 additions and 0 deletions
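
Below is a minimal, hypothetical client-side sketch of the behaviour this change targets. The class name ScanCorruptHFileExample, the table name "t1", and the default configuration are placeholders and not part of the patch; the point is that a scan reading a corrupt or missing HFile now surfaces a DoNotRetryIOException to the caller instead of the ClientScanner retrying the request.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class ScanCorruptHFileExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("t1"));
         ResultScanner scanner = table.getScanner(new Scan())) {
      for (Result result : scanner) {
        // process each row
      }
    } catch (DoNotRetryIOException e) {
      // With this fix, a corrupt or missing HFile is reported here immediately,
      // rather than the ClientScanner retrying the same scan against the server.
    }
  }
}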

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver;
import com.google.common.annotations.VisibleForTesting;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.net.BindException;
@@ -81,6 +82,7 @@ import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
import org.apache.hadoop.hbase.exceptions.ScannerResetException;
import org.apache.hadoop.hbase.filter.ByteArrayComparable;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.io.hfile.CorruptHFileException;
import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.ipc.PriorityFunction;
@@ -2997,6 +2999,11 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
// scanner is closed here
scannerClosed = true;
// If it is a CorruptHFileException or a FileNotFoundException, throw a
// DoNotRetryIOException so the ClientScanner does not retry.
if (e instanceof CorruptHFileException || e instanceof FileNotFoundException) {
throw new DoNotRetryIOException(e);
}
// We closed the scanner already. Instead of throwing the IOException and having the client
// retry with the same scannerId only to get an UnknownScannerException (USE) on the next
// RPC, we directly throw a special exception to save an RPC.

hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithCorruptHFile.java

@@ -0,0 +1,112 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.io.hfile.CorruptHFileException;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
/**
* Tests a scanner on a corrupt hfile.
*/
@Category(MediumTests.class)
public class TestScannerWithCorruptHFile {
@Rule public TestName name = new TestName();
private static final byte[] FAMILY_NAME = Bytes.toBytes("f");
private final static HBaseTestingUtility TEST_UTIL = HBaseTestingUtility.createLocalHTU();
@BeforeClass
public static void setup() throws Exception {
TEST_UTIL.startMiniCluster(1);
}
@AfterClass
public static void tearDown() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
public static class CorruptHFileCoprocessor extends BaseRegionObserver {
@Override
public boolean preScannerNext(ObserverContext<RegionCoprocessorEnvironment> e,
InternalScanner s, List<Result> results, int limit, boolean hasMore) throws IOException {
throw new CorruptHFileException("For test");
}
}
@Test(expected = DoNotRetryIOException.class)
public void testScanOnCorruptHFile() throws IOException {
TableName tableName = TableName.valueOf(name.getMethodName());
HTableDescriptor htd = new HTableDescriptor(tableName);
htd.addCoprocessor(CorruptHFileCoprocessor.class.getName());
htd.addFamily(new HColumnDescriptor(FAMILY_NAME));
Table table = TEST_UTIL.createTable(htd, null);
try {
loadTable(table, 1);
scan(table);
} finally {
table.close();
}
}
private void loadTable(Table table, int numRows) throws IOException {
for (int i = 0; i < numRows; ++i) {
byte[] row = Bytes.toBytes(i);
Put put = new Put(row);
put.setDurability(Durability.SKIP_WAL);
put.addColumn(FAMILY_NAME, null, row);
table.put(put);
}
}
private void scan(Table table) throws IOException {
Scan scan = new Scan();
scan.setCaching(1);
scan.setCacheBlocks(false);
ResultScanner scanner = table.getScanner(scan);
try {
scanner.next();
} finally {
scanner.close();
}
}
}
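
The test drives the failure through a region observer rather than by corrupting an HFile on disk: CorruptHFileCoprocessor throws CorruptHFileException from preScannerNext, and with the RSRpcServices change above the region server wraps it in a DoNotRetryIOException, which is exactly what the @Test(expected = DoNotRetryIOException.class) annotation asserts the client sees. Without the fix, the ClientScanner would keep retrying the scan instead of failing fast.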