From b297f2dae1cf1ef9a6c0aef30977ff1dcc3db9ee Mon Sep 17 00:00:00 2001
From: Jingcheng Du
Date: Tue, 22 Nov 2016 11:18:00 +0800
Subject: [PATCH] HBASE-17095 The ClientSimpleScanner keeps retrying if the
 hfile is corrupt or cannot be found

---
 .../hbase/regionserver/RSRpcServices.java          |   7 ++
 .../TestScannerWithCorruptHFile.java               | 112 ++++++++++++++++++
 2 files changed, 119 insertions(+)
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithCorruptHFile.java

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index f3f24b91d22..554b29a6e73 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver;
 
 import com.google.common.annotations.VisibleForTesting;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.net.BindException;
@@ -81,6 +82,7 @@ import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
 import org.apache.hadoop.hbase.exceptions.ScannerResetException;
 import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.io.hfile.CorruptHFileException;
 import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.PriorityFunction;
@@ -2997,6 +2999,11 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       // scanner is closed here
       scannerClosed = true;
 
+      // If it is a CorruptHFileException or a FileNotFoundException, throw the
+      // DoNotRetryIOException. This can avoid the retry in ClientScanner.
+      if (e instanceof CorruptHFileException || e instanceof FileNotFoundException) {
+        throw new DoNotRetryIOException(e);
+      }
       // We closed the scanner already. Instead of throwing the IOException, and client
       // retrying with the same scannerId only to get USE on the next RPC, we directly throw
       // a special exception to save an RPC.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithCorruptHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithCorruptHFile.java
new file mode 100644
index 00000000000..a1719fc16af
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithCorruptHFile.java
@@ -0,0 +1,112 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.io.hfile.CorruptHFileException;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+/**
+ * Tests a scanner on a corrupt hfile.
+ */
+@Category(MediumTests.class)
+public class TestScannerWithCorruptHFile {
+  @Rule public TestName name = new TestName();
+  private static final byte[] FAMILY_NAME = Bytes.toBytes("f");
+  private final static HBaseTestingUtility TEST_UTIL = HBaseTestingUtility.createLocalHTU();
+
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    TEST_UTIL.startMiniCluster(1);
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  public static class CorruptHFileCoprocessor extends BaseRegionObserver {
+    @Override
+    public boolean preScannerNext(ObserverContext<RegionCoprocessorEnvironment> e,
+        InternalScanner s, List<Result> results, int limit, boolean hasMore) throws IOException {
+      throw new CorruptHFileException("For test");
+    }
+  }
+
+  @Test(expected = DoNotRetryIOException.class)
+  public void testScanOnCorruptHFile() throws IOException {
+    TableName tableName = TableName.valueOf(name.getMethodName());
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    htd.addCoprocessor(CorruptHFileCoprocessor.class.getName());
+    htd.addFamily(new HColumnDescriptor(FAMILY_NAME));
+    Table table = TEST_UTIL.createTable(htd, null);
+    try {
+      loadTable(table, 1);
+      scan(table);
+    } finally {
+      table.close();
+    }
+  }
+
+  private void loadTable(Table table, int numRows) throws IOException {
+    for (int i = 0; i < numRows; ++i) {
+      byte[] row = Bytes.toBytes(i);
+      Put put = new Put(row);
+      put.setDurability(Durability.SKIP_WAL);
+      put.addColumn(FAMILY_NAME, null, row);
+      table.put(put);
+    }
+  }
+
+  private void scan(Table table) throws IOException {
+    Scan scan = new Scan();
+    scan.setCaching(1);
+    scan.setCacheBlocks(false);
+    ResultScanner scanner = table.getScanner(scan);
+    try {
+      scanner.next();
+    } finally {
+      scanner.close();
+    }
+  }
+}
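
Note (illustrative, not part of the patch): the client-visible effect of this
change is that a scan hitting a corrupt or missing hfile should now fail on
the first scanner.next() RPC with a DoNotRetryIOException, instead of the
ClientScanner retrying until its retry count is exhausted. A minimal sketch
of what a caller might observe; the table name "t1" and the default
configuration are assumptions made only for this example:

  import java.io.IOException;

  import org.apache.hadoop.hbase.DoNotRetryIOException;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.client.ResultScanner;
  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.client.Table;

  public class ScanCorruptHFileClient {
    public static void main(String[] args) throws IOException {
      // "t1" is a hypothetical table name used only for illustration.
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
          Table table = conn.getTable(TableName.valueOf("t1"));
          ResultScanner scanner = table.getScanner(new Scan())) {
        // Call next() directly (as the test above does) rather than iterating,
        // since the for-each iterator wraps IOExceptions in RuntimeException.
        Result r;
        while ((r = scanner.next()) != null) {
          // process the row
        }
      } catch (DoNotRetryIOException e) {
        // With this patch, a server-side CorruptHFileException or
        // FileNotFoundException surfaces here immediately; previously the
        // client scanner kept retrying the same doomed scan.
      }
    }
  }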