HBASE-16171 Fix the potential problems in TestHCM.testConnectionCloseAllowsInterrupt (Colin Ma)

This commit is contained in:
stack 2016-07-08 10:17:55 -07:00
parent a33097e067
commit 5e0d97e0e9
1 changed file with 7 additions and 0 deletions


@@ -464,6 +464,10 @@ public class TestHCM {
c2.setInt(HConstants.HBASE_CLIENT_PAUSE, 1); // don't wait between retries.
c2.setInt(RpcClient.FAILED_SERVER_EXPIRY_KEY, 0); // Server do not really expire
c2.setBoolean(RpcClient.SPECIFIC_WRITE_THREAD, allowsInterrupt);
// to avoid the client getting stuck when doing the Get
c2.setInt(HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT, 10000);
c2.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 10000);
c2.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 5000);
Connection connection = ConnectionFactory.createConnection(c2);
final Table table = connection.getTable(tableName);
@@ -488,6 +492,9 @@ public class TestHCM {
done++;
if (done % 100 == 0)
  LOG.info("done=" + done);
// without the sleep, too many open files exceptions are raised in
// org.apache.hadoop.hdfs.server.datanode.DataXceiver
Thread.sleep(100);
}
} catch (Throwable t) {
  failed.set(t);
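
The new settings bound how long a client call can block: meta lookups and whole operations give up after 10 seconds, and each individual RPC after 5 seconds, so the test's Get fails fast instead of hanging when the server is unreachable. Below is a minimal standalone sketch (not part of the commit) showing the same timeout configuration applied to a plain client connection; the table name and row key are hypothetical.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TimeoutConfigExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Same bounds as the test: meta operations and whole operations time out
    // after 10s, each individual RPC after 5s.
    conf.setInt(HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT, 10000);
    conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 10000);
    conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 5000);
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("test_table"))) {
      // With the timeouts set, this Get throws a timeout exception instead of
      // blocking indefinitely if the region server does not respond.
      table.get(new Get(Bytes.toBytes("row1")));
    }
  }
}
```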