HBASE-12558 TestHCM.testClusterStatus Unexpected exception, expected<org.apache.hadoop.hbase.regionserver.RegionServerStoppedException> but was<junit.framework.AssertionFailedError> -- ADDED DEBUG

stack 2014-11-25 21:21:35 -08:00
parent 24f19328eb
commit 8b8f2026bd
20 changed files with 78 additions and 40 deletions


@@ -21,7 +21,10 @@ package org.apache.hadoop.hbase.client;
import io.netty.bootstrap.Bootstrap;
import io.netty.bootstrap.ChannelFactory;
import io.netty.buffer.ByteBufInputStream;
import io.netty.channel.Channel;
import io.netty.channel.ChannelException;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
@@ -29,7 +32,9 @@ import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.DatagramChannel;
import io.netty.channel.socket.DatagramPacket;
import io.netty.channel.socket.InternetProtocolFamily;
import io.netty.channel.socket.nio.NioDatagramChannel;
import io.netty.util.internal.StringUtil;
import java.io.Closeable;
import java.io.IOException;
@@ -37,6 +42,8 @@ import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.Inet4Address;
import java.net.Inet6Address;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
@@ -53,6 +60,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.ExceptionUtil;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.ReflectionUtils;
/**
@@ -205,12 +213,17 @@ class ClusterStatusListener implements Closeable {
throw new IOException("Can't connect to " + mcAddress, e);
}
InternetProtocolFamily family = InternetProtocolFamily.IPv4;
if (ina instanceof Inet6Address) {
family = InternetProtocolFamily.IPv6;
}
try {
Bootstrap b = new Bootstrap();
b.group(group)
.channel(NioDatagramChannel.class)
.option(ChannelOption.SO_REUSEADDR, true)
.handler(new ClusterStatusHandler());
.channelFactory(new HBaseDatagramChannelFactory<Channel>(NioDatagramChannel.class, family))
.option(ChannelOption.SO_REUSEADDR, true)
.handler(new ClusterStatusHandler());
channel = (DatagramChannel)b.bind(bindAddress, port).sync().channel();
} catch (InterruptedException e) {
@@ -222,6 +235,32 @@ class ClusterStatusListener implements Closeable {
channel.joinGroup(ina, ni, null, channel.newPromise());
}
private class HBaseDatagramChannelFactory<T extends Channel> implements ChannelFactory<T> {
private final Class<? extends T> clazz;
private InternetProtocolFamily family;
HBaseDatagramChannelFactory(Class<? extends T> clazz, InternetProtocolFamily family) {
this.clazz = clazz;
this.family = family;
}
@Override
public T newChannel() {
try {
return ReflectionUtils.instantiateWithCustomCtor(clazz.getName(),
new Class[] { InternetProtocolFamily.class }, new Object[] { family });
} catch (Throwable t) {
throw new ChannelException("Unable to create Channel from class " + clazz, t);
}
}
@Override
public String toString() {
return StringUtil.simpleClassName(clazz) + ".class";
}
}
@Override
public void close() {
if (channel != null) {
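Note on the hunk above: Bootstrap.channel(Class) can only invoke a channel's no-argument constructor, so the new HBaseDatagramChannelFactory exists to reach the NioDatagramChannel(InternetProtocolFamily) constructor and bind with the family matching the configured multicast address. A minimal, self-contained sketch of the same pattern in plain Netty follows; the group address and the no-op handler are hypothetical, and this is not the HBase class itself.

// Sketch only: plain Netty 4.x, not HBase code. Group address and handler are made up.
import io.netty.bootstrap.Bootstrap;
import io.netty.bootstrap.ChannelFactory;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelOption;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.DatagramChannel;
import io.netty.channel.socket.DatagramPacket;
import io.netty.channel.socket.InternetProtocolFamily;
import io.netty.channel.socket.nio.NioDatagramChannel;
import java.net.Inet6Address;
import java.net.InetAddress;

public class MulticastBindSketch {
  public static void main(String[] args) throws Exception {
    InetAddress ina = InetAddress.getByName("226.1.1.3"); // hypothetical multicast group
    final InternetProtocolFamily family = (ina instanceof Inet6Address)
        ? InternetProtocolFamily.IPv6
        : InternetProtocolFamily.IPv4;

    NioEventLoopGroup group = new NioEventLoopGroup(1);
    try {
      Bootstrap b = new Bootstrap()
          .group(group)
          // channelFactory() reaches the family-aware constructor;
          // channel(NioDatagramChannel.class) can only call the no-arg one.
          .channelFactory(new ChannelFactory<Channel>() {
            @Override
            public Channel newChannel() {
              return new NioDatagramChannel(family);
            }
          })
          .option(ChannelOption.SO_REUSEADDR, true)
          .handler(new SimpleChannelInboundHandler<DatagramPacket>() {
            @Override
            protected void channelRead0(ChannelHandlerContext ctx, DatagramPacket msg) {
              // a real listener would decode a ClusterStatus protobuf here
            }
          });
      DatagramChannel channel = (DatagramChannel) b.bind(0).sync().channel();
      channel.close().sync();
    } finally {
      group.shutdownGracefully();
    }
  }
}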


@@ -112,13 +112,13 @@ public class HTable implements HTableInterface, RegionLocator {
private volatile Configuration configuration;
protected List<Row> writeAsyncBuffer = new LinkedList<Row>();
private long writeBufferSize;
private boolean clearBufferOnFail;
private boolean autoFlush;
protected long currentWriteBufferSize;
private boolean clearBufferOnFail = true;
private boolean autoFlush = true;
protected long currentWriteBufferSize = 0;
private boolean closed = false;
protected int scannerCaching;
private int maxKeyValueSize;
private ExecutorService pool; // For Multi & Scan
private boolean closed;
private int operationTimeout;
private int retries;
private final boolean cleanupPoolOnClose; // shutdown the pool in close()
@@ -127,7 +127,6 @@ public class HTable implements HTableInterface, RegionLocator {
private int primaryCallTimeoutMicroSecond;
private int replicaCallTimeoutMicroSecondScan;
/** The Async process for puts with autoflush set to false or multiputs */
protected AsyncProcess ap;
/** The Async process for batch */
@@ -319,9 +318,10 @@ public class HTable implements HTableInterface, RegionLocator {
/**
* For internal testing.
* @throws IOException
*/
@VisibleForTesting
protected HTable() {
protected HTable() throws IOException {
tableName = null;
cleanupPoolOnClose = false;
cleanupConnectionOnClose = false;
@@ -345,9 +345,6 @@ public class HTable implements HTableInterface, RegionLocator {
HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
this.writeBufferSize = this.configuration.getLong(
"hbase.client.write.buffer", 2097152);
this.clearBufferOnFail = true;
this.autoFlush = true;
this.currentWriteBufferSize = 0;
this.scannerCaching = this.configuration.getInt(
HConstants.HBASE_CLIENT_SCANNER_CACHING,
HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING);
@@ -365,7 +362,6 @@ public class HTable implements HTableInterface, RegionLocator {
multiAp = this.connection.getAsyncProcess();
this.maxKeyValueSize = getMaxKeyValueSize(this.configuration);
this.closed = false;
}
/**
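Aside on the HTable hunk: moving the defaults (clearBufferOnFail, autoFlush, currentWriteBufferSize, closed) onto the field declarations means the protected no-argument constructor used only by tests starts from the same state as the normal construction path, which is why the explicit assignments later in the constructor could be dropped. A plain-Java illustration of the idea follows; the class is hypothetical, not HBase code.

// Plain-Java illustration only; field names echo the hunk above but this is not HBase code.
class ClientBufferDefaults {
  private boolean clearBufferOnFail = true;  // defaults now live on the declarations
  private boolean autoFlush = true;
  protected long currentWriteBufferSize = 0;
  private boolean closed = false;

  protected ClientBufferDefaults() {
    // test-only constructor: skips the normal setup, fields above are still sane
  }

  ClientBufferDefaults(long writeBufferSize) {
    // production constructor: no need to repeat the default assignments here
  }
}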


@@ -663,7 +663,7 @@ public class TestAsyncProcess {
HTable ht = new HTable();
MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), conf, true);
ht.ap = ap;
ht.setAutoFlush(true, true);
ht.setAutoFlushTo(true);
if (bufferOn) {
ht.setWriteBufferSize(1024L * 1024L);
} else {
@@ -711,7 +711,7 @@ public class TestAsyncProcess {
HTable ht = new HTable();
MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), conf, true);
ht.ap = ap;
ht.setAutoFlush(false, true);
ht.setAutoFlushTo(false);
ht.setWriteBufferSize(0);
Put p = createPut(1, false);
@@ -739,7 +739,7 @@ public class TestAsyncProcess {
public void testWithNoClearOnFail() throws IOException {
HTable ht = new HTable();
ht.ap = new MyAsyncProcess(createHConnection(), conf, true);
ht.setAutoFlush(false, false);
ht.setAutoFlush(false);
Put p = createPut(1, false);
ht.put(p);
@@ -806,7 +806,7 @@ public class TestAsyncProcess {
ht.ap.serverTrackerTimeout = 1;
Put p = createPut(1, false);
ht.setAutoFlush(false, false);
ht.setAutoFlush(false);
ht.put(p);
try {
@@ -828,7 +828,7 @@ public class TestAsyncProcess {
Assert.assertNotNull(ht.ap.createServerErrorTracker());
Put p = createPut(1, true);
ht.setAutoFlush(false, false);
ht.setAutoFlush(false);
ht.put(p);
try {
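Most of the test edits above, and the matching one-line changes in the files that follow, make the same substitution: the two-argument setAutoFlush(autoFlush, clearBufferOnFail) becomes setAutoFlushTo(autoFlush), which only toggles autoflush and leaves clearBufferOnFail at its current value, now defaulting to true in HTable. A hedged sketch of the migrated caller pattern against the client API of this era follows; the table handle, family, and qualifier names are made up.

// Compile-time sketch against the HBase 1.0-era client API; the table handle
// would come from a real connection in practice.
import java.io.IOException;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class AutoFlushMigrationSketch {
  static void bufferPuts(HTable table) throws IOException {
    // before: table.setAutoFlush(false, true);
    table.setAutoFlushTo(false);                 // only toggles autoflush
    table.setWriteBufferSize(4 * 1024 * 1024);   // 4 MB client-side buffer
    table.put(new Put(Bytes.toBytes("row-1"))
        .add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
    table.flushCommits();                        // drain the buffered mutations
  }
}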


@@ -363,7 +363,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
protected void instantiateHTable(Configuration conf) throws IOException {
table = new HTable(conf, getTableName(conf));
table.setAutoFlush(false, true);
table.setAutoFlushTo(false);
table.setWriteBufferSize(4 * 1024 * 1024);
}


@@ -185,7 +185,7 @@ public class IntegrationTestBigLinkedListWithVisibility extends IntegrationTestB
protected void instantiateHTable(Configuration conf) throws IOException {
for (int i = 0; i < DEFAULT_TABLES_COUNT; i++) {
HTable table = new HTable(conf, getTableName(i));
table.setAutoFlush(true, true);
table.setAutoFlushTo(true);
//table.setWriteBufferSize(4 * 1024 * 1024);
this.tables[i] = table;
}


@@ -181,7 +181,7 @@ public void cleanUpCluster() throws Exception {
numBackReferencesPerRow = conf.getInt(NUM_BACKREFS_KEY, NUM_BACKREFS_DEFAULT);
table = new HTable(conf, TableName.valueOf(tableName));
table.setWriteBufferSize(4*1024*1024);
table.setAutoFlush(false, true);
table.setAutoFlushTo(false);
String taskId = conf.get("mapreduce.task.attempt.id");
Matcher matcher = Pattern.compile(".+_m_(\\d+_\\d+)").matcher(taskId);


@@ -239,7 +239,7 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
for (int x = 0; x < 5000; x++) {
TraceScope traceScope = Trace.startSpan("insertData", Sampler.ALWAYS);
try {
ht.setAutoFlush(false, true);
ht.setAutoFlushTo(false);
for (int i = 0; i < 5; i++) {
long rk = random.nextLong();
rowKeys.add(rk);


@@ -909,7 +909,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
void testSetup() throws IOException {
this.table = connection.getTable(tableName);
this.table.setAutoFlush(false, true);
this.table.setAutoFlushTo(false);
}
void testTakedown() throws IOException {


@@ -104,7 +104,7 @@ public class MultiTableOutputFormat extends OutputFormat<ImmutableBytesWritable,
if (!tables.containsKey(tableName)) {
LOG.debug("Opening HTable \"" + Bytes.toString(tableName.get())+ "\" for writing");
HTable table = new HTable(conf, TableName.valueOf(tableName.get()));
table.setAutoFlush(false, true);
table.setAutoFlushTo(false);
tables.put(tableName, table);
}
return tables.get(tableName);


@@ -195,7 +195,7 @@ implements Configurable {
}
this.connection = ConnectionFactory.createConnection(this.conf);
this.table = connection.getTable(TableName.valueOf(tableName));
((HTable) this.table).setAutoFlush(false, true);
this.table.setAutoFlushTo(false);
LOG.info("Created table instance for " + tableName);
} catch(IOException e) {
LOG.error(e);


@@ -37,6 +37,8 @@ import io.netty.channel.socket.nio.NioDatagramChannel;
import io.netty.handler.codec.MessageToMessageEncoder;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import io.netty.util.internal.StringUtil;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.ClusterStatus;
@@ -82,6 +84,7 @@ public class ClusterStatusPublisher extends Chore {
* Use org.apache.hadoop.hbase.master.ClusterStatusPublisher.MulticastPublisher to multicast the
* status.
*/
private static final Log LOG = LogFactory.getLog(ClusterStatusPublisher.class);
public static final String STATUS_PUBLISHER_CLASS = "hbase.status.publisher.class";
public static final Class<? extends ClusterStatusPublisher.Publisher>
DEFAULT_STATUS_PUBLISHER_CLASS =
@@ -171,7 +174,6 @@ public class ClusterStatusPublisher extends Chore {
null,
null);
publisher.publish(cs);
}
@@ -215,6 +217,7 @@
}
res.add(toSend.getKey());
LOG.debug("###add dead server " + toSend.getKey());
}
return res;
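This LOG.debug line is the "ADDED DEBUG" from the commit message: it traces each dead server added to the list the publisher will multicast. A standalone sketch of the same commons-logging pattern follows (the class name and server string are hypothetical), with an optional isDebugEnabled() guard to skip the string concatenation when debug logging is off.

// Standalone sketch; requires commons-logging (plus a logging binding) on the classpath. Not HBase code.
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class DeadServerTraceSketch {
  private static final Log LOG = LogFactory.getLog(DeadServerTraceSketch.class);

  public static void main(String[] args) {
    String deadServer = "host1,16020,1416955295000";  // hypothetical ServerName string
    if (LOG.isDebugEnabled()) {                       // guard avoids building the message needlessly
      LOG.debug("###add dead server " + deadServer);
    }
  }
}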


@@ -3905,7 +3905,7 @@ public class TestFromClientSide {
final int NB_BATCH_ROWS = 10;
HTable table = TEST_UTIL.createTable(Bytes.toBytes("testRowsPutBufferedOneFlush"),
new byte [][] {CONTENTS_FAMILY, SMALL_FAMILY});
table.setAutoFlush(false, true);
table.setAutoFlushTo(false);
ArrayList<Put> rowsUpdate = new ArrayList<Put>();
for (int i = 0; i < NB_BATCH_ROWS * 10; i++) {
byte[] row = Bytes.toBytes("row" + i);
@@ -3946,7 +3946,7 @@ public class TestFromClientSide {
final int NB_BATCH_ROWS = 10;
HTable table = TEST_UTIL.createTable(Bytes.toBytes("testRowsPutBufferedManyManyFlushes"),
new byte[][] {CONTENTS_FAMILY, SMALL_FAMILY });
table.setAutoFlush(false, true);
table.setAutoFlushTo(false);
table.setWriteBufferSize(10);
ArrayList<Put> rowsUpdate = new ArrayList<Put>();
for (int i = 0; i < NB_BATCH_ROWS * 10; i++) {
@@ -4275,7 +4275,7 @@ public class TestFromClientSide {
new byte[][] { HConstants.CATALOG_FAMILY, Bytes.toBytes("info2") }, 1, 1024);
// set block size to 64 to making 2 kvs into one block, bypassing the walkForwardInSingleRow
// in Store.rowAtOrBeforeFromStoreFile
table.setAutoFlush(true);
table.setAutoFlushTo(true);
String regionName = table.getRegionLocations().firstKey().getEncodedName();
HRegion region =
TEST_UTIL.getRSForFirstRegionInTable(tableAname).getFromOnlineRegions(regionName);


@@ -263,7 +263,7 @@ public class TestMultiParallel {
// Load the data
LOG.info("get new table");
HTable table = new HTable(UTIL.getConfiguration(), TEST_TABLE);
table.setAutoFlush(false, true);
table.setAutoFlushTo(false);
table.setWriteBufferSize(10 * 1024 * 1024);
LOG.info("constructPutRequests");


@@ -178,7 +178,7 @@ public class TestHTableWrapper {
boolean initialAutoFlush = hTableInterface.isAutoFlush();
hTableInterface.setAutoFlushTo(false);
assertFalse(hTableInterface.isAutoFlush());
hTableInterface.setAutoFlush(true, true);
hTableInterface.setAutoFlushTo(true);
assertTrue(hTableInterface.isAutoFlush());
hTableInterface.setAutoFlushTo(initialAutoFlush);
}


@@ -921,7 +921,7 @@ public class TestDistributedLogSplitting {
if (key == null || key.length == 0) {
key = new byte[] { 0, 0, 0, 0, 1 };
}
ht.setAutoFlush(true, true);
ht.setAutoFlushTo(true);
Put put = new Put(key);
put.add(Bytes.toBytes("family"), Bytes.toBytes("c1"), new byte[]{'b'});
ht.put(put);
@@ -1614,7 +1614,7 @@ public class TestDistributedLogSplitting {
* Load table with puts and deletes with expected values so that we can verify later
*/
private void prepareData(final HTable t, final byte[] f, final byte[] column) throws IOException {
t.setAutoFlush(false, true);
t.setAutoFlushTo(false);
byte[] k = new byte[3];
// add puts


@@ -348,7 +348,7 @@ public class TestRegionServerMetrics {
TEST_UTIL.createTable(tableName, cf);
HTable t = new HTable(conf, tableName);
t.setAutoFlush(false, true);
t.setAutoFlushTo(false);
for (int insertCount =0; insertCount < 100; insertCount++) {
Put p = new Put(Bytes.toBytes("" + insertCount + "row"));
p.add(cf, qualifier, val);


@@ -444,7 +444,7 @@ public class TestLogRolling {
writeData(table, 1002);
table.setAutoFlush(true, true);
table.setAutoFlushTo(true);
long curTime = System.currentTimeMillis();
LOG.info("log.getCurrentFileName()): " + DefaultWALProvider.getCurrentFileName(log));


@@ -54,7 +54,7 @@ public class TestReplicationChangingPeerRegionservers extends TestReplicationBas
*/
@Before
public void setUp() throws Exception {
((HTable)htable1).setAutoFlush(false, true);
htable1.setAutoFlushTo(false);
// Starting and stopping replication can make us miss new logs,
// rolling like this makes sure the most recent one gets added to the queue
for (JVMClusterUtil.RegionServerThread r :


@@ -69,7 +69,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
*/
@Before
public void setUp() throws Exception {
((HTable)htable1).setAutoFlush(true, true);
htable1.setAutoFlushTo(true);
// Starting and stopping replication can make us miss new logs,
// rolling like this makes sure the most recent one gets added to the queue
for ( JVMClusterUtil.RegionServerThread r :
@@ -247,7 +247,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
LOG.info("testSmallBatch");
Put put;
// normal Batch tests
((HTable)htable1).setAutoFlush(false, true);
htable1.setAutoFlushTo(false);
for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
put = new Put(Bytes.toBytes(i));
put.add(famName, row, row);
@@ -387,7 +387,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
public void testLoading() throws Exception {
LOG.info("Writing out rows to table1 in testLoading");
htable1.setWriteBufferSize(1024);
((HTable)htable1).setAutoFlush(false, true);
((HTable)htable1).setAutoFlushTo(false);
for (int i = 0; i < NB_ROWS_IN_BIG_BATCH; i++) {
Put put = new Put(Bytes.toBytes(i));
put.add(famName, row, row);


@@ -678,7 +678,7 @@ public class SnapshotTestingUtils {
public static void loadData(final HBaseTestingUtility util, final HTable table, int rows,
byte[]... families) throws IOException, InterruptedException {
table.setAutoFlush(false, true);
table.setAutoFlushTo(false);
// Ensure one row per region
assertTrue(rows >= KEYS.length);