HBASE-19647 Logging cleanups; emit regionname when RegionTooBusyException inside RetriesExhausted... make netty connect/disconnect TRACE-level
parent 6d9081b864
commit 16cd916976
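Note on the pattern applied throughout this commit: guarded string-concatenation logging is replaced with SLF4J parameterized logging, and chatty per-connection messages drop from DEBUG to TRACE. A minimal sketch of the before/after idiom, assuming an SLF4J Logger named LOG (the variable names are illustrative, not from any one hunk):

    // Before: the message string is built eagerly, so a level guard is needed
    // to avoid the concatenation cost when DEBUG is off.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connection from " + remoteAddress + "; # active connections: " + count);
    }

    // After: {} placeholders defer formatting until the logger has checked the
    // level, so the explicit guard can go away; TRACE keeps connect/disconnect
    // noise out of DEBUG output.
    LOG.trace("Connection {}; # active connections={}", remoteAddress, count);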
--- a/RetriesExhaustedWithDetailsException.java
+++ b/RetriesExhaustedWithDetailsException.java
@@ -1,5 +1,4 @@
 /*
- *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -30,6 +29,7 @@ import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.RegionTooBusyException;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.util.Bytes;
 
@@ -154,7 +154,9 @@ extends RetriesExhaustedException {
     for (Throwable t : ths) {
       if (t == null) continue;
       String name = "";
-      if (t instanceof DoNotRetryIOException) {
+      if (t instanceof DoNotRetryIOException ||
+          t instanceof RegionTooBusyException) {
+        // If RegionTooBusyException, print message since it has Region name in it.
         name = t.getMessage();
       } else {
         name = t.getClass().getSimpleName();
@@ -181,5 +183,4 @@ extends RetriesExhaustedException {
       }
       return classificatons.toString();
     }
-
 }
--- /dev/null
+++ b/TestRetriesExhaustedWithDetailsException.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.avro.generic.GenericData;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
+import org.apache.hadoop.hbase.RegionTooBusyException;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import org.junit.rules.TestRule;
+import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.lang.reflect.Array;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.junit.Assert.assertTrue;
+
+@Category({SmallTests.class})
+public class TestRetriesExhaustedWithDetailsException {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestRetriesExhaustedWithDetailsException.class);
+  @Rule public TestName name = new TestName();
+  @Rule public final TestRule timeout = CategoryBasedTimeout.builder().
+      withTimeout(this.getClass()).
+      withLookingForStuckThread(true).
+      build();
+
+  /**
+   * Assert that a RetriesExhaustedException that has RegionTooBusyException outputs region name.
+   */
+  @Test
+  public void testRegionTooBusyException() {
+    List<Throwable> ts = new ArrayList<>(1);
+    final String regionName = this.name.getMethodName();
+    ts.add(new RegionTooBusyException(regionName));
+    List<Row> rows = new ArrayList<>(1);
+    rows.add(Mockito.mock(Row.class));
+    List<String> hostAndPorts = new ArrayList<>(1);
+    hostAndPorts.add("example.com:1234");
+    RetriesExhaustedException ree =
+        new RetriesExhaustedWithDetailsException(ts, rows, hostAndPorts);
+    assertTrue(ree.toString().contains(regionName));
+  }
+}
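Note: the classification change above works because HRegion (see the HRegion hunk below) embeds the region name in the RegionTooBusyException message, so printing getMessage() instead of the class name carries the region name into the RetriesExhaustedWithDetailsException summary. A minimal sketch of that branch, with an illustrative message:

    Throwable t = new RegionTooBusyException("Over memstore limit, regionName=r1, server=s1");
    String name;
    if (t instanceof DoNotRetryIOException || t instanceof RegionTooBusyException) {
      name = t.getMessage();                 // keeps "regionName=r1" visible in the summary
    } else {
      name = t.getClass().getSimpleName();   // would have lost the region name
    }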
--- a/NettyRpcServerRequestDecoder.java
+++ b/NettyRpcServerRequestDecoder.java
@@ -49,10 +49,8 @@ class NettyRpcServerRequestDecoder extends ChannelInboundHandlerAdapter {
   @Override
   public void channelActive(ChannelHandlerContext ctx) throws Exception {
     allChannels.add(ctx.channel());
-    if (NettyRpcServer.LOG.isDebugEnabled()) {
-      NettyRpcServer.LOG.debug("Connection from " + ctx.channel().remoteAddress() +
-          "; # active connections: " + (allChannels.size() - 1));
-    }
+    NettyRpcServer.LOG.trace("Connection {}; # active connections={}",
+        ctx.channel().remoteAddress(), (allChannels.size() - 1));
     super.channelActive(ctx);
   }
 
@@ -67,21 +65,16 @@ class NettyRpcServerRequestDecoder extends ChannelInboundHandlerAdapter {
   @Override
   public void channelInactive(ChannelHandlerContext ctx) throws Exception {
     allChannels.remove(ctx.channel());
-    if (NettyRpcServer.LOG.isDebugEnabled()) {
-      NettyRpcServer.LOG.debug("Disconnecting client: " + ctx.channel().remoteAddress() +
-          ". Number of active connections: " + (allChannels.size() - 1));
-    }
+    NettyRpcServer.LOG.trace("Disconnection {}; # active connections={}",
+        ctx.channel().remoteAddress(), (allChannels.size() - 1));
     super.channelInactive(ctx);
   }
 
   @Override
   public void exceptionCaught(ChannelHandlerContext ctx, Throwable e) {
     allChannels.remove(ctx.channel());
-    if (NettyRpcServer.LOG.isDebugEnabled()) {
-      NettyRpcServer.LOG.debug("Connection from " + ctx.channel().remoteAddress() +
-          " catch unexpected exception from downstream.",
-          e.getCause());
-    }
+    NettyRpcServer.LOG.trace("Connection {}; caught unexpected downstream exception.",
+        ctx.channel().remoteAddress(), e.getCause());
     ctx.channel().close();
   }
 }
--- a/CompactingMemStore.java
+++ b/CompactingMemStore.java
@@ -405,9 +405,7 @@ public class CompactingMemStore extends AbstractMemStore {
     // Phase I: Update the pipeline
     getRegionServices().blockUpdates();
     try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("IN-MEMORY FLUSH: Pushing active segment into compaction pipeline");
-      }
+      LOG.trace("IN-MEMORY FLUSH: Pushing active segment into compaction pipeline");
       pushActiveToPipeline(this.active);
     } finally {
       getRegionServices().unblockUpdates();
@@ -429,9 +427,7 @@ public class CompactingMemStore extends AbstractMemStore {
       }
     } finally {
       inMemoryFlushInProgress.set(false);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("IN-MEMORY FLUSH: end");
-      }
+      LOG.trace("IN-MEMORY FLUSH: end");
     }
   }
 
--- a/CompactionPipeline.java
+++ b/CompactionPipeline.java
@@ -125,12 +125,8 @@ public class CompactionPipeline {
       return false;
     }
     suffix = versionedList.getStoreSegments();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Swapping pipeline suffix. "
-          + "Just before the swap the number of segments in pipeline is:"
-          + versionedList.getStoreSegments().size()
-          + ", and the new segment is:" + segment);
-    }
+    LOG.debug("Swapping pipeline suffix; before={}, new segment={}",
+        versionedList.getStoreSegments().size(), segment);
     swapSuffix(suffix, segment, closeSuffix);
     readOnlyCopy = new LinkedList<>(pipeline);
     version++;
@@ -146,11 +142,12 @@ public class CompactionPipeline {
       if(segment != null) newHeapSize = segment.heapSize();
       long heapSizeDelta = suffixHeapSize - newHeapSize;
       region.addMemStoreSize(new MemStoreSizing(-dataSizeDelta, -heapSizeDelta));
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Suffix data size: " + suffixDataSize + " new segment data size: "
-            + newDataSize + ". Suffix heap size: " + suffixHeapSize
-            + " new segment heap size: " + newHeapSize);
-      }
+      LOG.debug("Suffix data size={}, new segment data size={}, suffix heap size={}," +
+              " new segment heap size={}",
+          suffixDataSize,
+          newDataSize,
+          suffixHeapSize,
+          newHeapSize);
     }
     return true;
   }
@@ -206,7 +203,7 @@ public class CompactionPipeline {
         // upon flattening there is no change in the data size
         region.addMemStoreSize(new MemStoreSize(0, newMemstoreAccounting.getHeapSize()));
       }
-      LOG.debug("Compaction pipeline segment " + s + " was flattened");
+      LOG.debug("Compaction pipeline segment {} flattened", s);
       return true;
     }
     i++;
--- a/FlushLargeStoresPolicy.java
+++ b/FlushLargeStoresPolicy.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.procedure2.util.StringUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -42,8 +43,7 @@ public abstract class FlushLargeStoresPolicy extends FlushPolicy {
 
   protected long flushSizeLowerBound = -1;
 
-  protected long getFlushSizeLowerBound(HRegion region) {
-    int familyNumber = region.getTableDescriptor().getColumnFamilyCount();
+  protected long getFlushSizeLowerBound(HRegion region) { int familyNumber = region.getTableDescriptor().getColumnFamilyCount();
     // For multiple families, lower bound is the "average flush size" by default
     // unless setting in configuration is larger.
     long flushSizeLowerBound = region.getMemStoreFlushSize() / familyNumber;
@@ -57,20 +57,23 @@ public abstract class FlushLargeStoresPolicy extends FlushPolicy {
     String flushedSizeLowerBoundString =
         region.getTableDescriptor().getValue(HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND);
     if (flushedSizeLowerBoundString == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("No " + HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND
-            + " set in description of table " + region.getTableDescriptor().getTableName()
-            + ", use config (" + flushSizeLowerBound + ") instead");
-      }
+      LOG.debug("No {} set in table {} descriptor;" +
+          "using region.getMemStoreFlushSize/# of families ({}) instead.",
+          HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND,
+          region.getTableDescriptor().getTableName(),
+          StringUtils.humanSize(flushSizeLowerBound) + ")");
     } else {
       try {
         flushSizeLowerBound = Long.parseLong(flushedSizeLowerBoundString);
       } catch (NumberFormatException nfe) {
         // fall back for fault setting
-        LOG.warn("Number format exception when parsing "
-            + HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND + " for table "
-            + region.getTableDescriptor().getTableName() + ":" + flushedSizeLowerBoundString + ". " + nfe
-            + ", use config (" + flushSizeLowerBound + ") instead");
+        LOG.warn("Number format exception parsing {} for table {}: {}, {}; " +
+            "using region.getMemStoreFlushSize/# of families ({}) instead.",
+            HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND,
+            region.getTableDescriptor().getTableName(),
+            flushedSizeLowerBoundString,
+            nfe,
+            flushSizeLowerBound);
 
       }
     }
@@ -79,15 +82,13 @@ public abstract class FlushLargeStoresPolicy extends FlushPolicy {
 
   protected boolean shouldFlush(HStore store) {
     if (store.getMemStoreSize().getDataSize() > this.flushSizeLowerBound) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Flush Column Family " + store.getColumnFamilyName() + " of " +
-            region.getRegionInfo().getEncodedName() + " because memstoreSize=" +
-            store.getMemStoreSize().getDataSize() + " > lower bound="
-            + this.flushSizeLowerBound);
-      }
+      LOG.debug("Flush {} of {}; memstoreSize={} > lowerBound={}",
+          store.getColumnFamilyName(),
+          region.getRegionInfo().getEncodedName(),
+          store.getMemStoreSize().getDataSize(),
+          this.flushSizeLowerBound);
       return true;
     }
     return false;
   }
 
 }
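Note: the new debug line above relies on the freshly imported org.apache.hadoop.hbase.procedure2.util.StringUtils to render the fallback bound in human-readable units. A small sketch of the idiom, assuming an SLF4J Logger LOG; the 128 MB flush size and family count are assumed values:

    long memStoreFlushSize = 128L * 1024 * 1024;   // assumed region memstore flush size
    int familyNumber = 4;                          // assumed number of column families
    long flushSizeLowerBound = memStoreFlushSize / familyNumber;
    // humanSize formats a byte count with a unit suffix, e.g. "32.0M"
    LOG.debug("Lower bound defaults to {}", StringUtils.humanSize(flushSizeLowerBound));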
--- a/HRegion.java
+++ b/HRegion.java
@@ -4170,7 +4170,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     if (this.memstoreDataSize.get() > this.blockingMemStoreSize) {
       blockedRequestsCount.increment();
       requestFlush();
-      throw new RegionTooBusyException("Above memstore limit, " +
+      throw new RegionTooBusyException("Over memstore limit, " +
           "regionName=" + (this.getRegionInfo() == null ? "unknown" :
           this.getRegionInfo().getRegionNameAsString()) +
           ", server=" + (this.getRegionServerServices() == null ? "unknown" :
--- a/MemStoreCompactionStrategy.java
+++ b/MemStoreCompactionStrategy.java
@@ -85,14 +85,13 @@ public abstract class MemStoreCompactionStrategy {
     int numOfSegments = versionedList.getNumOfSegments();
     if (numOfSegments > pipelineThreshold) {
       // to avoid too many segments, merge now
-      LOG.debug(strategy+" memory compaction for store " + cfName
-          + " merging " + numOfSegments + " segments");
+      LOG.debug("{} in-memory compaction of {}; merging {} segments",
+          strategy, cfName, numOfSegments);
       return getMergingAction();
     }
 
     // just flatten a segment
-    LOG.debug(strategy+" memory compaction for store " + cfName
-        + " flattening a segment in the pipeline");
+    LOG.debug("{} in-memory compaction of {}; flattening a segment", strategy, cfName);
     return getFlattenAction();
   }
 
--- a/MemStoreCompactor.java
+++ b/MemStoreCompactor.java
@@ -92,11 +92,8 @@ public class MemStoreCompactor {
     // get a snapshot of the list of the segments from the pipeline,
     // this local copy of the list is marked with specific version
    versionedList = compactingMemStore.getImmutableSegments();
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Starting the In-Memory Compaction for store "
-          + compactingMemStore.getStore().getColumnFamilyName());
-    }
+    LOG.debug("Starting In-Memory Compaction of {}",
+        compactingMemStore.getStore().getColumnFamilyName());
     HStore store = compactingMemStore.getStore();
     RegionCoprocessorHost cpHost = store.getCoprocessorHost();
     if (cpHost != null) {
--- a/Segment.java
+++ b/Segment.java
@@ -351,13 +351,13 @@ public abstract class Segment {
 
   @Override
   public String toString() {
-    String res = "Store segment of type "+this.getClass().getName()+"; ";
-    res += "isEmpty "+(isEmpty()?"yes":"no")+"; ";
-    res += "cellsCount "+getCellsCount()+"; ";
-    res += "cellsSize "+keySize()+"; ";
-    res += "totalHeapSize "+heapSize()+"; ";
-    res += "Min ts " + timeRangeTracker.getMin() + "; ";
-    res += "Max ts " + timeRangeTracker.getMax() + "; ";
+    String res = "Type=" + this.getClass().getSimpleName() + ", ";
+    res += "empty=" + (isEmpty()? "yes": "no") + ", ";
+    res += "cellCount=" + getCellsCount() + ", ";
+    res += "cellSize=" + keySize() + ", ";
+    res += "totalHeapSize=" + heapSize() + ", ";
+    res += "min timestamp=" + timeRangeTracker.getMin() + ", ";
+    res += "max timestamp=" + timeRangeTracker.getMax();
     return res;
   }
 }
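Note: the Segment#toString rewrite above switches to a compact key=value listing, one segment per line. An illustrative rendering with invented values:

    // Type=MutableSegment, empty=no, cellCount=3, cellSize=144, totalHeapSize=1024,
    // min timestamp=1511234567890, max timestamp=1511234567999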