HBASE-20043 ITBLL fails against hadoop3

Fix MoveRandomRegionOfTableAction. It depended on old AM behavior.
Make it do an explicit move, as required in AMv3; without that, it was
just closing the region, which made the test fail (sketch below).
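
The gist of that fix, condensed from the MoveRegionsOfTableAction hunk further
down (the helper class name here is invented for illustration; the calls are
the same ones the patch itself uses):

    import java.util.Collection;
    import java.util.EnumSet;
    import org.apache.commons.lang3.RandomUtils;
    import org.apache.hadoop.hbase.ClusterMetrics.Option;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class ExplicitMoveSketch {
      // AMv3 needs a destination: pick a live regionserver and ask the Master to
      // move the region there, rather than unassigning (which only closes it).
      static void moveToRandomLiveServer(Admin admin, RegionInfo region) throws Exception {
        Collection<ServerName> live = admin
            .getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet();
        ServerName[] servers = live.toArray(new ServerName[live.size()]);
        String dest = servers[RandomUtils.nextInt(0, servers.length)].getServerName();
        admin.move(region.getEncodedNameAsBytes(), Bytes.toBytes(dest));
      }
    }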

Fix pom so hadoop3 profile specifies a different netty3 version.

Bunch of logging format changes that came of trying to read
the spew from this test (see the small example below).
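
For reference, the logging pattern most of these edits move toward is slf4j
parameterized messages in place of string concatenation, roughly as follows
(class name invented for the example; the messages mirror ones changed in this
patch):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public final class LogFormatSketch {
      private static final Logger LOG = LoggerFactory.getLogger(LogFormatSketch.class);

      void sleepLogging(long sleepTime) {
        // Before: the message string is built even when INFO is disabled.
        LOG.info("Sleeping for: " + sleepTime);
        // After: placeholders; formatting happens only if INFO is enabled.
        LOG.info("Sleeping for {} ms", sleepTime);
      }
    }
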
Michael Stack 2018-02-23 14:53:52 -08:00
parent ba5fb53d14
commit 549a6d93d4
29 changed files with 108 additions and 98 deletions


@@ -80,8 +80,9 @@ public class ByteBufferPool {
     this.maxPoolSize = maxPoolSize;
     this.directByteBuffer = directByteBuffer;
     // TODO can add initialPoolSize config also and make those many BBs ready for use.
-    LOG.info("Created ByteBufferPool with bufferSize : " + bufferSize + " and maxPoolSize : "
-        + maxPoolSize);
+    LOG.info("Created with bufferSize={} and maxPoolSize={}",
+        org.apache.hadoop.util.StringUtils.byteDesc(bufferSize),
+        org.apache.hadoop.util.StringUtils.byteDesc(maxPoolSize));
     this.count = new AtomicInteger(0);
   }


@@ -138,63 +138,63 @@ public class Action {
   }

   protected void killMaster(ServerName server) throws IOException {
-    LOG.info("Killing master:" + server);
+    LOG.info("Killing master " + server);
     cluster.killMaster(server);
     cluster.waitForMasterToStop(server, killMasterTimeout);
-    LOG.info("Killed master server:" + server);
+    LOG.info("Killed master " + server);
   }

   protected void startMaster(ServerName server) throws IOException {
-    LOG.info("Starting master:" + server.getHostname());
+    LOG.info("Starting master " + server.getHostname());
     cluster.startMaster(server.getHostname(), server.getPort());
     cluster.waitForActiveAndReadyMaster(startMasterTimeout);
-    LOG.info("Started master: " + server);
+    LOG.info("Started master " + server.getHostname());
   }

   protected void killRs(ServerName server) throws IOException {
-    LOG.info("Killing region server:" + server);
+    LOG.info("Killing regionserver " + server);
     cluster.killRegionServer(server);
     cluster.waitForRegionServerToStop(server, killRsTimeout);
-    LOG.info("Killed region server:" + server + ". Reported num of rs:"
+    LOG.info("Killed regionserver " + server + ". Reported num of rs:"
         + cluster.getClusterMetrics().getLiveServerMetrics().size());
   }

   protected void startRs(ServerName server) throws IOException {
-    LOG.info("Starting region server:" + server.getHostname());
+    LOG.info("Starting regionserver " + server.getAddress());
     cluster.startRegionServer(server.getHostname(), server.getPort());
     cluster.waitForRegionServerToStart(server.getHostname(), server.getPort(), startRsTimeout);
-    LOG.info("Started region server:" + server + ". Reported num of rs:"
+    LOG.info("Started regionserver " + server.getAddress() + ". Reported num of rs:"
         + cluster.getClusterMetrics().getLiveServerMetrics().size());
   }

   protected void killZKNode(ServerName server) throws IOException {
-    LOG.info("Killing zookeeper node:" + server);
+    LOG.info("Killing zookeeper node " + server);
     cluster.killZkNode(server);
     cluster.waitForZkNodeToStop(server, killZkNodeTimeout);
-    LOG.info("Killed zookeeper node:" + server + ". Reported num of rs:"
+    LOG.info("Killed zookeeper node " + server + ". Reported num of rs:"
         + cluster.getClusterMetrics().getLiveServerMetrics().size());
   }

   protected void startZKNode(ServerName server) throws IOException {
-    LOG.info("Starting zookeeper node:" + server.getHostname());
+    LOG.info("Starting zookeeper node " + server.getHostname());
     cluster.startZkNode(server.getHostname(), server.getPort());
     cluster.waitForZkNodeToStart(server, startZkNodeTimeout);
-    LOG.info("Started zookeeper node:" + server);
+    LOG.info("Started zookeeper node " + server);
   }

   protected void killDataNode(ServerName server) throws IOException {
-    LOG.info("Killing datanode:" + server);
+    LOG.info("Killing datanode " + server);
     cluster.killDataNode(server);
     cluster.waitForDataNodeToStop(server, killDataNodeTimeout);
-    LOG.info("Killed datanode:" + server + ". Reported num of rs:"
+    LOG.info("Killed datanode " + server + ". Reported num of rs:"
         + cluster.getClusterMetrics().getLiveServerMetrics().size());
   }

   protected void startDataNode(ServerName server) throws IOException {
-    LOG.info("Starting datanode:" + server.getHostname());
+    LOG.info("Starting datanode " + server.getHostname());
     cluster.startDataNode(server);
     cluster.waitForDataNodeToStart(server, startDataNodeTimeout);
-    LOG.info("Started datanode:" + server);
+    LOG.info("Started datanode " + server);
   }

   protected void unbalanceRegions(ClusterMetrics clusterStatus,


@@ -21,10 +21,10 @@ package org.apache.hadoop.hbase.chaos.actions;
 import java.util.List;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.RegionInfo;

 /**
  * Action that tries to move a random region of a table.
@@ -52,16 +52,17 @@ public class MoveRandomRegionOfTableAction extends Action {
     Admin admin = util.getAdmin();

     LOG.info("Performing action: Move random region of table " + tableName);
-    List<HRegionInfo> regions = admin.getTableRegions(tableName);
+    List<RegionInfo> regions = admin.getRegions(tableName);
     if (regions == null || regions.isEmpty()) {
       LOG.info("Table " + tableName + " doesn't have regions to move");
       return;
     }
-    HRegionInfo region = PolicyBasedChaosMonkey.selectRandomItem(
-        regions.toArray(new HRegionInfo[regions.size()]));
-    LOG.debug("Unassigning region " + region.getRegionNameAsString());
-    admin.unassign(region.getRegionName(), false);
+    RegionInfo region = PolicyBasedChaosMonkey.selectRandomItem(
+        regions.toArray(new RegionInfo[regions.size()]));
+    LOG.debug("Move random region {}", region.getRegionNameAsString());
+    // Use facility over in MoveRegionsOfTableAction...
+    MoveRegionsOfTableAction.moveRegion(admin, MoveRegionsOfTableAction.getServers(admin), region);
     if (sleepTime > 0) {
       Thread.sleep(sleepTime);
     }


@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.chaos.actions;

+import java.io.IOException;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.EnumSet;
@@ -29,6 +30,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.chaos.factories.MonkeyConstants;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.util.Bytes;

 /**
@@ -56,14 +58,12 @@ public class MoveRegionsOfTableAction extends Action {
     }

     Admin admin = this.context.getHBaseIntegrationTestingUtility().getAdmin();
-    Collection<ServerName> serversList =
-        admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet();
-    ServerName[] servers = serversList.toArray(new ServerName[serversList.size()]);
+    ServerName[] servers = getServers(admin);

-    LOG.info("Performing action: Move regions of table " + tableName);
+    LOG.info("Performing action: Move regions of table {}", tableName);
     List<HRegionInfo> regions = admin.getTableRegions(tableName);
     if (regions == null || regions.isEmpty()) {
-      LOG.info("Table " + tableName + " doesn't have regions to move");
+      LOG.info("Table {} doesn't have regions to move", tableName);
       return;
     }
@@ -77,14 +77,7 @@ public class MoveRegionsOfTableAction extends Action {
         return;
       }

-      try {
-        String destServerName =
-            servers[RandomUtils.nextInt(0, servers.length)].getServerName();
-        LOG.debug("Moving " + regionInfo.getRegionNameAsString() + " to " + destServerName);
-        admin.move(regionInfo.getEncodedNameAsBytes(), Bytes.toBytes(destServerName));
-      } catch (Exception ex) {
-        LOG.warn("Move failed, might be caused by other chaos: " + ex.getMessage());
-      }
+      moveRegion(admin, servers, regionInfo);
       if (sleepTime > 0) {
         Thread.sleep(sleepTime);
       }
@@ -96,4 +89,20 @@ public class MoveRegionsOfTableAction extends Action {
       }
     }
   }
+
+  static ServerName [] getServers(Admin admin) throws IOException {
+    Collection<ServerName> serversList =
+        admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet();
+    return serversList.toArray(new ServerName[serversList.size()]);
+  }
+
+  static void moveRegion(Admin admin, ServerName [] servers, RegionInfo regionInfo) {
+    try {
+      String destServerName = servers[RandomUtils.nextInt(0, servers.length)].getServerName();
+      LOG.debug("Moving {} to {}", regionInfo.getRegionNameAsString(), destServerName);
+      admin.move(regionInfo.getEncodedNameAsBytes(), Bytes.toBytes(destServerName));
+    } catch (Exception ex) {
+      LOG.warn("Move failed, might be caused by other chaos: {}", ex.getMessage());
+    }
+  }
 }


@@ -113,7 +113,7 @@ public class PolicyBasedChaosMonkey extends ChaosMonkey {
     for (int i=0; i<policies.length; i++) {
       policies[i].init(new Policy.PolicyContext(this.util));
-      Thread monkeyThread = new Thread(policies[i], "ChaosMonkeyThread");
+      Thread monkeyThread = new Thread(policies[i], "ChaosMonkey");
       monkeyThread.start();
       monkeyThreads[i] = monkeyThread;
     }


@@ -33,7 +33,7 @@ public abstract class PeriodicPolicy extends Policy {
   public void run() {
     // Add some jitter.
     int jitter = RandomUtils.nextInt(0, (int) periodMs);
-    LOG.info("Sleeping for " + jitter + " to add jitter");
+    LOG.info("Sleeping for {} ms to add jitter", jitter);
     Threads.sleep(jitter);

     while (!isStopped()) {
@@ -43,7 +43,7 @@ public abstract class PeriodicPolicy extends Policy {
       if (isStopped()) return;
       long sleepTime = periodMs - (System.currentTimeMillis() - start);
       if (sleepTime > 0) {
-        LOG.info("Sleeping for: " + sleepTime);
+        LOG.info("Sleeping for {} ms", sleepTime);
         Threads.sleep(sleepTime);
       }
     }
@@ -54,6 +54,6 @@ public abstract class PeriodicPolicy extends Policy {
   @Override
   public void init(PolicyContext context) throws Exception {
     super.init(context);
-    LOG.info("Using ChaosMonkey Policy: " + this.getClass() + ", period: " + periodMs);
+    LOG.info("Using ChaosMonkey Policy {}, period={} ms", this.getClass(), periodMs);
   }
 }


@@ -58,8 +58,7 @@ public class PeriodicRandomActionPolicy extends PeriodicPolicy {
     try {
       action.perform();
     } catch (Exception ex) {
-      LOG.warn("Exception occurred during performing action: "
-          + StringUtils.stringifyException(ex));
+      LOG.warn("Exception performing action: " + StringUtils.stringifyException(ex));
     }
   }


@@ -94,8 +94,8 @@ public abstract class RemoteProcedureDispatcher<TEnv, TRemote extends Comparable
       return false;
     }
-    LOG.info("Started, threads=" + this.corePoolSize +
-        ", queueMaxSize=" + this.queueMaxSize + ", operationDelay=" + this.operationDelay);
+    LOG.info("Instantiated, coreThreads={} (allowCoreThreadTimeOut=true), queueMaxSize={}, " +
+        "operationDelay={}", this.corePoolSize, this.queueMaxSize, this.operationDelay);

     // Create the timeout executor
     timeoutExecutor = new TimeoutExecutorThread();


@@ -550,7 +550,7 @@ public class CacheConfig {
     }
     if (blockCacheDisabled) return null;
     int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE);
-    LOG.info("Allocating On heap LruBlockCache size=" +
+    LOG.info("Allocating onheap LruBlockCache size=" +
         StringUtils.byteDesc(cacheSize) + ", blockSize=" + StringUtils.byteDesc(blockSize));
     ONHEAP_CACHE_INSTANCE = new LruBlockCache(cacheSize, blockSize, true, c);
     return ONHEAP_CACHE_INSTANCE;


@@ -731,15 +731,15 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
   public String toString() {
     return MoreObjects.toStringHelper(this)
       .add("blockCount", getBlockCount())
-      .add("currentSize", getCurrentSize())
-      .add("freeSize", getFreeSize())
-      .add("maxSize", getMaxSize())
-      .add("heapSize", heapSize())
-      .add("minSize", minSize())
+      .add("currentSize", StringUtils.byteDesc(getCurrentSize()))
+      .add("freeSize", StringUtils.byteDesc(getFreeSize()))
+      .add("maxSize", StringUtils.byteDesc(getMaxSize()))
+      .add("heapSize", StringUtils.byteDesc(heapSize()))
+      .add("minSize", StringUtils.byteDesc(minSize()))
       .add("minFactor", minFactor)
-      .add("multiSize", multiSize())
+      .add("multiSize", StringUtils.byteDesc(multiSize()))
       .add("multiFactor", multiFactor)
-      .add("singleSize", singleSize())
+      .add("singleSize", StringUtils.byteDesc(singleSize()))
       .add("singleFactor", singleFactor)
       .toString();
   }


@@ -106,7 +106,7 @@ public class NettyRpcServer extends RpcServer {
     });
     try {
       serverChannel = bootstrap.bind(this.bindAddress).sync().channel();
-      LOG.info("NettyRpcServer bind to address=" + serverChannel.localAddress());
+      LOG.info("Bind to {}", serverChannel.localAddress());
     } catch (InterruptedException e) {
       throw new InterruptedIOException(e.getMessage());
     }
@@ -140,7 +140,7 @@ public class NettyRpcServer extends RpcServer {
     if (!running) {
       return;
     }
-    LOG.info("Stopping server on " + this.bindAddress.getPort());
+    LOG.info("Stopping server on " + this.serverChannel.localAddress());
     if (authTokenSecretMgr != null) {
       authTokenSecretMgr.stop();
       authTokenSecretMgr = null;


@@ -145,9 +145,9 @@ public abstract class RpcExecutor {
       queueClass = LinkedBlockingQueue.class;
     }

-    LOG.info("RpcExecutor " + this.name + " using " + this.queueClass
-        + " as call queue; numCallQueues=" + this.numCallQueues + "; maxQueueLength="
-        + maxQueueLength + "; handlerCount=" + this.handlerCount);
+    LOG.info("Instantiated {} with queueClass={}; " +
+        "numCallQueues={}, maxQueueLength={}, handlerCount={}",
+        this.name, this.queueClass, this.numCallQueues, maxQueueLength, this.handlerCount);
   }

   protected int computeNumCallQueues(final int handlerCount, final float callQueuesHandlersFactor) {
@@ -260,8 +260,8 @@
       handler.start();
       handlers.add(handler);
     }
-    LOG.debug("Started " + handlers.size() + " " + threadPrefix +
-        " handlers, queues=" + qsize + ", port=" + port);
+    LOG.debug("Started handlerCount={} with threadPrefix={}, numCallQueues={}, port={}",
+        handlers.size(), threadPrefix, qsize, port);
   }

   /**


@@ -3543,7 +3543,6 @@ public class HMaster extends HRegionServer implements MasterServices {
   @Override
   public boolean recoverMeta() throws IOException {
     ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch(2, 0);
-    LOG.info("Running RecoverMetaProcedure to ensure proper hbase:meta deploy.");
     procedureExecutor.submitProcedure(new RecoverMetaProcedure(null, true, latch));
     latch.await();
     LOG.info("hbase:meta deployed at=" +


@@ -41,17 +41,17 @@ public class RegionServerProcedureManagerHost extends
   public void initialize(RegionServerServices rss) throws KeeperException {
     for (RegionServerProcedureManager proc : procedures) {
-      LOG.debug("Procedure " + proc.getProcedureSignature() + " is initializing");
+      LOG.debug("Procedure {} initializing", proc.getProcedureSignature());
       proc.initialize(rss);
-      LOG.debug("Procedure " + proc.getProcedureSignature() + " is initialized");
+      LOG.debug("Procedure {} initialized", proc.getProcedureSignature());
     }
   }

   public void start() {
     for (RegionServerProcedureManager proc : procedures) {
-      LOG.debug("Procedure " + proc.getProcedureSignature() + " is starting");
+      LOG.debug("Procedure {} starting", proc.getProcedureSignature());
       proc.start();
-      LOG.debug("Procedure " + proc.getProcedureSignature() + " is started");
+      LOG.debug("Procedure {} started", proc.getProcedureSignature());
     }
   }


@@ -19,6 +19,7 @@
 package org.apache.hadoop.hbase.regionserver;

 import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;

 import java.io.IOException;
 import java.util.ArrayList;
@@ -144,8 +145,8 @@ public class CompactingMemStore extends AbstractMemStore {
           IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT);
     }
     inmemoryFlushSize = (long) (inmemoryFlushSize * factor);
-    LOG.info("Setting in-memory flush size threshold to " + inmemoryFlushSize
-        + " and immutable segments index to be of type " + indexType);
+    LOG.info("Setting in-memory flush size threshold to {} and immutable segments index to type={}",
+        StringUtils.byteDesc(inmemoryFlushSize), indexType);
   }

   /**


@@ -125,7 +125,7 @@ public class CompactionPipeline {
       return false;
     }
     suffix = versionedList.getStoreSegments();
-    LOG.debug("Swapping pipeline suffix; before={}, new segement={}",
+    LOG.debug("Swapping pipeline suffix; before={}, new segment={}",
         versionedList.getStoreSegments().size(), segment);
     swapSuffix(suffix, segment, closeSuffix);
     readOnlyCopy = new LinkedList<>(pipeline);


@@ -955,8 +955,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       nextSeqid++;
     }

-    LOG.info("Onlined " + this.getRegionInfo().getShortNameToLog() +
-        "; next sequenceid=" + nextSeqid);
+    LOG.info("Opened {}; next sequenceid={}", this.getRegionInfo().getShortNameToLog(), nextSeqid);

     // A region can be reopened if failed a split; reset flags
     this.closing.set(false);


@@ -1146,9 +1146,7 @@ public class HRegionServer extends HasThread implements
     if (this.zooKeeper != null) {
       this.zooKeeper.close();
     }
-    LOG.info("stopping server " + this.serverName + "; zookeeper connection closed.");
-    LOG.info(Thread.currentThread().getName() + " exiting");
+    LOG.info("Exiting; stopping=" + this.serverName + "; zookeeper connection closed.");
   }

   private boolean containsMetaTableRegions() {


@@ -292,7 +292,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
       this.memstore = ReflectionUtils.newInstance(clz, new Object[] { conf, this.comparator, this,
           this.getHRegion().getRegionServicesForStores(), inMemoryCompaction });
     }
-    LOG.info("Memstore class name is {}", className);
+    LOG.debug("Memstore type={}", className);
     this.offPeakHours = OffPeakHours.getInstance(conf);

     // Setting up cache configuration for this family


@@ -207,7 +207,7 @@ public class HeapMemoryManager {
   }

   public void start(ChoreService service) {
-    LOG.info("Starting HeapMemoryTuner chore.");
+    LOG.info("Starting, tuneOn={}", this.tunerOn);
     this.heapMemTunerChore = new HeapMemoryTunerChore();
     service.scheduleChore(heapMemTunerChore);
     if (tunerOn) {
@@ -218,7 +218,7 @@ public class HeapMemoryManager {
   public void stop() {
     // The thread is Daemon. Just interrupting the ongoing process.
-    LOG.info("Stopping HeapMemoryTuner chore.");
+    LOG.info("Stopping");
     this.heapMemTunerChore.cancel(true);
   }


@@ -144,10 +144,9 @@ public class Leases extends HasThread {
    * without any cancellation calls.
    */
   public void close() {
-    LOG.info(Thread.currentThread().getName() + " closing leases");
     this.stopRequested = true;
     leases.clear();
-    LOG.info(Thread.currentThread().getName() + " closed leases");
+    LOG.info("Closed leases");
   }

   /**


@@ -85,13 +85,12 @@ public abstract class MemStoreCompactionStrategy {
     int numOfSegments = versionedList.getNumOfSegments();
     if (numOfSegments > pipelineThreshold) {
       // to avoid too many segments, merge now
-      LOG.debug("{} in-memory compaction of {}; merging {} segments",
-          strategy, cfName, numOfSegments);
+      LOG.debug("{} {}; merging {} segments", strategy, cfName, numOfSegments);
       return getMergingAction();
     }

     // just flatten a segment
-    LOG.debug("{} in-memory compaction of {}; flattening a segment", strategy, cfName);
+    LOG.debug("{} {}; flattening a segment", strategy, cfName);
     return getFlattenAction();
   }


@@ -92,7 +92,8 @@ public class MemStoreCompactor {
     // get a snapshot of the list of the segments from the pipeline,
     // this local copy of the list is marked with specific version
     versionedList = compactingMemStore.getImmutableSegments();
-    LOG.debug("Starting In-Memory Compaction of {}",
+    LOG.debug("Starting on {}/{}",
+        compactingMemStore.getStore().getHRegion().getRegionInfo().getEncodedName(),
         compactingMemStore.getStore().getColumnFamilyName());
     HStore store = compactingMemStore.getStore();
     RegionCoprocessorHost cpHost = store.getCoprocessorHost();


@@ -413,7 +413,7 @@ public abstract class Segment {
   @Override
   public String toString() {
-    String res = "Type=" + this.getClass().getSimpleName() + ", ";
+    String res = "type=" + this.getClass().getSimpleName() + ", ";
     res += "empty=" + (isEmpty()? "yes": "no") + ", ";
     res += "cellCount=" + getCellsCount() + ", ";
     res += "cellSize=" + keySize() + ", ";


@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver.compactions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -149,14 +150,14 @@ public class CompactionConfiguration {
   @Override
   public String toString() {
     return String.format(
-        "size [%d, %d, %d); files [%d, %d); ratio %f; off-peak ratio %f; throttle point %d;"
+        "size [%s, %s, %s); files [%d, %d); ratio %f; off-peak ratio %f; throttle point %d;"
             + " major period %d, major jitter %f, min locality to compact %f;"
            + " tiered compaction: max_age %d, incoming window min %d,"
            + " compaction policy for tiered window %s, single output for minor %b,"
            + " compaction window factory %s",
-        minCompactSize,
-        maxCompactSize,
-        offPeakMaxCompactSize,
+        StringUtils.byteDesc(minCompactSize),
+        StringUtils.byteDesc(maxCompactSize),
+        StringUtils.byteDesc(offPeakMaxCompactSize),
         minFilesToCompact,
         maxFilesToCompact,
         compactionRatio,


@@ -118,9 +118,9 @@ public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy {
           + " files of size "+ smallestSize + " because the store might be stuck");
       return new ArrayList<>(smallest);
     }
-    LOG.debug("Exploring compaction algorithm has selected " + bestSelection.size()
-        + " files of size " + bestSize + " starting at candidate #" + bestStart +
-        " after considering " + opts + " permutations with " + optsInRatio + " in ratio");
+    LOG.debug("Exploring compaction algorithm has selected {} files of size {} starting at " +
+        "candidate #{} after considering {} permutations with {} in ratio", bestSelection.size(),
+        bestSize, bestStart, opts, optsInRatio);
     return new ArrayList<>(bestSelection);
   }


@@ -90,7 +90,7 @@ public abstract class PressureAwareThroughputController extends Configured imple
     if (speed >= 1E15) { // large enough to say it is unlimited
       return "unlimited";
     } else {
-      return String.format("%.2f MB/sec", speed / 1024 / 1024);
+      return String.format("%.2f MB/second", speed / 1024 / 1024);
     }
   }


@@ -129,7 +129,7 @@ public class Replication implements ReplicationSourceService, ReplicationSinkSer
     }
     this.statsThreadPeriod =
         this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60);
-    LOG.debug("ReplicationStatisticsThread " + this.statsThreadPeriod);
+    LOG.debug("Replication stats-in-log period={} seconds", this.statsThreadPeriod);
     this.replicationLoad = new ReplicationLoad();
     this.peerProcedureHandler = new PeerProcedureHandlerImpl(replicationManager);


@@ -1445,9 +1445,6 @@
     <clover.version>4.0.3</clover.version>
     <jamon-runtime.version>2.4.1</jamon-runtime.version>
     <jettison.version>1.3.8</jettison.version>
-    <!--This property is for hadoops netty. HBase netty
-         comes in via hbase-thirdparty hbase-shaded-netty-->
-    <netty.hadoop.version>3.6.2.Final</netty.hadoop.version>
     <!--Make sure these joni/jcodings are compatible with the versions used by jruby-->
     <joni.version>2.1.11</joni.version>
     <jcodings.version>1.0.18</jcodings.version>
@@ -2444,6 +2441,9 @@
         <hadoop.version>${hadoop-two.version}</hadoop.version>
         <compat.module>hbase-hadoop2-compat</compat.module>
         <assembly.file>src/main/assembly/hadoop-two-compat.xml</assembly.file>
+        <!--This property is for hadoops netty. HBase netty
+             comes in via hbase-thirdparty hbase-shaded-netty-->
+        <netty.hadoop.version>3.6.2.Final</netty.hadoop.version>
       </properties>
       <dependencyManagement>
         <dependencies>
@@ -2718,6 +2718,9 @@
         <!--Use this compat module for now. TODO: Make h3 one if we need one-->
         <compat.module>hbase-hadoop2-compat</compat.module>
         <assembly.file>src/main/assembly/hadoop-two-compat.xml</assembly.file>
+        <!--This property is for hadoops netty. HBase netty
+             comes in via hbase-thirdparty hbase-shaded-netty-->
+        <netty.hadoop.version>3.10.5.Final</netty.hadoop.version>
       </properties>
       <dependencyManagement>
         <dependencies>