HBASE-20043 ITBLL fails against hadoop3
Fix MoveRandomRegionOfTableAction. It depended on old AM behavior: it unassigned a region and trusted the assignment manager to reassign it. Make it do an explicit move, as the new assignment manager (AMv2) requires; without it the action was just closing the region, causing the test to fail (see the sketch after the MoveRandomRegionOfTableAction hunk below). Fix the pom so the hadoop3 profile specifies a different netty3 version. A bunch of logging-format changes that came of trying to read the spew from this test.
commit 549a6d93d4 (parent ba5fb53d14)
@@ -80,8 +80,9 @@ public class ByteBufferPool {
     this.maxPoolSize = maxPoolSize;
     this.directByteBuffer = directByteBuffer;
     // TODO can add initialPoolSize config also and make those many BBs ready for use.
-    LOG.info("Created ByteBufferPool with bufferSize : " + bufferSize + " and maxPoolSize : "
-        + maxPoolSize);
+    LOG.info("Created with bufferSize={} and maxPoolSize={}",
+        org.apache.hadoop.util.StringUtils.byteDesc(bufferSize),
+        org.apache.hadoop.util.StringUtils.byteDesc(maxPoolSize));
     this.count = new AtomicInteger(0);
   }
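This hunk sets the pattern for the logging changes that follow: string concatenation inside LOG calls gives way to SLF4J `{}` placeholders, and raw byte counts get wrapped in `StringUtils.byteDesc` for readability. A minimal sketch of both ideas (class name and values are illustrative, not from the patch):

```java
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogFormatSketch {
  private static final Logger LOG = LoggerFactory.getLogger(LogFormatSketch.class);

  public static void main(String[] args) {
    long bufferSize = 64 * 1024;
    // Concatenation builds the message string even when INFO is disabled:
    LOG.info("Created ByteBufferPool with bufferSize : " + bufferSize);
    // Placeholders defer formatting until the level check passes, and
    // byteDesc renders the count human-readably, e.g. "64 KB":
    LOG.info("Created with bufferSize={}", StringUtils.byteDesc(bufferSize));
  }
}
```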
@@ -138,63 +138,63 @@ public class Action {
   }
 
   protected void killMaster(ServerName server) throws IOException {
-    LOG.info("Killing master:" + server);
+    LOG.info("Killing master " + server);
     cluster.killMaster(server);
     cluster.waitForMasterToStop(server, killMasterTimeout);
-    LOG.info("Killed master server:" + server);
+    LOG.info("Killed master " + server);
   }
 
   protected void startMaster(ServerName server) throws IOException {
-    LOG.info("Starting master:" + server.getHostname());
+    LOG.info("Starting master " + server.getHostname());
     cluster.startMaster(server.getHostname(), server.getPort());
     cluster.waitForActiveAndReadyMaster(startMasterTimeout);
-    LOG.info("Started master: " + server);
+    LOG.info("Started master " + server.getHostname());
   }
 
   protected void killRs(ServerName server) throws IOException {
-    LOG.info("Killing region server:" + server);
+    LOG.info("Killing regionserver " + server);
     cluster.killRegionServer(server);
     cluster.waitForRegionServerToStop(server, killRsTimeout);
-    LOG.info("Killed region server:" + server + ". Reported num of rs:"
+    LOG.info("Killed regionserver " + server + ". Reported num of rs:"
         + cluster.getClusterMetrics().getLiveServerMetrics().size());
   }
 
   protected void startRs(ServerName server) throws IOException {
-    LOG.info("Starting region server:" + server.getHostname());
+    LOG.info("Starting regionserver " + server.getAddress());
     cluster.startRegionServer(server.getHostname(), server.getPort());
     cluster.waitForRegionServerToStart(server.getHostname(), server.getPort(), startRsTimeout);
-    LOG.info("Started region server:" + server + ". Reported num of rs:"
+    LOG.info("Started regionserver " + server.getAddress() + ". Reported num of rs:"
         + cluster.getClusterMetrics().getLiveServerMetrics().size());
   }
 
   protected void killZKNode(ServerName server) throws IOException {
-    LOG.info("Killing zookeeper node:" + server);
+    LOG.info("Killing zookeeper node " + server);
     cluster.killZkNode(server);
     cluster.waitForZkNodeToStop(server, killZkNodeTimeout);
-    LOG.info("Killed zookeeper node:" + server + ". Reported num of rs:"
+    LOG.info("Killed zookeeper node " + server + ". Reported num of rs:"
        + cluster.getClusterMetrics().getLiveServerMetrics().size());
   }
 
   protected void startZKNode(ServerName server) throws IOException {
-    LOG.info("Starting zookeeper node:" + server.getHostname());
+    LOG.info("Starting zookeeper node " + server.getHostname());
     cluster.startZkNode(server.getHostname(), server.getPort());
     cluster.waitForZkNodeToStart(server, startZkNodeTimeout);
-    LOG.info("Started zookeeper node:" + server);
+    LOG.info("Started zookeeper node " + server);
   }
 
   protected void killDataNode(ServerName server) throws IOException {
-    LOG.info("Killing datanode:" + server);
+    LOG.info("Killing datanode " + server);
     cluster.killDataNode(server);
     cluster.waitForDataNodeToStop(server, killDataNodeTimeout);
-    LOG.info("Killed datanode:" + server + ". Reported num of rs:"
+    LOG.info("Killed datanode " + server + ". Reported num of rs:"
        + cluster.getClusterMetrics().getLiveServerMetrics().size());
   }
 
   protected void startDataNode(ServerName server) throws IOException {
-    LOG.info("Starting datanode:" + server.getHostname());
+    LOG.info("Starting datanode " + server.getHostname());
     cluster.startDataNode(server);
     cluster.waitForDataNodeToStart(server, startDataNodeTimeout);
-    LOG.info("Started datanode:" + server);
+    LOG.info("Started datanode " + server);
   }
 
   protected void unbalanceRegions(ClusterMetrics clusterStatus,
@@ -21,10 +21,10 @@ package org.apache.hadoop.hbase.chaos.actions;
 import java.util.List;
 
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.RegionInfo;
 
 /**
  * Action that tries to move a random region of a table.
@@ -52,16 +52,17 @@ public class MoveRandomRegionOfTableAction extends Action {
     Admin admin = util.getAdmin();
 
     LOG.info("Performing action: Move random region of table " + tableName);
-    List<HRegionInfo> regions = admin.getTableRegions(tableName);
+    List<RegionInfo> regions = admin.getRegions(tableName);
     if (regions == null || regions.isEmpty()) {
       LOG.info("Table " + tableName + " doesn't have regions to move");
       return;
     }
 
-    HRegionInfo region = PolicyBasedChaosMonkey.selectRandomItem(
-        regions.toArray(new HRegionInfo[regions.size()]));
-    LOG.debug("Unassigning region " + region.getRegionNameAsString());
-    admin.unassign(region.getRegionName(), false);
+    RegionInfo region = PolicyBasedChaosMonkey.selectRandomItem(
+        regions.toArray(new RegionInfo[regions.size()]));
+    LOG.debug("Move random region {}", region.getRegionNameAsString());
+    // Use facility over in MoveRegionsOfTableAction...
+    MoveRegionsOfTableAction.moveRegion(admin, MoveRegionsOfTableAction.getServers(admin), region);
     if (sleepTime > 0) {
       Thread.sleep(sleepTime);
     }
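This hunk is the heart of the fix. Under the rewritten assignment manager, `unassign` just closes the region and nothing reopens it, so the chaos action was quietly shrinking the table out from under ITBLL; the action must instead ask the master for an explicit move to a concrete destination. A minimal sketch of that pattern, built only from the Admin calls visible in this patch (connection setup elided; the random choice via ThreadLocalRandom is an assumption):

```java
import java.util.EnumSet;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class ExplicitMoveSketch {
  // Close + reopen as a single operation: move the region to a random live server.
  static void moveToRandomServer(Admin admin, RegionInfo region) throws Exception {
    ServerName[] servers = admin
        .getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
        .getLiveServerMetrics().keySet().toArray(new ServerName[0]);
    ServerName dest = servers[ThreadLocalRandom.current().nextInt(servers.length)];
    admin.move(region.getEncodedNameAsBytes(), Bytes.toBytes(dest.getServerName()));
  }
}
```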
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hbase.chaos.actions;
 
+import java.io.IOException;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.EnumSet;
@@ -29,6 +30,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.chaos.factories.MonkeyConstants;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
@@ -56,14 +58,12 @@ public class MoveRegionsOfTableAction extends Action {
     }
 
     Admin admin = this.context.getHBaseIntegrationTestingUtility().getAdmin();
-    Collection<ServerName> serversList =
-        admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet();
-    ServerName[] servers = serversList.toArray(new ServerName[serversList.size()]);
+    ServerName[] servers = getServers(admin);
 
-    LOG.info("Performing action: Move regions of table " + tableName);
+    LOG.info("Performing action: Move regions of table {}", tableName);
     List<HRegionInfo> regions = admin.getTableRegions(tableName);
     if (regions == null || regions.isEmpty()) {
-      LOG.info("Table " + tableName + " doesn't have regions to move");
+      LOG.info("Table {} doesn't have regions to move", tableName);
       return;
     }
@@ -77,14 +77,7 @@ public class MoveRegionsOfTableAction extends Action {
       return;
     }
 
-    try {
-      String destServerName =
-          servers[RandomUtils.nextInt(0, servers.length)].getServerName();
-      LOG.debug("Moving " + regionInfo.getRegionNameAsString() + " to " + destServerName);
-      admin.move(regionInfo.getEncodedNameAsBytes(), Bytes.toBytes(destServerName));
-    } catch (Exception ex) {
-      LOG.warn("Move failed, might be caused by other chaos: " + ex.getMessage());
-    }
+    moveRegion(admin, servers, regionInfo);
     if (sleepTime > 0) {
       Thread.sleep(sleepTime);
     }
@@ -96,4 +89,20 @@ public class MoveRegionsOfTableAction extends Action {
       }
     }
   }
+
+  static ServerName [] getServers(Admin admin) throws IOException {
+    Collection<ServerName> serversList =
+        admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet();
+    return serversList.toArray(new ServerName[serversList.size()]);
+  }
+
+  static void moveRegion(Admin admin, ServerName [] servers, RegionInfo regionInfo) {
+    try {
+      String destServerName = servers[RandomUtils.nextInt(0, servers.length)].getServerName();
+      LOG.debug("Moving {} to {}", regionInfo.getRegionNameAsString(), destServerName);
+      admin.move(regionInfo.getEncodedNameAsBytes(), Bytes.toBytes(destServerName));
+    } catch (Exception ex) {
+      LOG.warn("Move failed, might be caused by other chaos: {}", ex.getMessage());
+    }
+  }
 }
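The hunk above extracts getServers and moveRegion as package-private statics so that MoveRandomRegionOfTableAction and MoveRegionsOfTableAction share one code path; moveRegion deliberately swallows exceptions since a concurrent chaos action may already have killed the chosen destination. A hypothetical third action in the same package could reuse the helpers like this (shuffleTable is invented for illustration):

```java
import java.io.IOException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;

public class ReuseSketch {
  // Move every region of a table once, reusing the shared helpers.
  static void shuffleTable(Admin admin, TableName tableName) throws IOException {
    ServerName[] servers = MoveRegionsOfTableAction.getServers(admin);
    for (RegionInfo region : admin.getRegions(tableName)) {
      MoveRegionsOfTableAction.moveRegion(admin, servers, region);
    }
  }
}
```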
@@ -113,7 +113,7 @@ public class PolicyBasedChaosMonkey extends ChaosMonkey {
 
     for (int i=0; i<policies.length; i++) {
       policies[i].init(new Policy.PolicyContext(this.util));
-      Thread monkeyThread = new Thread(policies[i], "ChaosMonkeyThread");
+      Thread monkeyThread = new Thread(policies[i], "ChaosMonkey");
       monkeyThread.start();
       monkeyThreads[i] = monkeyThread;
     }
@@ -33,7 +33,7 @@ public abstract class PeriodicPolicy extends Policy {
   public void run() {
     // Add some jitter.
     int jitter = RandomUtils.nextInt(0, (int) periodMs);
-    LOG.info("Sleeping for " + jitter + " to add jitter");
+    LOG.info("Sleeping for {} ms to add jitter", jitter);
     Threads.sleep(jitter);
 
     while (!isStopped()) {
@@ -43,7 +43,7 @@ public abstract class PeriodicPolicy extends Policy {
       if (isStopped()) return;
       long sleepTime = periodMs - (System.currentTimeMillis() - start);
       if (sleepTime > 0) {
-        LOG.info("Sleeping for: " + sleepTime);
+        LOG.info("Sleeping for {} ms", sleepTime);
         Threads.sleep(sleepTime);
       }
     }
@@ -54,6 +54,6 @@ public abstract class PeriodicPolicy extends Policy {
   @Override
   public void init(PolicyContext context) throws Exception {
     super.init(context);
-    LOG.info("Using ChaosMonkey Policy: " + this.getClass() + ", period: " + periodMs);
+    LOG.info("Using ChaosMonkey Policy {}, period={} ms", this.getClass(), periodMs);
   }
 }
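The jitter logged in the two run() hunks above exists so that policies started together do not all fire on the same beat: each run() sleeps a random fraction of the period first, then settles into a fixed cadence by sleeping only the remainder of each period. A self-contained sketch of that loop shape (names and the bounded loop are illustrative, not the HBase classes):

```java
import java.util.concurrent.ThreadLocalRandom;

public class JitteredPeriodicLoop {
  public static void main(String[] args) throws InterruptedException {
    long periodMs = 1000;
    // Random initial offset so concurrently started loops desynchronize.
    long jitter = ThreadLocalRandom.current().nextLong(periodMs);
    System.out.println("Sleeping for " + jitter + " ms to add jitter");
    Thread.sleep(jitter);

    for (int i = 0; i < 3; i++) { // bounded here; the real policy loops until stopped
      long start = System.currentTimeMillis();
      System.out.println("Performing periodic action " + i);
      // Sleep only the remainder of the period so the cadence stays fixed.
      long sleepTime = periodMs - (System.currentTimeMillis() - start);
      if (sleepTime > 0) {
        Thread.sleep(sleepTime);
      }
    }
  }
}
```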
@@ -58,8 +58,7 @@ public class PeriodicRandomActionPolicy extends PeriodicPolicy {
       try {
         action.perform();
       } catch (Exception ex) {
-        LOG.warn("Exception occurred during performing action: "
-            + StringUtils.stringifyException(ex));
+        LOG.warn("Exception performing action: " + StringUtils.stringifyException(ex));
       }
     }
 
@@ -94,8 +94,8 @@ public abstract class RemoteProcedureDispatcher<TEnv, TRemote extends Comparable
       return false;
     }
 
-    LOG.info("Started, threads=" + this.corePoolSize +
-        ", queueMaxSize=" + this.queueMaxSize + ", operationDelay=" + this.operationDelay);
+    LOG.info("Instantiated, coreThreads={} (allowCoreThreadTimeOut=true), queueMaxSize={}, " +
+        "operationDelay={}", this.corePoolSize, this.queueMaxSize, this.operationDelay);
 
     // Create the timeout executor
     timeoutExecutor = new TimeoutExecutorThread();
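The reworded message also surfaces allowCoreThreadTimeOut=true, the standard java.util.concurrent knob that lets even a pool's core threads exit when idle. A minimal sketch of a pool configured that way (sizes are arbitrary, not the dispatcher's):

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class CoreTimeoutPoolSketch {
  public static void main(String[] args) {
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        128, 128,                       // core == max: fixed-size pool
        60, TimeUnit.SECONDS,           // idle threads may exit after 60s...
        new LinkedBlockingQueue<>());
    pool.allowCoreThreadTimeOut(true);  // ...including the core threads
    pool.execute(() -> System.out.println("dispatched"));
    pool.shutdown();
  }
}
```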
@@ -550,7 +550,7 @@ public class CacheConfig {
     }
     if (blockCacheDisabled) return null;
     int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE);
-    LOG.info("Allocating On heap LruBlockCache size=" +
+    LOG.info("Allocating onheap LruBlockCache size=" +
         StringUtils.byteDesc(cacheSize) + ", blockSize=" + StringUtils.byteDesc(blockSize));
     ONHEAP_CACHE_INSTANCE = new LruBlockCache(cacheSize, blockSize, true, c);
     return ONHEAP_CACHE_INSTANCE;
@@ -731,15 +731,15 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
   public String toString() {
     return MoreObjects.toStringHelper(this)
       .add("blockCount", getBlockCount())
-      .add("currentSize", getCurrentSize())
-      .add("freeSize", getFreeSize())
-      .add("maxSize", getMaxSize())
-      .add("heapSize", heapSize())
-      .add("minSize", minSize())
+      .add("currentSize", StringUtils.byteDesc(getCurrentSize()))
+      .add("freeSize", StringUtils.byteDesc(getFreeSize()))
+      .add("maxSize", StringUtils.byteDesc(getMaxSize()))
+      .add("heapSize", StringUtils.byteDesc(heapSize()))
+      .add("minSize", StringUtils.byteDesc(minSize()))
       .add("minFactor", minFactor)
-      .add("multiSize", multiSize())
+      .add("multiSize", StringUtils.byteDesc(multiSize()))
       .add("multiFactor", multiFactor)
-      .add("singleSize", singleSize())
+      .add("singleSize", StringUtils.byteDesc(singleSize()))
       .add("singleFactor", singleFactor)
       .toString();
   }
@@ -106,7 +106,7 @@ public class NettyRpcServer extends RpcServer {
     });
     try {
       serverChannel = bootstrap.bind(this.bindAddress).sync().channel();
-      LOG.info("NettyRpcServer bind to address=" + serverChannel.localAddress());
+      LOG.info("Bind to {}", serverChannel.localAddress());
     } catch (InterruptedException e) {
       throw new InterruptedIOException(e.getMessage());
     }
@@ -140,7 +140,7 @@ public class NettyRpcServer extends RpcServer {
     if (!running) {
       return;
     }
-    LOG.info("Stopping server on " + this.bindAddress.getPort());
+    LOG.info("Stopping server on " + this.serverChannel.localAddress());
     if (authTokenSecretMgr != null) {
       authTokenSecretMgr.stop();
       authTokenSecretMgr = null;
@@ -145,9 +145,9 @@ public abstract class RpcExecutor {
       queueClass = LinkedBlockingQueue.class;
     }
 
-    LOG.info("RpcExecutor " + this.name + " using " + this.queueClass
-        + " as call queue; numCallQueues=" + this.numCallQueues + "; maxQueueLength="
-        + maxQueueLength + "; handlerCount=" + this.handlerCount);
+    LOG.info("Instantiated {} with queueClass={}; " +
+        "numCallQueues={}, maxQueueLength={}, handlerCount={}",
+        this.name, this.queueClass, this.numCallQueues, maxQueueLength, this.handlerCount);
   }
 
   protected int computeNumCallQueues(final int handlerCount, final float callQueuesHandlersFactor) {
@@ -260,8 +260,8 @@ public abstract class RpcExecutor {
       handler.start();
       handlers.add(handler);
     }
-    LOG.debug("Started " + handlers.size() + " " + threadPrefix +
-        " handlers, queues=" + qsize + ", port=" + port);
+    LOG.debug("Started handlerCount={} with threadPrefix={}, numCallQueues={}, port={}",
+        handlers.size(), threadPrefix, qsize, port);
   }
 
   /**
@@ -3543,7 +3543,6 @@ public class HMaster extends HRegionServer implements MasterServices {
   @Override
   public boolean recoverMeta() throws IOException {
     ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch(2, 0);
-    LOG.info("Running RecoverMetaProcedure to ensure proper hbase:meta deploy.");
     procedureExecutor.submitProcedure(new RecoverMetaProcedure(null, true, latch));
     latch.await();
     LOG.info("hbase:meta deployed at=" +
@@ -41,17 +41,17 @@ public class RegionServerProcedureManagerHost extends
 
   public void initialize(RegionServerServices rss) throws KeeperException {
     for (RegionServerProcedureManager proc : procedures) {
-      LOG.debug("Procedure " + proc.getProcedureSignature() + " is initializing");
+      LOG.debug("Procedure {} initializing", proc.getProcedureSignature());
       proc.initialize(rss);
-      LOG.debug("Procedure " + proc.getProcedureSignature() + " is initialized");
+      LOG.debug("Procedure {} initialized", proc.getProcedureSignature());
     }
   }
 
   public void start() {
     for (RegionServerProcedureManager proc : procedures) {
-      LOG.debug("Procedure " + proc.getProcedureSignature() + " is starting");
+      LOG.debug("Procedure {} starting", proc.getProcedureSignature());
       proc.start();
-      LOG.debug("Procedure " + proc.getProcedureSignature() + " is started");
+      LOG.debug("Procedure {} started", proc.getProcedureSignature());
     }
   }
 
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -144,8 +145,8 @@ public class CompactingMemStore extends AbstractMemStore {
           IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT);
     }
     inmemoryFlushSize = (long) (inmemoryFlushSize * factor);
-    LOG.info("Setting in-memory flush size threshold to " + inmemoryFlushSize
-        + " and immutable segments index to be of type " + indexType);
+    LOG.info("Setting in-memory flush size threshold to {} and immutable segments index to type={}",
+        StringUtils.byteDesc(inmemoryFlushSize), indexType);
   }
 
   /**
@@ -125,7 +125,7 @@ public class CompactionPipeline {
       return false;
     }
     suffix = versionedList.getStoreSegments();
-    LOG.debug("Swapping pipeline suffix; before={}, new segement={}",
+    LOG.debug("Swapping pipeline suffix; before={}, new segment={}",
         versionedList.getStoreSegments().size(), segment);
     swapSuffix(suffix, segment, closeSuffix);
     readOnlyCopy = new LinkedList<>(pipeline);
@@ -955,8 +955,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       nextSeqid++;
     }
 
-    LOG.info("Onlined " + this.getRegionInfo().getShortNameToLog() +
-        "; next sequenceid=" + nextSeqid);
+    LOG.info("Opened {}; next sequenceid={}", this.getRegionInfo().getShortNameToLog(), nextSeqid);
 
     // A region can be reopened if failed a split; reset flags
     this.closing.set(false);
@@ -1146,9 +1146,7 @@ public class HRegionServer extends HasThread implements
     if (this.zooKeeper != null) {
       this.zooKeeper.close();
     }
-    LOG.info("stopping server " + this.serverName + "; zookeeper connection closed.");
-
-    LOG.info(Thread.currentThread().getName() + " exiting");
+    LOG.info("Exiting; stopping=" + this.serverName + "; zookeeper connection closed.");
   }
 
   private boolean containsMetaTableRegions() {
@@ -292,7 +292,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
       this.memstore = ReflectionUtils.newInstance(clz, new Object[] { conf, this.comparator, this,
           this.getHRegion().getRegionServicesForStores(), inMemoryCompaction });
     }
-    LOG.info("Memstore class name is {}", className);
+    LOG.debug("Memstore type={}", className);
     this.offPeakHours = OffPeakHours.getInstance(conf);
 
     // Setting up cache configuration for this family
@@ -207,10 +207,10 @@ public class HeapMemoryManager {
   }
 
   public void start(ChoreService service) {
-    LOG.info("Starting HeapMemoryTuner chore.");
-    this.heapMemTunerChore = new HeapMemoryTunerChore();
-    service.scheduleChore(heapMemTunerChore);
-    if (tunerOn) {
+    LOG.info("Starting, tuneOn={}", this.tunerOn);
+    this.heapMemTunerChore = new HeapMemoryTunerChore();
+    service.scheduleChore(heapMemTunerChore);
+    if (tunerOn) {
       // Register HeapMemoryTuner as a memstore flush listener
       memStoreFlusher.registerFlushRequestListener(heapMemTunerChore);
     }
@@ -218,7 +218,7 @@ public class HeapMemoryManager {
 
   public void stop() {
     // The thread is Daemon. Just interrupting the ongoing process.
-    LOG.info("Stopping HeapMemoryTuner chore.");
+    LOG.info("Stopping");
    this.heapMemTunerChore.cancel(true);
  }
 
@@ -144,10 +144,9 @@ public class Leases extends HasThread {
    * without any cancellation calls.
    */
   public void close() {
-    LOG.info(Thread.currentThread().getName() + " closing leases");
     this.stopRequested = true;
     leases.clear();
-    LOG.info(Thread.currentThread().getName() + " closed leases");
+    LOG.info("Closed leases");
   }
 
   /**
@@ -85,13 +85,12 @@ public abstract class MemStoreCompactionStrategy {
     int numOfSegments = versionedList.getNumOfSegments();
     if (numOfSegments > pipelineThreshold) {
       // to avoid too many segments, merge now
-      LOG.debug("{} in-memory compaction of {}; merging {} segments",
-          strategy, cfName, numOfSegments);
+      LOG.debug("{} {}; merging {} segments", strategy, cfName, numOfSegments);
       return getMergingAction();
     }
 
     // just flatten a segment
-    LOG.debug("{} in-memory compaction of {}; flattening a segment", strategy, cfName);
+    LOG.debug("{} {}; flattening a segment", strategy, cfName);
     return getFlattenAction();
   }
 
@@ -92,7 +92,8 @@ public class MemStoreCompactor {
     // get a snapshot of the list of the segments from the pipeline,
     // this local copy of the list is marked with specific version
     versionedList = compactingMemStore.getImmutableSegments();
-    LOG.debug("Starting In-Memory Compaction of {}",
+    LOG.debug("Starting on {}/{}",
+        compactingMemStore.getStore().getHRegion().getRegionInfo().getEncodedName(),
         compactingMemStore.getStore().getColumnFamilyName());
     HStore store = compactingMemStore.getStore();
     RegionCoprocessorHost cpHost = store.getCoprocessorHost();
@@ -413,7 +413,7 @@ public abstract class Segment {
 
   @Override
   public String toString() {
-    String res = "Type=" + this.getClass().getSimpleName() + ", ";
+    String res = "type=" + this.getClass().getSimpleName() + ", ";
     res += "empty=" + (isEmpty()? "yes": "no") + ", ";
     res += "cellCount=" + getCellsCount() + ", ";
     res += "cellSize=" + keySize() + ", ";
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver.compactions;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -149,14 +150,14 @@ public class CompactionConfiguration {
   @Override
   public String toString() {
     return String.format(
-      "size [%d, %d, %d); files [%d, %d); ratio %f; off-peak ratio %f; throttle point %d;"
+      "size [%s, %s, %s); files [%d, %d); ratio %f; off-peak ratio %f; throttle point %d;"
           + " major period %d, major jitter %f, min locality to compact %f;"
           + " tiered compaction: max_age %d, incoming window min %d,"
           + " compaction policy for tiered window %s, single output for minor %b,"
           + " compaction window factory %s",
-      minCompactSize,
-      maxCompactSize,
-      offPeakMaxCompactSize,
+      StringUtils.byteDesc(minCompactSize),
+      StringUtils.byteDesc(maxCompactSize),
+      StringUtils.byteDesc(offPeakMaxCompactSize),
       minFilesToCompact,
      maxFilesToCompact,
      compactionRatio,
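The specifier change from %d to %s here is forced rather than cosmetic: StringUtils.byteDesc returns a String, and String.format throws IllegalFormatConversionException when a String meets %d. A quick illustration:

```java
import org.apache.hadoop.util.StringUtils;

public class FormatSpecifierSketch {
  public static void main(String[] args) {
    long minCompactSize = 128L * 1024 * 1024;
    // %s accepts the human-readable size, printing e.g. "size [128 MB; ...":
    System.out.println(String.format("size [%s; ...", StringUtils.byteDesc(minCompactSize)));
    // %d would throw IllegalFormatConversionException on a String argument:
    // String.format("size [%d; ...", StringUtils.byteDesc(minCompactSize));
  }
}
```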
@@ -118,9 +118,9 @@ public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy {
           + " files of size "+ smallestSize + " because the store might be stuck");
       return new ArrayList<>(smallest);
     }
-    LOG.debug("Exploring compaction algorithm has selected " + bestSelection.size()
-        + " files of size " + bestSize + " starting at candidate #" + bestStart +
-        " after considering " + opts + " permutations with " + optsInRatio + " in ratio");
+    LOG.debug("Exploring compaction algorithm has selected {} files of size {} starting at " +
+        "candidate #{} after considering {} permutations with {} in ratio", bestSelection.size(),
+        bestSize, bestStart, opts, optsInRatio);
     return new ArrayList<>(bestSelection);
   }
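One caution with the parameterized style used throughout this patch: SLF4J binds arguments to the {} placeholders purely by position, and neither the compiler nor the runtime flags a wrong, swapped, or duplicated argument, so conversions like the one above deserve a careful read. A small illustration (values are hypothetical):

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class PlaceholderOrderSketch {
  private static final Logger LOG = LoggerFactory.getLogger(PlaceholderOrderSketch.class);

  public static void main(String[] args) {
    int fileCount = 5;
    long totalSize = 1024;
    // Correct: arguments line up with placeholders by position.
    LOG.debug("selected {} files of size {}", fileCount, totalSize);
    // Compiles and runs, but logs a misleading message -- SLF4J cannot
    // detect that the arguments are swapped:
    LOG.debug("selected {} files of size {}", totalSize, fileCount);
  }
}
```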
@@ -90,7 +90,7 @@ public abstract class PressureAwareThroughputController extends Configured imple
     if (speed >= 1E15) { // large enough to say it is unlimited
       return "unlimited";
     } else {
-      return String.format("%.2f MB/sec", speed / 1024 / 1024);
+      return String.format("%.2f MB/second", speed / 1024 / 1024);
     }
   }
 
@@ -129,7 +129,7 @@ public class Replication implements ReplicationSourceService, ReplicationSinkSer
     }
     this.statsThreadPeriod =
         this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60);
-    LOG.debug("ReplicationStatisticsThread " + this.statsThreadPeriod);
+    LOG.debug("Replication stats-in-log period={} seconds", this.statsThreadPeriod);
     this.replicationLoad = new ReplicationLoad();
 
     this.peerProcedureHandler = new PeerProcedureHandlerImpl(replicationManager);
pom.xml
@@ -1445,9 +1445,6 @@
     <clover.version>4.0.3</clover.version>
     <jamon-runtime.version>2.4.1</jamon-runtime.version>
     <jettison.version>1.3.8</jettison.version>
-    <!--This property is for hadoops netty. HBase netty
-         comes in via hbase-thirdparty hbase-shaded-netty-->
-    <netty.hadoop.version>3.6.2.Final</netty.hadoop.version>
     <!--Make sure these joni/jcodings are compatible with the versions used by jruby-->
     <joni.version>2.1.11</joni.version>
     <jcodings.version>1.0.18</jcodings.version>
@@ -2444,6 +2441,9 @@
         <hadoop.version>${hadoop-two.version}</hadoop.version>
         <compat.module>hbase-hadoop2-compat</compat.module>
         <assembly.file>src/main/assembly/hadoop-two-compat.xml</assembly.file>
+        <!--This property is for hadoops netty. HBase netty
+             comes in via hbase-thirdparty hbase-shaded-netty-->
+        <netty.hadoop.version>3.6.2.Final</netty.hadoop.version>
       </properties>
       <dependencyManagement>
         <dependencies>
@@ -2718,6 +2718,9 @@
         <!--Use this compat module for now. TODO: Make h3 one if we need one-->
         <compat.module>hbase-hadoop2-compat</compat.module>
         <assembly.file>src/main/assembly/hadoop-two-compat.xml</assembly.file>
+        <!--This property is for hadoops netty. HBase netty
+             comes in via hbase-thirdparty hbase-shaded-netty-->
+        <netty.hadoop.version>3.10.5.Final</netty.hadoop.version>
       </properties>
       <dependencyManagement>
         <dependencies>