HBASE-24295 [Chaos Monkey] abstract logging through the class hierarchy
Adds `protected abstract Logger getLogger()` to `Action` so that implementations' names are logged when actions are performed.

Signed-off-by: stack <stack@apache.org>
Signed-off-by: Jan Hentschel <jan.hentschel@ultratendency.com>
parent 9e975d1b98
commit 8d1228ece7
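The change is a small template-method refactor over SLF4J: every logging call in the `Action` hierarchy goes through an abstract accessor that each concrete action overrides, so log lines are attributed to the concrete subclass rather than to `Action`. A minimal sketch of the idiom follows; `DemoAction` is a hypothetical subclass used only for illustration, not a class from this patch.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    abstract class Action {
      // Subclasses supply their own logger, so messages carry the subclass name.
      protected abstract Logger getLogger();

      void perform() throws Exception {
        // Logged under the concrete class's logger, e.g. "DemoAction", not "Action".
        getLogger().info("Performing action");
      }
    }

    class DemoAction extends Action { // hypothetical, for illustration only
      private static final Logger LOG = LoggerFactory.getLogger(DemoAction.class);

      @Override protected Logger getLogger() {
        return LOG;
      }
    }

Each file in the diff below follows that shape: a `private static final Logger LOG` field plus a `getLogger()` override, with `LOG.x(...)` call sites rewritten to `getLogger().x(...)`.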
Action.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -39,12 +39,11 @@ import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * A (possibly mischievous) action that the ChaosMonkey can perform.
  */
-public class Action {
+public abstract class Action {
 
   public static final String KILL_MASTER_TIMEOUT_KEY =
     "hbase.chaosmonkey.action.killmastertimeout";
@@ -65,8 +64,6 @@ public class Action {
   public static final String START_NAMENODE_TIMEOUT_KEY =
     "hbase.chaosmonkey.action.startnamenodetimeout";
 
-  private static final Logger LOG = LoggerFactory.getLogger(Action.class);
-
   protected static final long KILL_MASTER_TIMEOUT_DEFAULT = PolicyBasedChaosMonkey.TIMEOUT;
   protected static final long START_MASTER_TIMEOUT_DEFAULT = PolicyBasedChaosMonkey.TIMEOUT;
   protected static final long KILL_RS_TIMEOUT_DEFAULT = PolicyBasedChaosMonkey.TIMEOUT;
@@ -121,6 +118,11 @@ public class Action {
       cluster.getConf().getLong(START_NAMENODE_TIMEOUT_KEY, START_NAMENODE_TIMEOUT_DEFAULT);
   }
 
+  /**
+   * Retrieve the instance's {@link Logger}, for use throughout the class hierarchy.
+   */
+  protected abstract Logger getLogger();
+
   public void perform() throws Exception { }
 
   /** Returns current region servers - active master */
@@ -138,110 +140,110 @@ public class Action {
     ArrayList<ServerName> tmp = new ArrayList<>(count);
     tmp.addAll(regionServers);
     tmp.removeAll(masters);
-    return tmp.toArray(new ServerName[tmp.size()]);
+    return tmp.toArray(new ServerName[0]);
   }
 
   protected void killMaster(ServerName server) throws IOException {
-    LOG.info("Killing master:" + server);
+    getLogger().info("Killing master:" + server);
     cluster.killMaster(server);
     cluster.waitForMasterToStop(server, killMasterTimeout);
-    LOG.info("Killed master server:" + server);
+    getLogger().info("Killed master server:" + server);
   }
 
   protected void startMaster(ServerName server) throws IOException {
-    LOG.info("Starting master:" + server.getHostname());
+    getLogger().info("Starting master:" + server.getHostname());
     cluster.startMaster(server.getHostname(), server.getPort());
     cluster.waitForActiveAndReadyMaster(startMasterTimeout);
-    LOG.info("Started master: " + server);
+    getLogger().info("Started master: " + server);
   }
 
   protected void stopRs(ServerName server) throws IOException {
-    LOG.info("Stopping regionserver " + server);
+    getLogger().info("Stopping regionserver " + server);
     cluster.stopRegionServer(server);
     cluster.waitForRegionServerToStop(server, killRsTimeout);
-    LOG.info(String.format("Stopping regionserver %s. Reported num of rs: %s", server,
+    getLogger().info(String.format("Stopping regionserver %s. Reported num of rs: %s", server,
       cluster.getClusterStatus().getLiveServersLoad().size()));
   }
 
   protected void suspendRs(ServerName server) throws IOException {
-    LOG.info("Suspending regionserver %s" + server);
+    getLogger().info("Suspending regionserver %s" + server);
     cluster.suspendRegionServer(server);
     if(!(cluster instanceof MiniHBaseCluster)){
       cluster.waitForRegionServerToStop(server, killRsTimeout);
     }
-    LOG.info(String.format("Suspending regionserver %s. Reported num of rs: %s", server,
+    getLogger().info(String.format("Suspending regionserver %s. Reported num of rs: %s", server,
      cluster.getClusterStatus().getLiveServersLoad().size()));
   }
 
   protected void resumeRs(ServerName server) throws IOException {
-    LOG.info("Resuming regionserver " + server);
+    getLogger().info("Resuming regionserver " + server);
     cluster.resumeRegionServer(server);
     if(!(cluster instanceof MiniHBaseCluster)){
       cluster.waitForRegionServerToStart(server.getHostname(), server.getPort(), startRsTimeout);
     }
-    LOG.info(String.format("Resuming regionserver %s. Reported num of rs: %s", server,
+    getLogger().info(String.format("Resuming regionserver %s. Reported num of rs: %s", server,
       cluster.getClusterStatus().getLiveServersLoad().size()));
   }
 
   protected void killRs(ServerName server) throws IOException {
-    LOG.info("Killing regionserver " + server);
+    getLogger().info("Killing regionserver " + server);
     cluster.killRegionServer(server);
     cluster.waitForRegionServerToStop(server, killRsTimeout);
-    LOG.info(String.format("Killed regionserver %s. Reported num of rs: %s", server,
+    getLogger().info(String.format("Killed regionserver %s. Reported num of rs: %s", server,
       cluster.getClusterStatus().getLiveServersLoad().size()));
   }
 
   protected void startRs(ServerName server) throws IOException {
-    LOG.info("Starting regionserver " + server.getAddress());
+    getLogger().info("Starting regionserver " + server.getAddress());
     cluster.startRegionServer(server.getHostname(), server.getPort());
     cluster.waitForRegionServerToStart(server.getHostname(), server.getPort(), startRsTimeout);
-    LOG.info(String.format("Started regionserver %s. Reported num of rs: %s", server.getAddress(),
-      cluster.getClusterStatus().getLiveServersLoad().size()));
+    getLogger().info(String.format("Started regionserver %s. Reported num of rs: %s",
+      server.getAddress(), cluster.getClusterStatus().getLiveServersLoad().size()));
   }
 
   protected void killZKNode(ServerName server) throws IOException {
-    LOG.info("Killing zookeeper node " + server);
+    getLogger().info("Killing zookeeper node " + server);
     cluster.killZkNode(server);
     cluster.waitForZkNodeToStop(server, killZkNodeTimeout);
-    LOG.info(String.format("Killed zookeeper node %s. Reported num of rs: %s", server,
+    getLogger().info(String.format("Killed zookeeper node %s. Reported num of rs: %s", server,
      cluster.getClusterStatus().getLiveServersLoad().size()));
   }
 
   protected void startZKNode(ServerName server) throws IOException {
-    LOG.info("Starting zookeeper node " + server.getHostname());
+    getLogger().info("Starting zookeeper node " + server.getHostname());
     cluster.startZkNode(server.getHostname(), server.getPort());
     cluster.waitForZkNodeToStart(server, startZkNodeTimeout);
-    LOG.info("Started zookeeper node " + server);
+    getLogger().info("Started zookeeper node " + server);
   }
 
   protected void killDataNode(ServerName server) throws IOException {
-    LOG.info("Killing datanode " + server);
+    getLogger().info("Killing datanode " + server);
     cluster.killDataNode(server);
     cluster.waitForDataNodeToStop(server, killDataNodeTimeout);
-    LOG.info(String.format("Killed datanode %s. Reported num of rs: %s", server,
+    getLogger().info(String.format("Killed datanode %s. Reported num of rs: %s", server,
      cluster.getClusterStatus().getLiveServersLoad().size()));
   }
 
   protected void startDataNode(ServerName server) throws IOException {
-    LOG.info("Starting datanode " + server.getHostname());
+    getLogger().info("Starting datanode " + server.getHostname());
     cluster.startDataNode(server);
     cluster.waitForDataNodeToStart(server, startDataNodeTimeout);
-    LOG.info("Started datanode " + server);
+    getLogger().info("Started datanode " + server);
   }
 
   protected void killNameNode(ServerName server) throws IOException {
-    LOG.info("Killing namenode : " + server.getHostname());
+    getLogger().info("Killing namenode : " + server.getHostname());
     cluster.killNameNode(server);
     cluster.waitForNameNodeToStop(server, killNameNodeTimeout);
-    LOG.info("Killed namenode: " + server + ". Reported num of rs:"
+    getLogger().info("Killed namenode: " + server + ". Reported num of rs:"
       + cluster.getClusterStatus().getServersSize());
   }
 
   protected void startNameNode(ServerName server) throws IOException {
-    LOG.info("Starting Namenode : " + server.getHostname());
+    getLogger().info("Starting Namenode : " + server.getHostname());
     cluster.startNameNode(server);
     cluster.waitForNameNodeToStart(server, startNameNodeTimeout);
-    LOG.info("Started namenode: " + server);
+    getLogger().info("Started namenode: " + server);
   }
 
   protected void unbalanceRegions(ClusterStatus clusterStatus,
@@ -253,7 +255,8 @@ public class Action {
     // Ugh.
     List<byte[]> regions = new LinkedList<byte[]>(serverLoad.getRegionsLoad().keySet());
     int victimRegionCount = (int)Math.ceil(fractionOfRegions * regions.size());
-    LOG.debug("Removing " + victimRegionCount + " regions from " + server.getServerName());
+    getLogger().debug("Removing " + victimRegionCount + " regions from "
+      + server.getServerName());
     for (int i = 0; i < victimRegionCount; ++i) {
       int victimIx = RandomUtils.nextInt(regions.size());
       String regionId = HRegionInfo.encodeRegionName(regions.remove(victimIx));
@@ -261,7 +264,7 @@ public class Action {
       }
     }
 
-    LOG.info("Moving " + victimRegions.size() + " regions from " + fromServers.size()
+    getLogger().info("Moving " + victimRegions.size() + " regions from " + fromServers.size()
       + " servers to " + toServers.size() + " different servers");
     Admin admin = this.context.getHBaseIntegrationTestingUtility().getHBaseAdmin();
     for (byte[] victimRegion : victimRegions) {
@@ -281,10 +284,10 @@ public class Action {
     try {
       result = admin.balancer();
     } catch (Exception e) {
-      LOG.warn("Got exception while doing balance ", e);
+      getLogger().warn("Got exception while doing balance ", e);
     }
     if (!result) {
-      LOG.error("Balancer didn't succeed");
+      getLogger().error("Balancer didn't succeed");
     }
   }
 
@@ -293,7 +296,7 @@ public class Action {
     try {
       admin.setBalancerRunning(onOrOff, synchronous);
     } catch (Exception e) {
-      LOG.warn("Got exception while switching balance ", e);
+      getLogger().warn("Got exception while switching balance ", e);
     }
   }
 
AddColumnAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -40,6 +40,10 @@ public class AddColumnAction extends Action {
     this.tableName = tableName;
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void init(ActionContext context) throws IOException {
     super.init(context);
@@ -61,7 +65,7 @@ public class AddColumnAction extends Action {
       return;
     }
 
-    LOG.debug("Performing action: Adding " + columnDescriptor + " to " + tableName);
+    getLogger().debug("Performing action: Adding " + columnDescriptor + " to " + tableName);
 
     tableDescriptor.addFamily(columnDescriptor);
     admin.modifyTable(tableName, tableDescriptor);
BatchRestartRsAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.chaos.actions;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
-
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
 import org.slf4j.Logger;
@@ -32,17 +31,20 @@ import org.slf4j.LoggerFactory;
  */
 public class BatchRestartRsAction extends RestartActionBaseAction {
   float ratio; //ratio of regionservers to restart
-  private static final Logger LOG =
-      LoggerFactory.getLogger(BatchRestartRsAction.class);
+  private static final Logger LOG = LoggerFactory.getLogger(BatchRestartRsAction.class);
 
   public BatchRestartRsAction(long sleepTime, float ratio) {
     super(sleepTime);
     this.ratio = ratio;
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
-    LOG.info(String.format("Performing action: Batch restarting %d%% of region servers",
+    getLogger().info(String.format("Performing action: Batch restarting %d%% of region servers",
       (int)(ratio * 100)));
     List<ServerName> selectedServers = PolicyBasedChaosMonkey.selectRandomItems(getCurrentServers(),
       ratio);
@@ -55,7 +57,7 @@ public class BatchRestartRsAction extends RestartActionBaseAction {
       if (context.isStopping()) {
         break;
       }
-      LOG.info("Killing region server:" + server);
+      getLogger().info("Killing region server:" + server);
       cluster.killRegionServer(server);
       killedServers.add(server);
     }
@@ -64,13 +66,13 @@ public class BatchRestartRsAction extends RestartActionBaseAction {
       cluster.waitForRegionServerToStop(server, PolicyBasedChaosMonkey.TIMEOUT);
     }
 
-    LOG.info("Killed " + killedServers.size() + " region servers. Reported num of rs:"
+    getLogger().info("Killed " + killedServers.size() + " region servers. Reported num of rs:"
       + cluster.getClusterStatus().getServersSize());
 
     sleep(sleepTime);
 
     for (ServerName server : killedServers) {
-      LOG.info("Starting region server:" + server.getHostname());
+      getLogger().info("Starting region server:" + server.getHostname());
       cluster.startRegionServer(server.getHostname(), server.getPort());
 
     }
@@ -79,7 +81,7 @@ public class BatchRestartRsAction extends RestartActionBaseAction {
         server.getPort(),
         PolicyBasedChaosMonkey.TIMEOUT);
     }
-    LOG.info("Started " + killedServers.size() +" region servers. Reported num of rs:"
+    getLogger().info("Started " + killedServers.size() +" region servers. Reported num of rs:"
       + cluster.getClusterStatus().getServersSize());
   }
 }
ChangeBloomFilterAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -47,13 +47,17 @@ public class ChangeBloomFilterAction extends Action {
     this.tableName = tableName;
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
     Random random = new Random();
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
     Admin admin = util.getHBaseAdmin();
 
-    LOG.info("Performing action: Change bloom filter on all columns of table "
+    getLogger().info("Performing action: Change bloom filter on all columns of table "
       + tableName);
     HTableDescriptor tableDescriptor = admin.getTableDescriptor(tableName);
     HColumnDescriptor[] columnDescriptors = tableDescriptor.getColumnFamilies();
@@ -67,11 +71,11 @@ public class ChangeBloomFilterAction extends Action {
 
     for (HColumnDescriptor descriptor : columnDescriptors) {
       int bloomFilterIndex = random.nextInt(bloomArraySize);
-      LOG.debug("Performing action: About to set bloom filter type to "
+      getLogger().debug("Performing action: About to set bloom filter type to "
         + bloomArray[bloomFilterIndex] + " on column "
         + descriptor.getNameAsString() + " of table " + tableName);
       descriptor.setBloomFilterType(bloomArray[bloomFilterIndex]);
-      LOG.debug("Performing action: Just set bloom filter type to "
+      getLogger().debug("Performing action: Just set bloom filter type to "
        + bloomArray[bloomFilterIndex] + " on column "
        + descriptor.getNameAsString() + " of table " + tableName);
     }
ChangeCompressionAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -46,6 +46,10 @@ public class ChangeCompressionAction extends Action {
     this.random = new Random();
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void init(ActionContext context) throws IOException {
     super.init(context);
@@ -82,12 +86,12 @@ public class ChangeCompressionAction extends Action {
         algo.returnCompressor(c);
         break;
       } catch (Throwable t) {
-        LOG.info("Performing action: Changing compression algorithms to " + algo +
+        getLogger().info("Performing action: Changing compression algorithms to " + algo +
             " is not supported, pick another one");
       }
     } while (true);
 
-    LOG.debug("Performing action: Changing compression algorithms on "
+    getLogger().debug("Performing action: Changing compression algorithms on "
       + tableName.getNameAsString() + " to " + algo);
     for (HColumnDescriptor descriptor : columnDescriptors) {
       if (random.nextBoolean()) {
ChangeEncodingAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -44,6 +44,10 @@ public class ChangeEncodingAction extends Action {
     this.random = new Random();
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void init(ActionContext context) throws IOException {
     super.init(context);
@@ -59,13 +63,13 @@ public class ChangeEncodingAction extends Action {
       return;
     }
 
-    LOG.debug("Performing action: Changing encodings on " + tableName);
+    getLogger().debug("Performing action: Changing encodings on " + tableName);
     // possible DataBlockEncoding id's
     int[] possibleIds = {0, 2, 3, 4, 6};
     for (HColumnDescriptor descriptor : columnDescriptors) {
       short id = (short) possibleIds[random.nextInt(possibleIds.length)];
       descriptor.setDataBlockEncoding(DataBlockEncoding.getEncodingById(id));
-      LOG.debug("Set encoding of column family " + descriptor.getNameAsString()
+      getLogger().debug("Set encoding of column family " + descriptor.getNameAsString()
         + " to: " + descriptor.getDataBlockEncoding());
     }
 
ChangeSplitPolicyAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -45,17 +45,20 @@ public class ChangeSplitPolicyAction extends Action {
     this.random = new Random();
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
     Admin admin = util.getHBaseAdmin();
 
-    LOG.info("Performing action: Change split policy of table " + tableName);
+    getLogger().info("Performing action: Change split policy of table " + tableName);
     HTableDescriptor tableDescriptor = admin.getTableDescriptor(tableName);
     String chosenPolicy = possiblePolicies[random.nextInt(possiblePolicies.length)];
     tableDescriptor.setRegionSplitPolicyClassName(chosenPolicy);
-    LOG.info("Changing " + tableName + " split policy to " + chosenPolicy);
+    getLogger().info("Changing " + tableName + " split policy to " + chosenPolicy);
     admin.modifyTable(tableName, tableDescriptor);
   }
 }
ChangeVersionsAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -34,17 +34,21 @@ import org.slf4j.LoggerFactory;
  * Always keeps at least 1 as the number of versions.
  */
 public class ChangeVersionsAction extends Action {
-  private final TableName tableName;
+  private static final Logger LOG = LoggerFactory.getLogger(ChangeVersionsAction.class);
+  private final TableName tableName;
+  private final Random random;
 
   private Admin admin;
-  private Random random;
 
   public ChangeVersionsAction(TableName tableName) {
     this.tableName = tableName;
     this.random = new Random();
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void init(ActionContext context) throws IOException {
     super.init(context);
@@ -68,7 +72,7 @@ public class ChangeVersionsAction extends Action {
     if (context.isStopping()) {
       return;
     }
-    LOG.debug("Performing action: Changing versions on " + tableName.getNameAsString());
+    getLogger().debug("Performing action: Changing versions on " + tableName.getNameAsString());
     admin.modifyTable(tableName, tableDescriptor);
   }
 }
CompactRandomRegionOfTableAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -33,11 +33,11 @@ import org.slf4j.LoggerFactory;
  * Region that queues a compaction of a random region from the table.
  */
 public class CompactRandomRegionOfTableAction extends Action {
+  private static final Logger LOG = LoggerFactory.getLogger(CompactRandomRegionOfTableAction.class);
+
   private final int majorRatio;
   private final long sleepTime;
   private final TableName tableName;
-  private static final Logger LOG =
-    LoggerFactory.getLogger(CompactRandomRegionOfTableAction.class);
 
   public CompactRandomRegionOfTableAction(
       TableName tableName, float majorRatio) {
@@ -51,33 +51,37 @@ public class CompactRandomRegionOfTableAction extends Action {
     this.tableName = tableName;
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
     Admin admin = util.getHBaseAdmin();
     boolean major = RandomUtils.nextInt(100) < majorRatio;
 
-    LOG.info("Performing action: Compact random region of table "
+    getLogger().info("Performing action: Compact random region of table "
       + tableName + ", major=" + major);
     List<HRegionInfo> regions = admin.getTableRegions(tableName);
     if (regions == null || regions.isEmpty()) {
-      LOG.info("Table " + tableName + " doesn't have regions to compact");
+      getLogger().info("Table " + tableName + " doesn't have regions to compact");
       return;
     }
 
     HRegionInfo region = PolicyBasedChaosMonkey.selectRandomItem(
-      regions.toArray(new HRegionInfo[regions.size()]));
+      regions.toArray(new HRegionInfo[0]));
 
     try {
       if (major) {
-        LOG.debug("Major compacting region " + region.getRegionNameAsString());
+        getLogger().debug("Major compacting region " + region.getRegionNameAsString());
         admin.majorCompactRegion(region.getRegionName());
       } else {
-        LOG.debug("Compacting region " + region.getRegionNameAsString());
+        getLogger().debug("Compacting region " + region.getRegionNameAsString());
         admin.compactRegion(region.getRegionName());
       }
     } catch (Exception ex) {
-      LOG.warn("Compaction failed, might be caused by other chaos: " + ex.getMessage());
+      getLogger().warn("Compaction failed, might be caused by other chaos: " + ex.getMessage());
     }
     if (sleepTime > 0) {
       Thread.sleep(sleepTime);
CompactTableAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -29,10 +29,11 @@ import org.slf4j.LoggerFactory;
  * Action that queues a table compaction.
  */
 public class CompactTableAction extends Action {
+  private static final Logger LOG = LoggerFactory.getLogger(CompactTableAction.class);
+
   private final TableName tableName;
   private final int majorRatio;
   private final long sleepTime;
-  private static final Logger LOG = LoggerFactory.getLogger(CompactTableAction.class);
 
   public CompactTableAction(TableName tableName, float majorRatio) {
     this(-1, tableName, majorRatio);
@@ -45,13 +46,17 @@ public class CompactTableAction extends Action {
     this.sleepTime = sleepTime;
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
     Admin admin = util.getHBaseAdmin();
     boolean major = RandomUtils.nextInt(100) < majorRatio;
 
-    LOG.info("Performing action: Compact table " + tableName + ", major=" + major);
+    getLogger().info("Performing action: Compact table " + tableName + ", major=" + major);
     try {
       if (major) {
         admin.majorCompact(tableName);
@@ -59,7 +64,7 @@ public class CompactTableAction extends Action {
         admin.compact(tableName);
       }
     } catch (Exception ex) {
-      LOG.warn("Compaction failed, might be caused by other chaos: " + ex.getMessage());
+      getLogger().warn("Compaction failed, might be caused by other chaos: " + ex.getMessage());
     }
     if (sleepTime > 0) {
       Thread.sleep(sleepTime);
DecreaseMaxHFileSizeAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -25,10 +25,13 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 
 import java.util.Random;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class DecreaseMaxHFileSizeAction extends Action {
-  private static final long minFileSize = 1 * 1024 * 1024 * 1024L;
+  private static final Logger LOG = LoggerFactory.getLogger(DecreaseMaxHFileSizeAction.class);
+
+  private static final long minFileSize = 1024 * 1024 * 1024L;
 
   private final long sleepTime;
   private final TableName tableName;
@@ -40,6 +43,10 @@ public class DecreaseMaxHFileSizeAction extends Action {
     this.random = new Random();
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
DumpClusterStatusAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hbase.chaos.actions;
 
 import java.io.IOException;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -27,8 +26,11 @@ import org.slf4j.LoggerFactory;
  * Action to dump the cluster status.
  */
 public class DumpClusterStatusAction extends Action {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(DumpClusterStatusAction.class);
+  private static final Logger LOG = LoggerFactory.getLogger(DumpClusterStatusAction.class);
+
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
 
   @Override
   public void init(ActionContext context) throws IOException {
@@ -37,7 +39,7 @@ public class DumpClusterStatusAction extends Action {
 
   @Override
   public void perform() throws Exception {
-    LOG.debug("Performing action: Dump cluster status");
-    LOG.info("Cluster status\n" + cluster.getClusterStatus());
+    getLogger().debug("Performing action: Dump cluster status");
+    getLogger().info("Cluster status\n" + cluster.getClusterStatus());
   }
 }
FlushRandomRegionOfTableAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -46,25 +46,29 @@ public class FlushRandomRegionOfTableAction extends Action {
     this.tableName = tableName;
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
     Admin admin = util.getHBaseAdmin();
 
-    LOG.info("Performing action: Flush random region of table " + tableName);
+    getLogger().info("Performing action: Flush random region of table " + tableName);
     List<HRegionInfo> regions = admin.getTableRegions(tableName);
     if (regions == null || regions.isEmpty()) {
-      LOG.info("Table " + tableName + " doesn't have regions to flush");
+      getLogger().info("Table " + tableName + " doesn't have regions to flush");
       return;
     }
 
     HRegionInfo region = PolicyBasedChaosMonkey.selectRandomItem(
-      regions.toArray(new HRegionInfo[regions.size()]));
-    LOG.debug("Flushing region " + region.getRegionNameAsString());
+      regions.toArray(new HRegionInfo[0]));
+    getLogger().debug("Flushing region " + region.getRegionNameAsString());
     try {
       admin.flushRegion(region.getRegionName());
     } catch (Exception ex) {
-      LOG.warn("Flush failed, might be caused by other chaos: " + ex.getMessage());
+      getLogger().warn("Flush failed, might be caused by other chaos: " + ex.getMessage());
     }
     if (sleepTime > 0) {
       Thread.sleep(sleepTime);
FlushTableAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -28,8 +28,7 @@ import org.slf4j.LoggerFactory;
  * Action that tries to flush a table.
  */
 public class FlushTableAction extends Action {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(FlushTableAction.class);
+  private static final Logger LOG = LoggerFactory.getLogger(FlushTableAction.class);
   private final long sleepTime;
   private final TableName tableName;
 
@@ -42,6 +41,10 @@ public class FlushTableAction extends Action {
     this.tableName = tableName;
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
@@ -52,11 +55,11 @@ public class FlushTableAction extends Action {
       return;
     }
 
-    LOG.info("Performing action: Flush table " + tableName);
+    getLogger().info("Performing action: Flush table " + tableName);
     try {
       admin.flush(tableName);
     } catch (Exception ex) {
-      LOG.warn("Flush failed, might be caused by other chaos: " + ex.getMessage());
+      getLogger().warn("Flush failed, might be caused by other chaos: " + ex.getMessage());
     }
     if (sleepTime > 0) {
       Thread.sleep(sleepTime);
ForceBalancerAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -25,8 +25,11 @@ import org.slf4j.LoggerFactory;
  * Action that tries to force a balancer run.
  */
 public class ForceBalancerAction extends Action {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ForceBalancerAction.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ForceBalancerAction.class);
+
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
 
   @Override
   public void perform() throws Exception {
@@ -34,7 +37,7 @@ public class ForceBalancerAction extends Action {
     if (context.isStopping()) {
       return;
     }
-    LOG.info("Balancing regions");
+    getLogger().info("Balancing regions");
     forceBalancer();
   }
 }
MergeRandomAdjacentRegionsOfTableAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -46,22 +46,26 @@ public class MergeRandomAdjacentRegionsOfTableAction extends Action {
     this.sleepTime = sleepTime;
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
     Admin admin = util.getHBaseAdmin();
 
-    LOG.info("Performing action: Merge random adjacent regions of table " + tableName);
+    getLogger().info("Performing action: Merge random adjacent regions of table " + tableName);
     List<HRegionInfo> regions = admin.getTableRegions(tableName);
     if (regions == null || regions.size() < 2) {
-      LOG.info("Table " + tableName + " doesn't have enough regions to merge");
+      getLogger().info("Table " + tableName + " doesn't have enough regions to merge");
      return;
     }
 
     int i = RandomUtils.nextInt(regions.size() - 1);
     HRegionInfo a = regions.get(i++);
     HRegionInfo b = regions.get(i);
-    LOG.debug("Merging " + a.getRegionNameAsString() + " and " + b.getRegionNameAsString());
+    getLogger().debug("Merging " + a.getRegionNameAsString() + " and " + b.getRegionNameAsString());
 
     // Don't try the merge if we're stopping
     if (context.isStopping()) {
@@ -71,7 +75,7 @@ public class MergeRandomAdjacentRegionsOfTableAction extends Action {
     try {
       admin.mergeRegions(a.getEncodedNameAsBytes(), b.getEncodedNameAsBytes(), false);
     } catch (Exception ex) {
-      LOG.warn("Merge failed, might be caused by other chaos: " + ex.getMessage());
+      getLogger().warn("Merge failed, might be caused by other chaos: " + ex.getMessage());
     }
     if (sleepTime > 0) {
       Thread.sleep(sleepTime);
MoveRandomRegionOfTableAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -46,6 +46,10 @@ public class MoveRandomRegionOfTableAction extends Action {
     this.tableName = tableName;
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
     if (sleepTime > 0) {
@@ -55,16 +59,16 @@ public class MoveRandomRegionOfTableAction extends Action {
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
     Admin admin = util.getHBaseAdmin();
 
-    LOG.info("Performing action: Move random region of table " + tableName);
+    getLogger().info("Performing action: Move random region of table " + tableName);
     List<HRegionInfo> regions = admin.getTableRegions(tableName);
     if (regions == null || regions.isEmpty()) {
-      LOG.info("Table " + tableName + " doesn't have regions to move");
+      getLogger().info("Table " + tableName + " doesn't have regions to move");
       return;
     }
 
     HRegionInfo region = PolicyBasedChaosMonkey.selectRandomItem(
       regions.toArray(new HRegionInfo[regions.size()]));
-    LOG.debug("Unassigning region " + region.getRegionNameAsString());
+    getLogger().debug("Unassigning region " + region.getRegionNameAsString());
     admin.unassign(region.getRegionName(), false);
     if (sleepTime > 0) {
       Thread.sleep(sleepTime);
MoveRegionsOfTableAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -46,6 +46,10 @@ public class MoveRegionsOfTableAction extends Action {
     this(-1, MonkeyConstants.DEFAULT_MOVE_REGIONS_MAX_TIME, tableName);
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   public MoveRegionsOfTableAction(long sleepTime, long maxSleepTime, TableName tableName) {
     this.sleepTime = sleepTime;
     this.tableName = tableName;
@@ -62,10 +66,10 @@ public class MoveRegionsOfTableAction extends Action {
     Collection<ServerName> serversList = admin.getClusterStatus().getServers();
     ServerName[] servers = serversList.toArray(new ServerName[serversList.size()]);
 
-    LOG.info("Performing action: Move regions of table " + tableName);
+    getLogger().info("Performing action: Move regions of table " + tableName);
     List<HRegionInfo> regions = admin.getTableRegions(tableName);
     if (regions == null || regions.isEmpty()) {
-      LOG.info("Table " + tableName + " doesn't have regions to move");
+      getLogger().info("Table " + tableName + " doesn't have regions to move");
       return;
     }
 
@@ -82,10 +86,10 @@ public class MoveRegionsOfTableAction extends Action {
       try {
         String destServerName =
           servers[RandomUtils.nextInt(servers.length)].getServerName();
-        LOG.debug("Moving " + regionInfo.getRegionNameAsString() + " to " + destServerName);
+        getLogger().debug("Moving " + regionInfo.getRegionNameAsString() + " to " + destServerName);
         admin.move(regionInfo.getEncodedNameAsBytes(), Bytes.toBytes(destServerName));
       } catch (Exception ex) {
-        LOG.warn("Move failed, might be caused by other chaos: " + ex.getMessage());
+        getLogger().warn("Move failed, might be caused by other chaos: " + ex.getMessage());
       }
       if (sleepTime > 0) {
         Thread.sleep(sleepTime);
RemoveColumnAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -47,6 +47,10 @@ public class RemoveColumnAction extends Action {
     random = new Random();
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void init(ActionContext context) throws IOException {
     super.init(context);
@@ -68,7 +72,7 @@ public class RemoveColumnAction extends Action {
       index = random.nextInt(columnDescriptors.length);
     }
     byte[] colDescName = columnDescriptors[index].getName();
-    LOG.debug("Performing action: Removing " + Bytes.toString(colDescName)+ " from "
+    getLogger().debug("Performing action: Removing " + Bytes.toString(colDescName)+ " from "
       + tableName.getNameAsString());
     tableDescriptor.removeFamily(colDescName);
 
RestartActionBaseAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -37,8 +37,12 @@ public class RestartActionBaseAction extends Action {
     this.sleepTime = sleepTime;
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   void sleep(long sleepTime) {
-    LOG.info("Sleeping for:" + sleepTime);
+    getLogger().info("Sleeping for:" + sleepTime);
     Threads.sleep(sleepTime);
   }
 
@@ -49,10 +53,10 @@ public class RestartActionBaseAction extends Action {
       return;
     }
 
-    LOG.info("Killing master: " + server);
+    getLogger().info("Killing master: " + server);
     killMaster(server);
     sleep(sleepTime);
-    LOG.info("Starting master: " + server);
+    getLogger().info("Starting master: " + server);
     startMaster(server);
   }
 
@@ -68,10 +72,10 @@ public class RestartActionBaseAction extends Action {
     if (context.isStopping()) {
       return;
     }
-    LOG.info("Stopping region server: " + server);
+    getLogger().info("Stopping region server: " + server);
     stopRs(server);
     sleep(sleepTime);
-    LOG.info("Starting region server: " + server);
+    getLogger().info("Starting region server: " + server);
     startRs(server);
   }
 
@@ -81,10 +85,10 @@ public class RestartActionBaseAction extends Action {
     if (context.isStopping()) {
       return;
     }
-    LOG.info("Killing region server: " + server);
+    getLogger().info("Killing region server: " + server);
     killRs(server);
     sleep(sleepTime);
-    LOG.info("Starting region server: " + server);
+    getLogger().info("Starting region server: " + server);
     startRs(server);
   }
 
@@ -94,10 +98,10 @@ public class RestartActionBaseAction extends Action {
     if (context.isStopping()) {
      return;
     }
-    LOG.info("Killing zookeeper node: " + server);
+    getLogger().info("Killing zookeeper node: " + server);
     killZKNode(server);
     sleep(sleepTime);
-    LOG.info("Starting zookeeper node: " + server);
+    getLogger().info("Starting zookeeper node: " + server);
     startZKNode(server);
   }
 
@@ -107,10 +111,10 @@ public class RestartActionBaseAction extends Action {
     if (context.isStopping()) {
       return;
     }
-    LOG.info("Killing data node: " + server);
+    getLogger().info("Killing data node: " + server);
     killDataNode(server);
     sleep(sleepTime);
-    LOG.info("Starting data node: " + server);
+    getLogger().info("Starting data node: " + server);
     startDataNode(server);
   }
 
@@ -120,10 +124,10 @@ public class RestartActionBaseAction extends Action {
     if (context.isStopping()) {
       return;
     }
-    LOG.info("Killing name node: " + server);
+    getLogger().info("Killing name node: " + server);
    killNameNode(server);
     sleep(sleepTime);
-    LOG.info("Starting name node: " + server);
+    getLogger().info("Starting name node: " + server);
     startNameNode(server);
   }
 }
RestartActiveMasterAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -31,9 +31,14 @@ public class RestartActiveMasterAction extends RestartActionBaseAction {
   public RestartActiveMasterAction(long sleepTime) {
     super(sleepTime);
   }
+
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
-    LOG.info("Performing action: Restart active master");
+    getLogger().info("Performing action: Restart active master");
 
     ServerName master = cluster.getClusterStatus().getMaster();
     restartMaster(master, sleepTime);
RestartActiveNameNodeAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -51,9 +51,13 @@ public class RestartActiveNameNodeAction extends RestartActionBaseAction {
     super(sleepTime);
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
-    LOG.info("Performing action: Restart active namenode");
+    getLogger().info("Performing action: Restart active namenode");
     Configuration conf = FSUtils.getRootDir(getConf()).getFileSystem(getConf()).getConf();
     String nameServiceID = DFSUtil.getNamenodeNameServiceId(conf);
     if (!HAUtil.isHAEnabled(conf, nameServiceID)) {
@@ -85,9 +89,9 @@ public class RestartActiveNameNodeAction extends RestartActionBaseAction {
     if (activeNamenode == null) {
       throw new Exception("No active Name node found in zookeeper under " + hadoopHAZkNode);
     }
-    LOG.info("Found active namenode host:" + activeNamenode);
+    getLogger().info("Found active namenode host:" + activeNamenode);
     ServerName activeNNHost = ServerName.valueOf(activeNamenode, -1, -1);
-    LOG.info("Restarting Active NameNode :" + activeNamenode);
+    getLogger().info("Restarting Active NameNode :" + activeNamenode);
     restartNameNode(activeNNHost, sleepTime);
   }
 }
RestartRandomDataNodeAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -42,9 +42,13 @@ public class RestartRandomDataNodeAction extends RestartActionBaseAction {
     super(sleepTime);
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
-    LOG.info("Performing action: Restart random data node");
+    getLogger().info("Performing action: Restart random data node");
     ServerName server = PolicyBasedChaosMonkey.selectRandomItem(getDataNodes());
     restartDataNode(server, sleepTime);
   }
@@ -57,6 +61,6 @@ public class RestartRandomDataNodeAction extends RestartActionBaseAction {
     for (DatanodeInfo dataNode: dfsClient.datanodeReport(HdfsConstants.DatanodeReportType.LIVE)) {
       hosts.add(ServerName.valueOf(dataNode.getHostName(), -1, -1));
     }
-    return hosts.toArray(new ServerName[hosts.size()]);
+    return hosts.toArray(new ServerName[0]);
   }
 }
RestartRandomRsAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -33,9 +33,13 @@ public class RestartRandomRsAction extends RestartActionBaseAction {
     super(sleepTime);
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
-    LOG.info("Performing action: Restart random region server");
+    getLogger().info("Performing action: Restart random region server");
     ServerName server = PolicyBasedChaosMonkey.selectRandomItem(getCurrentServers());
 
     restartRs(server, sleepTime);
RestartRandomRsExceptMetaAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -20,12 +20,20 @@ package org.apache.hadoop.hbase.chaos.actions;
 
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class RestartRandomRsExceptMetaAction extends RestartRandomRsAction {
+  private static final Logger LOG = LoggerFactory.getLogger(RestartRandomRsExceptMetaAction.class);
+
   public RestartRandomRsExceptMetaAction(long sleepTime) {
     super(sleepTime);
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
     int tries = 10;
RestartRandomZKNodeAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -34,9 +34,13 @@ public class RestartRandomZKNodeAction extends RestartActionBaseAction {
     super(sleepTime);
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
-    LOG.info("Performing action: Restart random zookeeper node");
+    getLogger().info("Performing action: Restart random zookeeper node");
     ServerName server = PolicyBasedChaosMonkey.selectRandomItem(
       ZKServerTool.readZKNodes(getConf()));
     restartZKNode(server, sleepTime);
RestartRsHoldingMetaAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -33,12 +33,17 @@ public class RestartRsHoldingMetaAction extends RestartActionBaseAction {
   public RestartRsHoldingMetaAction(long sleepTime) {
     super(sleepTime);
   }
+
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
-    LOG.info("Performing action: Restart region server holding META");
+    getLogger().info("Performing action: Restart region server holding META");
     ServerName server = cluster.getServerHoldingMeta();
     if (server == null) {
-      LOG.warn("No server is holding hbase:meta right now.");
+      getLogger().warn("No server is holding hbase:meta right now.");
       return;
     }
     ClusterStatus clusterStatus = cluster.getClusterStatus();
RestartRsHoldingTableAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -43,15 +43,19 @@ public class RestartRsHoldingTableAction extends RestartActionBaseAction {
     this.tableName = tableName;
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
     HTable table = null;
     try {
-      LOG.info("Performing action: Restart random RS holding table " + this.tableName);
+      getLogger().info("Performing action: Restart random RS holding table " + this.tableName);
       Configuration conf = context.getHBaseIntegrationTestingUtility().getConfiguration();
       table = new HTable(conf, TableName.valueOf(tableName));
     } catch (IOException e) {
-      LOG.debug("Error creating HTable used to get list of region locations.", e);
+      getLogger().debug("Error creating HTable used to get list of region locations.", e);
       return;
     }
 
RollingBatchRestartRsAction.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -27,10 +27,10 @@ import java.util.Objects;
 import java.util.Queue;
 
 import org.apache.commons.lang.math.RandomUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Restarts a ratio of the regionservers in a rolling fashion. At each step, either kills a
@@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
  * can be down at the same time during rolling restarts.
  */
 public class RollingBatchRestartRsAction extends BatchRestartRsAction {
-  private static final Log LOG = LogFactory.getLog(RollingBatchRestartRsAction.class);
+  private static final Logger LOG = LoggerFactory.getLogger(RollingBatchRestartRsAction.class);
   protected int maxDeadServers; // number of maximum dead servers at any given time. Defaults to 5
 
   public RollingBatchRestartRsAction(long sleepTime, float ratio) {
@@ -56,9 +56,14 @@ public class RollingBatchRestartRsAction extends BatchRestartRsAction {
     START
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
-    LOG.info(String.format("Performing action: Rolling batch restarting %d%% of region servers",
+    getLogger().info(
+      String.format("Performing action: Rolling batch restarting %d%% of region servers",
         (int)(ratio * 100)));
     List<ServerName> selectedServers = selectServers();
 
@@ -91,7 +96,7 @@ public class RollingBatchRestartRsAction extends BatchRestartRsAction {
         } catch (org.apache.hadoop.util.Shell.ExitCodeException e) {
           // We've seen this in test runs where we timeout but the kill went through. HBASE-9743
           // So, add to deadServers even if exception so the start gets called.
-          LOG.info("Problem killing but presume successful; code=" + e.getExitCode(), e);
+          getLogger().info("Problem killing but presume successful; code=" + e.getExitCode(), e);
         }
         deadServers.add(server);
         break;
@@ -105,7 +110,7 @@ public class RollingBatchRestartRsAction extends BatchRestartRsAction {
           // The start may fail but better to just keep going though we may lose server.
           // Shuffle the dead list to avoid getting stuck on a single stubborn host.
           Collections.shuffle(deadServers);
-          LOG.info(String.format(
+          getLogger().info(String.format(
             "Problem starting %s, will retry; code=%s", server, e.getExitCode(), e));
         }
         break;
@@ -139,7 +144,7 @@ public class RollingBatchRestartRsAction extends BatchRestartRsAction {
 
       @Override
       protected void killRs(ServerName server) throws IOException {
-        LOG.info("Killed " + server);
+        getLogger().info("Killed " + server);
         if (this.invocations++ % 3 == 0) {
           throw new org.apache.hadoop.util.Shell.ExitCodeException(-1, "Failed");
         }
@ -147,7 +152,7 @@ public class RollingBatchRestartRsAction extends BatchRestartRsAction {
|
|||
|
||||
@Override
|
||||
protected void startRs(ServerName server) throws IOException {
|
||||
LOG.info("Started " + server);
|
||||
getLogger().info("Started " + server);
|
||||
if (this.invocations++ % 3 == 0) {
|
||||
throw new org.apache.hadoop.util.Shell.ExitCodeException(-1, "Failed");
|
||||
}
|
||||
|
@ -156,4 +161,4 @@ public class RollingBatchRestartRsAction extends BatchRestartRsAction {
|
|||
|
||||
action.perform();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
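Note: every class touched below repeats the pattern in the hunks above: keep a private static logger, return it from the getLogger() hook that Action declares abstract, and route all logging through getLogger() so each record carries the concrete action's class name. A minimal, self-contained sketch of the pattern (illustrative names, not the HBase classes):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    abstract class BaseAction {
      // Subclasses supply the logger; the base class never owns one.
      protected abstract Logger getLogger();

      public void run() {
        // Attributed to the subclass's logger, e.g. "RestartAction".
        getLogger().info("Performing action");
      }
    }

    class RestartAction extends BaseAction {
      private static final Logger LOG = LoggerFactory.getLogger(RestartAction.class);

      @Override protected Logger getLogger() {
        return LOG;
      }
    }

With this in place, new RestartAction().run() logs under RestartAction rather than a shared base-class category, which is the point of HBASE-24295.
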
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -58,9 +58,14 @@ public class RollingBatchSuspendResumeRsAction extends Action {
     SUSPEND, RESUME
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
-    LOG.info(String.format("Performing action: Rolling batch restarting %d%% of region servers",
+    getLogger().info(
+      String.format("Performing action: Rolling batch restarting %d%% of region servers",
         (int) (ratio * 100)));
     List<ServerName> selectedServers = selectServers();
 
@@ -91,7 +96,8 @@ public class RollingBatchSuspendResumeRsAction extends Action {
       try {
         suspendRs(server);
       } catch (Shell.ExitCodeException e) {
-        LOG.warn("Problem suspending but presume successful; code=" + e.getExitCode(), e);
+        getLogger().warn("Problem suspending but presume successful; code="
+          + e.getExitCode(), e);
       }
       suspendedServers.add(server);
       break;
@@ -100,7 +106,7 @@ public class RollingBatchSuspendResumeRsAction extends Action {
       try {
         resumeRs(server);
       } catch (Shell.ExitCodeException e) {
-        LOG.info("Problem resuming, will retry; code= " + e.getExitCode(), e);
+        getLogger().info("Problem resuming, will retry; code= " + e.getExitCode(), e);
       }
       break;
     default:
@@ -108,7 +114,7 @@ public class RollingBatchSuspendResumeRsAction extends Action {
         "Encountered unexpected action type: " + action.name());
     }
 
-    LOG.info("Sleeping for: " + sleepTime);
+    getLogger().info("Sleeping for: " + sleepTime);
     Threads.sleep(sleepTime);
   }
 }

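For orientation, both rolling-batch actions drain a queue of selected servers while capping how many are down (or suspended) at once. A simplified sketch of that control loop, with assumed names standing in for killRs/suspendRs, startRs/resumeRs, and the configured sleep; it is not the HBase code itself:

    import java.util.ArrayDeque;
    import java.util.List;
    import java.util.Queue;

    final class RollingBatchSketch {
      // Stop servers one at a time, never letting more than maxDown be down
      // at once, then bring them back in the order they were stopped.
      static void roll(List<String> servers, int maxDown) throws InterruptedException {
        Queue<String> toStop = new ArrayDeque<>(servers);
        Queue<String> stopped = new ArrayDeque<>();
        while (!toStop.isEmpty() || !stopped.isEmpty()) {
          if (!toStop.isEmpty() && stopped.size() < maxDown) {
            String s = toStop.remove();
            System.out.println("stopping " + s); // stand-in for killRs/suspendRs
            stopped.add(s);
          } else {
            String s = stopped.remove();
            System.out.println("starting " + s); // stand-in for startRs/resumeRs
          }
          Thread.sleep(10); // stand-in for the configured sleepTime
        }
      }
    }
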
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -28,8 +28,7 @@ import org.slf4j.LoggerFactory;
  * Action that tries to take a snapshot of a table.
  */
 public class SnapshotTableAction extends Action {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(SnapshotTableAction.class);
+  private static final Logger LOG = LoggerFactory.getLogger(SnapshotTableAction.class);
   private final TableName tableName;
   private final long sleepTime;
 
@@ -42,6 +41,10 @@ public class SnapshotTableAction extends Action {
     this.sleepTime = sleepTime;
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
@@ -53,7 +56,7 @@ public class SnapshotTableAction extends Action {
       return;
     }
 
-    LOG.info("Performing action: Snapshot table " + tableName);
+    getLogger().info("Performing action: Snapshot table {}", tableName);
     admin.snapshot(snapshotName, tableName);
     if (sleepTime > 0) {
       Thread.sleep(sleepTime);

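The converted call site above also switches from string concatenation to SLF4J's {} placeholders, which defers message formatting until the level is known to be enabled. Both forms are valid SLF4J; for comparison:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class LoggingStyles {
      private static final Logger LOG = LoggerFactory.getLogger(LoggingStyles.class);

      void demo(String tableName) {
        LOG.info("Performing action: Snapshot table " + tableName);  // concatenates eagerly
        LOG.info("Performing action: Snapshot table {}", tableName); // formats lazily
      }
    }
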
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -19,14 +19,12 @@ package org.apache.hadoop.hbase.chaos.actions;
 
 import java.io.IOException;
 import java.util.concurrent.ThreadLocalRandom;
-
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
 public class SplitAllRegionOfTableAction extends Action {
   private static final Logger LOG =
       LoggerFactory.getLogger(SplitAllRegionOfTableAction.class);
@@ -47,6 +45,10 @@ public class SplitAllRegionOfTableAction extends Action {
     this.maxFullTableSplits = getConf().getInt(MAX_SPLIT_KEY, DEFAULT_MAX_SPLITS);
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
@@ -61,10 +63,10 @@ public class SplitAllRegionOfTableAction extends Action {
     if (ThreadLocalRandom.current().nextDouble()
         < (((double) splits) / ((double) maxFullTableSplits)) / ((double) 2)) {
       splits++;
-      LOG.info("Performing action: Split all regions of " + tableName);
+      getLogger().info("Performing action: Split all regions of {}", tableName);
       admin.split(tableName);
     } else {
-      LOG.info("Skipping split of all regions.");
+      getLogger().info("Skipping split of all regions.");
     }
   }
 }

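The guard in the last hunk compares a uniform draw against splits / maxFullTableSplits / 2, so the chance of a full-table split scales with how many such splits have already happened, relative to the cap. The same arithmetic in isolation (helper name illustrative):

    import java.util.concurrent.ThreadLocalRandom;

    final class SplitThrottleSketch {
      // With splits = 2 and maxFullTableSplits = 5 the threshold is (2/5)/2 = 0.2,
      // i.e. a 20% chance that the uniform draw falls below it.
      static boolean shouldSplit(int splits, int maxFullTableSplits) {
        double threshold = ((double) splits / (double) maxFullTableSplits) / 2.0;
        return ThreadLocalRandom.current().nextDouble() < threshold;
      }
    }
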
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -46,15 +46,19 @@ public class SplitRandomRegionOfTableAction extends Action {
     this.tableName = tableName;
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
     Admin admin = util.getHBaseAdmin();
 
-    LOG.info("Performing action: Split random region of table " + tableName);
+    getLogger().info("Performing action: Split random region of table " + tableName);
     List<HRegionInfo> regions = admin.getTableRegions(tableName);
     if (regions == null || regions.isEmpty()) {
-      LOG.info("Table " + tableName + " doesn't have regions to split");
+      getLogger().info("Table " + tableName + " doesn't have regions to split");
       return;
     }
     // Don't try the split if we're stopping
@@ -64,11 +68,11 @@ public class SplitRandomRegionOfTableAction extends Action {
 
     HRegionInfo region = PolicyBasedChaosMonkey.selectRandomItem(
       regions.toArray(new HRegionInfo[regions.size()]));
-    LOG.debug("Splitting region " + region.getRegionNameAsString());
+    getLogger().debug("Splitting region " + region.getRegionNameAsString());
     try {
       admin.splitRegion(region.getRegionName());
     } catch (Exception ex) {
-      LOG.warn("Split failed, might be caused by other chaos: " + ex.getMessage());
+      getLogger().warn("Split failed, might be caused by other chaos: " + ex.getMessage());
     }
     if (sleepTime > 0) {
       Thread.sleep(sleepTime);

@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hbase.chaos.actions;
 
 import java.util.Random;
-
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -31,8 +30,7 @@ import org.slf4j.LoggerFactory;
  * Action that tries to truncate of a table.
  */
 public class TruncateTableAction extends Action {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TruncateTableAction.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TruncateTableAction.class);
   private final TableName tableName;
   private final Random random;
 
@@ -41,6 +39,10 @@ public class TruncateTableAction extends Action {
     this.random = new Random();
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
     HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
@@ -52,8 +54,8 @@ public class TruncateTableAction extends Action {
     }
 
     boolean preserveSplits = random.nextBoolean();
-    LOG.info("Performing action: Truncate table " + tableName.getNameAsString() +
-        "preserve splits " + preserveSplits);
+    getLogger().info("Performing action: Truncate table {} preserve splits {}",
+      tableName.getNameAsString(), preserveSplits);
     admin.truncateTable(tableName, preserveSplits);
   }
 }

@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -56,6 +56,10 @@ public class UnbalanceKillAndRebalanceAction extends Action {
     this.killMetaRs = killMetaRs;
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
     ClusterStatus status = this.cluster.getClusterStatus();
@@ -86,7 +90,7 @@ public class UnbalanceKillAndRebalanceAction extends Action {
       }
 
       if (!killMetaRs && targetServer.equals(metaServer)) {
-        LOG.info("Not killing server because it holds hbase:meta.");
+        getLogger().info("Not killing server because it holds hbase:meta.");
       } else {
         killRs(targetServer);
         killedServers.add(targetServer);

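The conditional above spares the server carrying hbase:meta unless meta kills were explicitly requested. Reduced to its essentials (assumed String server names here, not the real ServerName type):

    import java.util.Objects;

    final class MetaGuardSketch {
      // True when the chaos action may kill the target: either meta kills are
      // allowed, or the target is not the server hosting hbase:meta.
      static boolean mayKill(boolean killMetaRs, String target, String metaServer) {
        return killMetaRs || !Objects.equals(target, metaServer);
      }
    }
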
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -48,9 +48,13 @@ public class UnbalanceRegionsAction extends Action {
     this.fractionOfServers = fractionOfServers;
   }
 
+  @Override protected Logger getLogger() {
+    return LOG;
+  }
+
   @Override
   public void perform() throws Exception {
-    LOG.info("Unbalancing regions");
+    getLogger().info("Unbalancing regions");
     ClusterStatus status = this.cluster.getClusterStatus();
     List<ServerName> victimServers = new LinkedList<ServerName>(status.getServers());
     int targetServerCount = (int)Math.ceil(fractionOfServers * victimServers.size());

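The victim count above rounds up, so any non-zero fraction unbalances at least one server. The same computation on its own, with worked numbers in the comment:

    final class VictimCountSketch {
      // fractionOfServers = 0.3 over 10 servers -> ceil(3.0) = 3;
      // fractionOfServers = 0.05 over 10 servers -> ceil(0.5) = 1.
      static int targetServerCount(double fractionOfServers, int serverCount) {
        return (int) Math.ceil(fractionOfServers * serverCount);
      }
    }
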
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -23,7 +23,7 @@ import java.util.List;
 
 /** A policy that runs multiple other policies one after the other */
 public class CompositeSequentialPolicy extends Policy {
-  private List<Policy> policies;
+  private final List<Policy> policies;
   public CompositeSequentialPolicy(Policy... policies) {
     this.policies = Arrays.asList(policies);
   }
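
The final hunk only marks the policy list final; the surrounding class just runs its children back to back. A stripped-down sketch of that composition (simplified Policy type, not the HBase API):

    import java.util.Arrays;
    import java.util.List;

    abstract class PolicySketch {
      abstract void runOneIteration() throws Exception;
    }

    final class CompositeSequentialSketch extends PolicySketch {
      private final List<PolicySketch> policies; // final: reference fixed at construction

      CompositeSequentialSketch(PolicySketch... policies) {
        this.policies = Arrays.asList(policies);
      }

      @Override void runOneIteration() throws Exception {
        for (PolicySketch p : policies) {
          p.runOneIteration(); // one after the other, per the class comment
        }
      }
    }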